// SPDX-License-Identifier: GPL-2.0
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017, 2019 NXP
 */

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT	31
#define PREHDR_ABS		BIT(25)

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 * pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: dummy netdev backing the NAPI context (NAPI requires a
 *	     struct net_device)
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);

#ifdef CONFIG_DEBUG_FS
/*
 * This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
 * each time the congestion callback is called with congested == true.
 */
static u64 times_congested;
#endif

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *	 being processed. This could be added by the dpaa-ethernet driver.
 *	 This would pose a problem for userspace application processing which
 *	 cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

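/*
 * With an IOMMU enabled, addresses in dequeued frame descriptors are I/O
 * virtual addresses and must be translated through the IOMMU domain before
 * phys_to_virt() is meaningful; without a domain the address is already the
 * physical/DMA address.
 */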
static void *caam_iova_to_virt(struct iommu_domain *domain,
			       dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret))
			return 0;

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
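
/*
 * A minimal caller-side sketch for caam_qi_enqueue() - illustrative only;
 * my_done_cb and my_req are hypothetical caller code, not part of this
 * driver:
 *
 *	static void my_done_cb(struct caam_drv_req *req, u32 status)
 *	{
 *		// inspect status, complete the crypto request
 *	}
 *
 *	my_req->drv_ctx = drv_ctx;	// from caam_drv_ctx_init()
 *	my_req->cbk = my_done_cb;	// invoked on response FQ dequeue
 *	ret = caam_qi_enqueue(qidev, my_req);
 *	if (ret == -EBUSY)
 *		;			// enqueue retries exhausted; back off
 *
 * The caller is expected to have filled the compound frame entries
 * (fd_sgt[0] for output, fd_sgt[1] for input) beforehand.
 */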

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);

	fd = &msg->ern.fd;

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, -EIO);
}

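/*
 * Request FQs get a dynamically allocated FQID and are destined to the CAAM
 * direct-connect portal. context_a carries the DMA address of the
 * preheader + shared descriptor, context_b the FQID of the per-CPU response
 * FQ, and the FQ is placed in the driver's congestion group so that enqueues
 * can be throttled when CAAM falls behind.
 */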
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}

static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}

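/*
 * FQ teardown follows the QMan state machine: retire the FQ (waiting if
 * retirement completes asynchronously), drain any frames still queued on it,
 * move it out of service (OOS), then destroy and free it.
 */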
static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry until the FQ reaches the retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);
	kfree(fq);

	return ret;
}

static int empty_caam_fq(struct qman_fq *fq)
{
	int ret;
	struct qm_mcr_queryfq_np np;

	/* Wait until the old CAAM FQ gets empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/*
	 * Give extra time for pending jobs from this FQ in holding tanks
	 * to get processed
	 */
	msleep(20);
	return 0;
}

int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down the old req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (IS_ERR(new_fq)) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up the new FQ to the context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the old FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to the old FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");

		return ret;
	}

	/*
	 * Re-initialise the pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for the driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill the new FQ and revert to the old one. Since the
		 * descriptor is already modified, this still counts as success.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
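
/*
 * Sketch of the update (e.g. rekey) flow this enables - illustrative only;
 * new_sh_desc stands for a shared descriptor rebuilt by the caller:
 *
 *	ret = caam_drv_ctx_update(drv_ctx, new_sh_desc);
 *	if (ret)
 *		;	// context left on (or reverted to) the old FQ
 *
 * Requests in flight on the old FQ are drained against the old descriptor
 * before it is overwritten; requests enqueued afterwards use the new one.
 */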

struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise the pre-header - set RSLS and SDLEN - and the shared
	 * descriptor, then dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If the given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find the response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(drv_ctx->req_fq)) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
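
/*
 * Typical session setup - illustrative only, error handling abbreviated;
 * some_preferred_cpu is a hypothetical caller choice:
 *
 *	int cpu = some_preferred_cpu;
 *	struct caam_drv_ctx *drv_ctx;
 *
 *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *	if (IS_ERR(drv_ctx))
 *		return PTR_ERR(drv_ctx);
 *	// cpu now names the CPU whose affine portal serves this context
 *
 * One driver context couples one request FQ with one preheader + shared
 * descriptor pair, typically one operation type of a crypto session.
 */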

void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);
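
/*
 * qi_cache_alloc()/qi_cache_free() hand out fixed-size
 * (CAAM_QI_MEMCACHE_SIZE) DMA-able buffers; fast-path users typically carve
 * their per-request context plus S/G tables out of one such buffer instead
 * of hitting the slab allocator with variable sizes.
 */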

static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);

void caam_qi_shutdown(struct device *qidev)
{
	int i;
	struct caam_qi_priv *priv = &qipriv;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
#ifdef CONFIG_DEBUG_FS
		times_congested++;
#endif
		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}

static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of a threaded ISR, in_irq() does not return an appropriate
	 * value on RT kernels, so use in_serving_softirq() to distinguish
	 * between softirq and irq contexts.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable the QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;
	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err(qidev, "Error: %#x in CAAM response FD\n",
				status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}

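/*
 * Response FQs are serviced through the affine portal of the CPU they belong
 * to. Context-A and data stashing (set up below) prefetch the FQ context and
 * frame data into that CPU's cache before the dequeue callback runs.
 */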
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
	return 0;
}

static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
			MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}
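
/*
 * Worked example for the threshold above: with 8 affine portals,
 * val = 8 * MAX_RSP_FQ_BACKLOG_PER_CPU = 2048 frames in flight across the
 * congestion group before cgr_cb() reports congestion.
 */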

static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
			return ret;
		}
	}

	return 0;
}

static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = ctrldev;

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		return err;
	}

	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		return err;
	}

	/*
	 * Enable the NAPI contexts on each core that has an affine portal.
	 */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add(net_dev, irqtask, caam_qi_poll,
			       CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		return -ENOMEM;
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
			    &times_congested, &caam_fops_u64_ro);
#endif

	ctrlpriv->qi_init = 1;
	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
	return 0;
}