/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}
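
/*
 * Note: the two helpers above update the per-device handle list with the
 * RCU list primitives while holding @lock, so writers serialize through
 * the spinlock and readers may walk dev->handle_list under
 * rcu_read_lock() without taking it.
 */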

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};
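
/*
 * hnae_bops is the fallback buffer management: hnae_get_handle() falls
 * back to it when the caller passes a NULL @bops, so a NIC driver only
 * needs to supply its own ops for non-default buffer handling.
 */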

static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);
	dev_err(dev, "__ae_match cannot read cfg data from OF or ACPI\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}
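
/*
 * class_find_device() takes a reference on the class device it returns,
 * so every successful find_ae() must eventually be balanced by a
 * put_device(&dev->cls_dev); see hnae_get_handle()/hnae_put_handle().
 */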

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for the raw packet buffers and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	hnae_free_buffers(ring);
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
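
/*
 * The descriptor array is plain kzalloc'd memory with a streaming DMA
 * mapping rather than a coherent allocation; the hardware and the CPU
 * share it through the mapping set up above in the ring's direction.
 */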

/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
	/* no matter whether tx or rx ring, the ntc and ntu start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}
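
/*
 * RINGF_DIR in ring->flags encodes the ring direction: it is set on the
 * tx ring and cleared on the rx ring above, which is what is_rx_ring()
 * tests when deciding whether to pre-attach receive buffers.
 */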

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		pr_err("notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
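
/*
 * A consumer of the notifier chain might look like the following sketch
 * (illustrative only; "my_ae_event" and "my_nb" are made-up names, not
 * part of this framework):
 *
 *	static int my_ae_event(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == HNAE_AE_REGISTER)
 *			;	// probe the newly registered AE here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ae_event,
 *	};
 *
 *	hnae_register_notifier(&my_nb);
 */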

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free rings */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit rings */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the device that uses this handle
 * @fwnode: the fwnode of the AE device providing the handle
 * @port_id: the port id of the handle to get
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);
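
/*
 * Typical caller-side usage, as a rough sketch only (the names below are
 * illustrative; the real NIC driver derives @fwnode and @port_id from
 * its own firmware description):
 *
 *	handle = hnae_get_handle(&pdev->dev, fwnode, port_id, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	// ... drive the rings in handle->qs[i] ...
 *	hnae_put_handle(handle);
 */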

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier registered for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
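
/*
 * AE-provider side, as an illustrative sketch only ("my_ae_ops" is a
 * made-up name): the driver fills in a struct hnae_ae_dev and registers
 * it, then unregisters on removal:
 *
 *	hdev->dev = &pdev->dev;
 *	hdev->ops = &my_ae_ops;	// must supply at least get_handle,
 *				// toggle_ring_irq, get_status, adjust_link
 *	ret = hnae_ae_register(hdev, THIS_MODULE);
 *	...
 *	hnae_ae_unregister(hdev);
 */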

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */