/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
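
/*
 * These knobs can be set at module load time; the values below are purely
 * illustrative, not recommendations:
 *
 *	modprobe ntb_transport transport_mtu=0x8000 copy_bytes=2048
 *
 * copy_bytes only matters once a DMA engine channel has been found; transfers
 * below the threshold are copied by the CPU to avoid DMA setup overhead.
 */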

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
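
/*
 * QPs are striped round-robin across the memory windows.  Worked example
 * (illustrative): with ntb_max_mw() == 2 and four QPs, QPs 0 and 2 map to
 * MW0 while QPs 1 and 3 map to MW1.
 */
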
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device from the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver from the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
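
/*
 * Minimal client sketch (hypothetical names, error handling elided; see the
 * ntb_netdev client for a real user).  A client registers a named device on
 * ntb_bus plus a driver whose name matches it, and the bus core then calls
 * the driver's probe with the NTB pci_dev:
 *
 *	static struct ntb_client foo_client = {
 *		.driver.name = "foo",
 *		.driver.owner = THIS_MODULE,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("foo");
 *	if (!rc)
 *		rc = ntb_register_client(&foo_client);
 */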

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}

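/*
 * Worked example of the carving above (numbers illustrative): one 1 MB
 * memory window shared by two QPs gives each QP rx_size = 512 KB, minus the
 * trailing struct ntb_rx_info.  With transport_mtu = 0x401E, rx_max_frame =
 * min(0x401E, rx_size / 2) = 0x401E, for roughly 31 entries per ring, each
 * ending in a struct ntb_payload_header.
 */
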
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[8];	/* room for "qp63", the largest name */

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	pdev = ntb_query_pdev(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	unsigned long flags;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err1;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err1;

	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, dest))
		goto err1;

	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err2;

	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err3;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err3;

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err3:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err2:
	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
err1:
	/* If the callbacks come out of order, the index of the last completed
	 * entry will be written out of order, which may stall the receive
	 * side forever.  Sync on the last submitted cookie before falling
	 * back to the CPU copy.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	unsigned long flags;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err;

	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err1;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err1;

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err1:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which the NTB transport resides
 * @handlers: receive, transmit and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed
 * the transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan)
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	else
		dmaengine_get();

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
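
/*
 * Minimal usage sketch (hypothetical names, error handling elided).  A client
 * creates a queue, pre-posts receive buffers, and then signals readiness:
 *
 *	static const struct ntb_queue_handlers foo_handlers = {
 *		.rx_handler = foo_rx_handler,
 *		.tx_handler = foo_tx_handler,
 *		.event_handler = foo_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(foo_priv, pdev, &foo_handlers);
 *	for (i = 0; i < FOO_NUM_RX_BUFS; i++)
 *		ntb_transport_rx_enqueue(qp, foo_buf[i], foo_buf[i], buf_len);
 *	ntb_transport_link_up(qp);
 */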

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Setting dma_chan to NULL forces any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the entry is to be removed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the removed buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
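
/*
 * The @cb pointer is opaque to the transport and is handed back to the
 * client's rx_handler on completion, so it typically carries the container
 * of @data.  Illustrative only, assuming an skb-based client:
 *
 *	ntb_transport_rx_enqueue(qp, skb, skb->data, buf_len);
 */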

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
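
/*
 * Illustrative send path (hypothetical names): bound the payload by
 * ntb_transport_max_size() and treat -EAGAIN as "remote ring full":
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, len);
 *	if (rc == -EAGAIN)
 *		foo_stop_queue_until_tx_handler_runs();
 */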

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
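
/*
 * Worked example of the trim above (numbers illustrative): with tx_max_frame
 * = 0x401E and a 12-byte struct ntb_payload_header, max starts at 0x4012.
 * A DMA device reporting copy_align = 3 (8-byte alignment) trims it by
 * 0x4012 % 8 = 2 bytes, down to 0x4010.
 */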