1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *
9  *   This program is free software; you can redistribute it and/or modify
10  *   it under the terms of version 2 of the GNU General Public License as
11  *   published by the Free Software Foundation.
12  *
13  *   BSD LICENSE
14  *
15  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
16  *
17  *   Redistribution and use in source and binary forms, with or without
18  *   modification, are permitted provided that the following conditions
19  *   are met:
20  *
21  *     * Redistributions of source code must retain the above copyright
22  *       notice, this list of conditions and the following disclaimer.
23  *     * Redistributions in binary form must reproduce the above copyright
24  *       notice, this list of conditions and the following disclaimer in
25  *       the documentation and/or other materials provided with the
26  *       distribution.
27  *     * Neither the name of Intel Corporation nor the names of its
28  *       contributors may be used to endorse or promote products derived
29  *       from this software without specific prior written permission.
30  *
31  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42  *
43  * Intel PCIe NTB Linux driver
44  *
45  * Contact Information:
46  * Jon Mason <jon.mason@intel.com>
47  */
48 #include <linux/debugfs.h>
49 #include <linux/delay.h>
50 #include <linux/dmaengine.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/errno.h>
53 #include <linux/export.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 #include <linux/pci.h>
57 #include <linux/slab.h>
58 #include <linux/types.h>
59 #include <linux/ntb.h>
60 #include "ntb_hw.h"
61
62 #define NTB_TRANSPORT_VERSION   3
63
64 static unsigned int transport_mtu = 0x401E;
65 module_param(transport_mtu, uint, 0644);
66 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
67
68 static unsigned char max_num_clients;
69 module_param(max_num_clients, byte, 0644);
70 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
71
72 static unsigned int copy_bytes = 1024;
73 module_param(copy_bytes, uint, 0644);
74 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
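/*
 * All three knobs are runtime-writable through sysfs (mode 0644).  As an
 * illustrative sketch (the exact module directory depends on how the NTB
 * driver is built and named on a given kernel):
 *
 *	echo 2048 > /sys/module/ntb/parameters/copy_bytes
 *
 * would raise the threshold so that transfers under 2048 bytes are done
 * with a CPU copy rather than paying the DMA engine setup cost.
 */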
75
76 struct ntb_queue_entry {
77         /* ntb_queue list reference */
78         struct list_head entry;
79         /* pointers to data to be transferred */
80         void *cb_data;
81         void *buf;
82         unsigned int len;
83         unsigned int flags;
84
85         struct ntb_transport_qp *qp;
86         union {
87                 struct ntb_payload_header __iomem *tx_hdr;
88                 struct ntb_payload_header *rx_hdr;
89         };
90         unsigned int index;
91 };
92
93 struct ntb_rx_info {
94         unsigned int entry;
95 };
96
97 struct ntb_transport_qp {
98         struct ntb_transport *transport;
99         struct ntb_device *ndev;
100         void *cb_data;
101         struct dma_chan *dma_chan;
102
103         bool client_ready;
104         bool qp_link;
105         u8 qp_num;      /* Only 64 QPs are allowed, 0-63 */
106
107         struct ntb_rx_info __iomem *rx_info;
108         struct ntb_rx_info *remote_rx_info;
109
110         void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
111                             void *data, int len);
112         struct list_head tx_free_q;
113         spinlock_t ntb_tx_free_q_lock;
114         void __iomem *tx_mw;
115         dma_addr_t tx_mw_phys;
116         unsigned int tx_index;
117         unsigned int tx_max_entry;
118         unsigned int tx_max_frame;
119
120         void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
121                             void *data, int len);
122         struct list_head rx_pend_q;
123         struct list_head rx_free_q;
124         spinlock_t ntb_rx_pend_q_lock;
125         spinlock_t ntb_rx_free_q_lock;
126         void *rx_buff;
127         unsigned int rx_index;
128         unsigned int rx_max_entry;
129         unsigned int rx_max_frame;
130         dma_cookie_t last_cookie;
131
132         void (*event_handler) (void *data, int status);
133         struct delayed_work link_work;
134         struct work_struct link_cleanup;
135
136         struct dentry *debugfs_dir;
137         struct dentry *debugfs_stats;
138
139         /* Stats */
140         u64 rx_bytes;
141         u64 rx_pkts;
142         u64 rx_ring_empty;
143         u64 rx_err_no_buf;
144         u64 rx_err_oflow;
145         u64 rx_err_ver;
146         u64 rx_memcpy;
147         u64 rx_async;
148         u64 tx_bytes;
149         u64 tx_pkts;
150         u64 tx_ring_full;
151         u64 tx_err_no_buf;
152         u64 tx_memcpy;
153         u64 tx_async;
154 };
155
156 struct ntb_transport_mw {
157         size_t size;
158         void *virt_addr;
159         dma_addr_t dma_addr;
160 };
161
162 struct ntb_transport_client_dev {
163         struct list_head entry;
164         struct device dev;
165 };
166
167 struct ntb_transport {
168         struct list_head entry;
169         struct list_head client_devs;
170
171         struct ntb_device *ndev;
172         struct ntb_transport_mw *mw;
173         struct ntb_transport_qp *qps;
174         unsigned int max_qps;
175         unsigned long qp_bitmap;
176         bool transport_link;
177         struct delayed_work link_work;
178         struct work_struct link_cleanup;
179 };
180
181 enum {
182         DESC_DONE_FLAG = 1 << 0,
183         LINK_DOWN_FLAG = 1 << 1,
184 };
185
186 struct ntb_payload_header {
187         unsigned int ver;
188         unsigned int len;
189         unsigned int flags;
190 };
191
192 enum {
193         VERSION = 0,
194         QP_LINKS,
195         NUM_QPS,
196         NUM_MWS,
197         MW0_SZ_HIGH,
198         MW0_SZ_LOW,
199         MW1_SZ_HIGH,
200         MW1_SZ_LOW,
201         MAX_SPAD,
202 };
203
204 #define QP_TO_MW(ndev, qp)      ((qp) % ntb_max_mw(ndev))
205 #define NTB_QP_DEF_NUM_ENTRIES  100
206 #define NTB_LINK_DOWN_TIMEOUT   10
207
208 static int ntb_match_bus(struct device *dev, struct device_driver *drv)
209 {
210         return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
211 }
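/*
 * Matching is a simple name-prefix compare.  Worked example (illustrative):
 * with the "%s%d" device naming used by ntb_register_client_dev() below, a
 * client driver named "foo" matches devices "foo0", "foo1", ..., because
 * only the first strlen(drv->name) characters are compared.
 */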
212
213 static int ntb_client_probe(struct device *dev)
214 {
215         const struct ntb_client *drv = container_of(dev->driver,
216                                                     struct ntb_client, driver);
217         struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
218         int rc = -EINVAL;
219
220         get_device(dev);
221         if (drv && drv->probe)
222                 rc = drv->probe(pdev);
223         if (rc)
224                 put_device(dev);
225
226         return rc;
227 }
228
229 static int ntb_client_remove(struct device *dev)
230 {
231         const struct ntb_client *drv = container_of(dev->driver,
232                                                     struct ntb_client, driver);
233         struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
234
235         if (drv && drv->remove)
236                 drv->remove(pdev);
237
238         put_device(dev);
239
240         return 0;
241 }
242
243 static struct bus_type ntb_bus_type = {
244         .name = "ntb_bus",
245         .match = ntb_match_bus,
246         .probe = ntb_client_probe,
247         .remove = ntb_client_remove,
248 };
249
250 static LIST_HEAD(ntb_transport_list);
251
252 static int ntb_bus_init(struct ntb_transport *nt)
253 {
254         if (list_empty(&ntb_transport_list)) {
255                 int rc = bus_register(&ntb_bus_type);
256                 if (rc)
257                         return rc;
258         }
259
260         list_add(&nt->entry, &ntb_transport_list);
261
262         return 0;
263 }
264
265 static void ntb_bus_remove(struct ntb_transport *nt)
266 {
267         struct ntb_transport_client_dev *client_dev, *cd;
268
269         list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
270                 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
271                         dev_name(&client_dev->dev));
272                 list_del(&client_dev->entry);
273                 device_unregister(&client_dev->dev);
274         }
275
276         list_del(&nt->entry);
277
278         if (list_empty(&ntb_transport_list))
279                 bus_unregister(&ntb_bus_type);
280 }
281
282 static void ntb_client_release(struct device *dev)
283 {
284         struct ntb_transport_client_dev *client_dev;
285         client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
286
287         kfree(client_dev);
288 }
289
290 /**
291  * ntb_unregister_client_dev - Unregister NTB client device
292  * @device_name: Name of NTB client device
293  *
294  * Unregister an NTB client device with the NTB transport layer
295  */
296 void ntb_unregister_client_dev(char *device_name)
297 {
298         struct ntb_transport_client_dev *client, *cd;
299         struct ntb_transport *nt;
300
301         list_for_each_entry(nt, &ntb_transport_list, entry)
302                 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
303                         if (!strncmp(dev_name(&client->dev), device_name,
304                                      strlen(device_name))) {
305                                 list_del(&client->entry);
306                                 device_unregister(&client->dev);
307                         }
308 }
309 EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
310
311 /**
312  * ntb_register_client_dev - Register NTB client device
313  * @device_name: Name of NTB client device
314  *
315  * Register an NTB client device with the NTB transport layer
316  */
317 int ntb_register_client_dev(char *device_name)
318 {
319         struct ntb_transport_client_dev *client_dev;
320         struct ntb_transport *nt;
321         int rc, i = 0;
322
323         if (list_empty(&ntb_transport_list))
324                 return -ENODEV;
325
326         list_for_each_entry(nt, &ntb_transport_list, entry) {
327                 struct device *dev;
328
329                 client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
330                                      GFP_KERNEL);
331                 if (!client_dev) {
332                         rc = -ENOMEM;
333                         goto err;
334                 }
335
336                 dev = &client_dev->dev;
337
338                 /* setup and register client devices */
339                 dev_set_name(dev, "%s%d", device_name, i);
340                 dev->bus = &ntb_bus_type;
341                 dev->release = ntb_client_release;
342                 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
343
344                 rc = device_register(dev);
345                 if (rc) {
346                         kfree(client_dev);
347                         goto err;
348                 }
349
350                 list_add_tail(&client_dev->entry, &nt->client_devs);
351                 i++;
352         }
353
354         return 0;
355
356 err:
357         ntb_unregister_client_dev(device_name);
358
359         return rc;
360 }
361 EXPORT_SYMBOL_GPL(ntb_register_client_dev);
362
363 /**
364  * ntb_register_client - Register NTB client driver
365  * @drv: NTB client driver to be registered
366  *
367  * Register an NTB client driver with the NTB transport layer
368  *
369  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
370  */
371 int ntb_register_client(struct ntb_client *drv)
372 {
373         drv->driver.bus = &ntb_bus_type;
374
375         if (list_empty(&ntb_transport_list))
376                 return -ENODEV;
377
378         return driver_register(&drv->driver);
379 }
380 EXPORT_SYMBOL_GPL(ntb_register_client);
381
382 /**
383  * ntb_unregister_client - Unregister NTB client driver
384  * @drv: NTB client driver to be unregistered
385  *
386  * Unregister an NTB client driver with the NTB transport layer
389  */
390 void ntb_unregister_client(struct ntb_client *drv)
391 {
392         driver_unregister(&drv->driver);
393 }
394 EXPORT_SYMBOL_GPL(ntb_unregister_client);
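/*
 * Putting the registration calls together, a minimal client module might
 * look like the sketch below.  This is illustrative only; "foo" and its
 * callbacks are hypothetical, not part of this driver:
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		return 0;	// claim the matched ntb_bus device
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *	}
 *
 *	static struct ntb_client foo_client = {
 *		.driver.name	= "foo",
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		int rc = ntb_register_client_dev("foo");
 *		if (rc)
 *			return rc;
 *		rc = ntb_register_client(&foo_client);
 *		if (rc)
 *			ntb_unregister_client_dev("foo");
 *		return rc;
 *	}
 */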
395
396 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
397                             loff_t *offp)
398 {
399         struct ntb_transport_qp *qp;
400         char *buf;
401         ssize_t ret, out_offset, out_count;
402
403         out_count = 1000;
404
405         buf = kmalloc(out_count, GFP_KERNEL);
406         if (!buf)
407                 return -ENOMEM;
408
409         qp = filp->private_data;
410         out_offset = 0;
411         out_offset += snprintf(buf + out_offset, out_count - out_offset,
412                                "NTB QP stats\n");
413         out_offset += snprintf(buf + out_offset, out_count - out_offset,
414                                "rx_bytes - \t%llu\n", qp->rx_bytes);
415         out_offset += snprintf(buf + out_offset, out_count - out_offset,
416                                "rx_pkts - \t%llu\n", qp->rx_pkts);
417         out_offset += snprintf(buf + out_offset, out_count - out_offset,
418                                "rx_memcpy - \t%llu\n", qp->rx_memcpy);
419         out_offset += snprintf(buf + out_offset, out_count - out_offset,
420                                "rx_async - \t%llu\n", qp->rx_async);
421         out_offset += snprintf(buf + out_offset, out_count - out_offset,
422                                "rx_ring_empty - %llu\n", qp->rx_ring_empty);
423         out_offset += snprintf(buf + out_offset, out_count - out_offset,
424                                "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
425         out_offset += snprintf(buf + out_offset, out_count - out_offset,
426                                "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
427         out_offset += snprintf(buf + out_offset, out_count - out_offset,
428                                "rx_err_ver - \t%llu\n", qp->rx_err_ver);
429         out_offset += snprintf(buf + out_offset, out_count - out_offset,
430                                "rx_buff - \t%p\n", qp->rx_buff);
431         out_offset += snprintf(buf + out_offset, out_count - out_offset,
432                                "rx_index - \t%u\n", qp->rx_index);
433         out_offset += snprintf(buf + out_offset, out_count - out_offset,
434                                "rx_max_entry - \t%u\n", qp->rx_max_entry);
435
436         out_offset += snprintf(buf + out_offset, out_count - out_offset,
437                                "tx_bytes - \t%llu\n", qp->tx_bytes);
438         out_offset += snprintf(buf + out_offset, out_count - out_offset,
439                                "tx_pkts - \t%llu\n", qp->tx_pkts);
440         out_offset += snprintf(buf + out_offset, out_count - out_offset,
441                                "tx_memcpy - \t%llu\n", qp->tx_memcpy);
442         out_offset += snprintf(buf + out_offset, out_count - out_offset,
443                                "tx_async - \t%llu\n", qp->tx_async);
444         out_offset += snprintf(buf + out_offset, out_count - out_offset,
445                                "tx_ring_full - \t%llu\n", qp->tx_ring_full);
446         out_offset += snprintf(buf + out_offset, out_count - out_offset,
447                                "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
448         out_offset += snprintf(buf + out_offset, out_count - out_offset,
449                                "tx_mw - \t%p\n", qp->tx_mw);
450         out_offset += snprintf(buf + out_offset, out_count - out_offset,
451                                "tx_index - \t%u\n", qp->tx_index);
452         out_offset += snprintf(buf + out_offset, out_count - out_offset,
453                                "tx_max_entry - \t%u\n", qp->tx_max_entry);
454
455         out_offset += snprintf(buf + out_offset, out_count - out_offset,
456                                "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
457                                "Up" : "Down");
458         if (out_offset > out_count)
459                 out_offset = out_count;
460
461         ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
462         kfree(buf);
463         return ret;
464 }
465
466 static const struct file_operations ntb_qp_debugfs_stats = {
467         .owner = THIS_MODULE,
468         .open = simple_open,
469         .read = debugfs_read,
470 };
471
472 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
473                          struct list_head *list)
474 {
475         unsigned long flags;
476
477         spin_lock_irqsave(lock, flags);
478         list_add_tail(entry, list);
479         spin_unlock_irqrestore(lock, flags);
480 }
481
482 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
483                                                 struct list_head *list)
484 {
485         struct ntb_queue_entry *entry;
486         unsigned long flags;
487
488         spin_lock_irqsave(lock, flags);
489         if (list_empty(list)) {
490                 entry = NULL;
491                 goto out;
492         }
493         entry = list_first_entry(list, struct ntb_queue_entry, entry);
494         list_del(&entry->entry);
495 out:
496         spin_unlock_irqrestore(lock, flags);
497
498         return entry;
499 }
500
501 static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
502                                       unsigned int qp_num)
503 {
504         struct ntb_transport_qp *qp = &nt->qps[qp_num];
505         unsigned int rx_size, num_qps_mw;
506         u8 mw_num, mw_max;
507         unsigned int i;
508
509         mw_max = ntb_max_mw(nt->ndev);
510         mw_num = QP_TO_MW(nt->ndev, qp_num);
511
512         WARN_ON(nt->mw[mw_num].virt_addr == NULL);
513
514         if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
515                 num_qps_mw = nt->max_qps / mw_max + 1;
516         else
517                 num_qps_mw = nt->max_qps / mw_max;
518
519         rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
520         qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
521         rx_size -= sizeof(struct ntb_rx_info);
522
523         qp->remote_rx_info = qp->rx_buff + rx_size;
524
525         /* Due to housekeeping, there must be at least 2 buffers */
526         qp->rx_max_frame = min(transport_mtu, rx_size / 2);
527         qp->rx_max_entry = rx_size / qp->rx_max_frame;
528         qp->rx_index = 0;
529
530         qp->remote_rx_info->entry = qp->rx_max_entry - 1;
531
532         /* set up the hdr offsets with 0's */
533         for (i = 0; i < qp->rx_max_entry; i++) {
534                 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
535                                sizeof(struct ntb_payload_header);
536                 memset(offset, 0, sizeof(struct ntb_payload_header));
537         }
538
539         qp->rx_pkts = 0;
540         qp->tx_pkts = 0;
541         qp->tx_index = 0;
542 }
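/*
 * Worked example of the split above (illustrative numbers): with mw_max = 2
 * and max_qps = 3, QP_TO_MW() puts qp0 and qp2 on MW0 and qp1 on MW1.  MW0
 * therefore serves num_qps_mw = 2 queues, each getting half of the window;
 * qp0 sits at offset 0 (qp_num / mw_max == 0) and qp2 at offset rx_size
 * (qp_num / mw_max == 1).  The tail of each chunk is carved off for the
 * struct ntb_rx_info used as the remote flow-control index.
 */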
543
544 static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
545 {
546         struct ntb_transport_mw *mw = &nt->mw[num_mw];
547         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
548
549         if (!mw->virt_addr)
550                 return;
551
552         dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
553         mw->virt_addr = NULL;
554 }
555
556 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
557 {
558         struct ntb_transport_mw *mw = &nt->mw[num_mw];
559         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
560
561         /* No need to re-setup */
562         if (mw->size == ALIGN(size, 4096))
563                 return 0;
564
565         if (mw->size != 0)
566                 ntb_free_mw(nt, num_mw);
567
568         /* Alloc memory for receiving data.  Must be 4k aligned */
569         mw->size = ALIGN(size, 4096);
570
571         mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
572                                            GFP_KERNEL);
573         if (!mw->virt_addr) {
574                 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
575                         (int) mw->size);
576                 mw->size = 0;
577                 return -ENOMEM;
578         }
579
580         /* Notify HW the memory location of the receive buffer */
581         ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
582
583         return 0;
584 }
585
586 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
587 {
588         struct ntb_transport *nt = qp->transport;
589         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
590
591         if (qp->qp_link == NTB_LINK_DOWN) {
592                 cancel_delayed_work_sync(&qp->link_work);
593                 return;
594         }
595
596         if (qp->event_handler)
597                 qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
598
599         dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
600         qp->qp_link = NTB_LINK_DOWN;
601 }
602
603 static void ntb_qp_link_cleanup_work(struct work_struct *work)
604 {
605         struct ntb_transport_qp *qp = container_of(work,
606                                                    struct ntb_transport_qp,
607                                                    link_cleanup);
608         struct ntb_transport *nt = qp->transport;
609
610         ntb_qp_link_cleanup(qp);
611
612         if (nt->transport_link == NTB_LINK_UP)
613                 schedule_delayed_work(&qp->link_work,
614                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
615 }
616
617 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
618 {
619         schedule_work(&qp->link_cleanup);
620 }
621
622 static void ntb_transport_link_cleanup(struct ntb_transport *nt)
623 {
624         int i;
625
626         /* Pass along the info to any clients */
627         for (i = 0; i < nt->max_qps; i++)
628                 if (!test_bit(i, &nt->qp_bitmap))
629                         ntb_qp_link_cleanup(&nt->qps[i]);
630
631         if (nt->transport_link == NTB_LINK_DOWN)
632                 cancel_delayed_work_sync(&nt->link_work);
633         else
634                 nt->transport_link = NTB_LINK_DOWN;
635
636         /* The scratchpad registers keep the values if the remote side
637          * goes down, blast them now to give them a sane value the next
638          * time they are accessed
639          */
640         for (i = 0; i < MAX_SPAD; i++)
641                 ntb_write_local_spad(nt->ndev, i, 0);
642 }
643
644 static void ntb_transport_link_cleanup_work(struct work_struct *work)
645 {
646         struct ntb_transport *nt = container_of(work, struct ntb_transport,
647                                                 link_cleanup);
648
649         ntb_transport_link_cleanup(nt);
650 }
651
652 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
653 {
654         struct ntb_transport *nt = data;
655
656         switch (event) {
657         case NTB_EVENT_HW_LINK_UP:
658                 schedule_delayed_work(&nt->link_work, 0);
659                 break;
660         case NTB_EVENT_HW_LINK_DOWN:
661                 schedule_work(&nt->link_cleanup);
662                 break;
663         default:
664                 BUG();
665         }
666 }
667
668 static void ntb_transport_link_work(struct work_struct *work)
669 {
670         struct ntb_transport *nt = container_of(work, struct ntb_transport,
671                                                 link_work.work);
672         struct ntb_device *ndev = nt->ndev;
673         struct pci_dev *pdev = ntb_query_pdev(ndev);
674         u32 val;
675         int rc, i;
676
677         /* send the local info, in the opposite order of the way we read it */
678         for (i = 0; i < ntb_max_mw(ndev); i++) {
679                 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
680                                            ntb_get_mw_size(ndev, i) >> 32);
681                 if (rc) {
682                         dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
683                                 (u32)(ntb_get_mw_size(ndev, i) >> 32),
684                                 MW0_SZ_HIGH + (i * 2));
685                         goto out;
686                 }
687
688                 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
689                                            (u32) ntb_get_mw_size(ndev, i));
690                 if (rc) {
691                         dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
692                                 (u32) ntb_get_mw_size(ndev, i),
693                                 MW0_SZ_LOW + (i * 2));
694                         goto out;
695                 }
696         }
697
698         rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
699         if (rc) {
700                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
701                         ntb_max_mw(ndev), NUM_MWS);
702                 goto out;
703         }
704
705         rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
706         if (rc) {
707                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
708                         nt->max_qps, NUM_QPS);
709                 goto out;
710         }
711
712         rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
713         if (rc) {
714                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
715                         NTB_TRANSPORT_VERSION, VERSION);
716                 goto out;
717         }
718
719         /* Query the remote side for its info */
720         rc = ntb_read_remote_spad(ndev, VERSION, &val);
721         if (rc) {
722                 dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
723                 goto out;
724         }
725
726         if (val != NTB_TRANSPORT_VERSION)
727                 goto out;
728         dev_dbg(&pdev->dev, "Remote version = %d\n", val);
729
730         rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
731         if (rc) {
732                 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
733                 goto out;
734         }
735
736         if (val != nt->max_qps)
737                 goto out;
738         dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
739
740         rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
741         if (rc) {
742                 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
743                 goto out;
744         }
745
746         if (val != ntb_max_mw(ndev))
747                 goto out;
748         dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
749
750         for (i = 0; i < ntb_max_mw(ndev); i++) {
751                 u64 val64;
752
753                 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
754                 if (rc) {
755                         dev_err(&pdev->dev, "Error reading remote spad %d\n",
756                                 MW0_SZ_HIGH + (i * 2));
757                         goto out1;
758                 }
759
760                 val64 = (u64) val << 32;
761
762                 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
763                 if (rc) {
764                         dev_err(&pdev->dev, "Error reading remote spad %d\n",
765                                 MW0_SZ_LOW + (i * 2));
766                         goto out1;
767                 }
768
769                 val64 |= val;
770
771                 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
772
773                 rc = ntb_set_mw(nt, i, val64);
774                 if (rc)
775                         goto out1;
776         }
777
778         nt->transport_link = NTB_LINK_UP;
779
780         for (i = 0; i < nt->max_qps; i++) {
781                 struct ntb_transport_qp *qp = &nt->qps[i];
782
783                 ntb_transport_setup_qp_mw(nt, i);
784
785                 if (qp->client_ready == NTB_LINK_UP)
786                         schedule_delayed_work(&qp->link_work, 0);
787         }
788
789         return;
790
791 out1:
792         for (i = 0; i < ntb_max_mw(ndev); i++)
793                 ntb_free_mw(nt, i);
794 out:
795         if (ntb_hw_link_status(ndev))
796                 schedule_delayed_work(&nt->link_work,
797                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
798 }
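/*
 * Both sides run this same handshake.  With the scratchpad map from the
 * enum above, it proceeds roughly as:
 *
 *	writes to peer (in order)	reads from peer (in order)
 *	MWn_SZ_HIGH / MWn_SZ_LOW	VERSION  == NTB_TRANSPORT_VERSION?
 *	NUM_MWS				NUM_QPS  == nt->max_qps?
 *	NUM_QPS				NUM_MWS  == ntb_max_mw()?
 *	VERSION				MWn sizes -> ntb_set_mw()
 *
 * On a mismatch or spad access error the work item bails out and, as long
 * as the hardware link is up, retries after NTB_LINK_DOWN_TIMEOUT msecs.
 */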
799
800 static void ntb_qp_link_work(struct work_struct *work)
801 {
802         struct ntb_transport_qp *qp = container_of(work,
803                                                    struct ntb_transport_qp,
804                                                    link_work.work);
805         struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
806         struct ntb_transport *nt = qp->transport;
807         int rc, val;
808
809         WARN_ON(nt->transport_link != NTB_LINK_UP);
810
811         rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
812         if (rc) {
813                 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
814                 return;
815         }
816
817         rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
818         if (rc)
819                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
820                         val | 1 << qp->qp_num, QP_LINKS);
821
822         /* query remote spad for qp ready bits */
823         rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
824         if (rc)
825                 dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
826
827         dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
828
829         /* See if the remote side is up */
830         if (1 << qp->qp_num & val) {
831                 qp->qp_link = NTB_LINK_UP;
832
833                 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
834                 if (qp->event_handler)
835                         qp->event_handler(qp->cb_data, NTB_LINK_UP);
836         } else if (nt->transport_link == NTB_LINK_UP)
837                 schedule_delayed_work(&qp->link_work,
838                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
839 }
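/*
 * QP_LINKS is used as a bitmask: each side advertises a ready queue by
 * setting bit qp_num in the peer's QP_LINKS scratchpad, and the qp link is
 * declared up once the matching bit is seen from the peer.  E.g. for qp 2
 * the local side ORs in 1 << 2 (0x4) and waits to observe bit 2 in return.
 */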
840
841 static int ntb_transport_init_queue(struct ntb_transport *nt,
842                                      unsigned int qp_num)
843 {
844         struct ntb_transport_qp *qp;
845         unsigned int num_qps_mw, tx_size;
846         u8 mw_num, mw_max;
847         u64 qp_offset;
848
849         mw_max = ntb_max_mw(nt->ndev);
850         mw_num = QP_TO_MW(nt->ndev, qp_num);
851
852         qp = &nt->qps[qp_num];
853         qp->qp_num = qp_num;
854         qp->transport = nt;
855         qp->ndev = nt->ndev;
856         qp->qp_link = NTB_LINK_DOWN;
857         qp->client_ready = NTB_LINK_DOWN;
858         qp->event_handler = NULL;
859
860         if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
861                 num_qps_mw = nt->max_qps / mw_max + 1;
862         else
863                 num_qps_mw = nt->max_qps / mw_max;
864
865         tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
866         qp_offset = qp_num / mw_max * tx_size;
867         qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
868         if (!qp->tx_mw)
869                 return -EINVAL;
870
871         qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
872         if (!qp->tx_mw_phys)
873                 return -EINVAL;
874
875         tx_size -= sizeof(struct ntb_rx_info);
876         qp->rx_info = qp->tx_mw + tx_size;
877
878         /* Due to housekeeping, there must be at least 2 buffers */
879         qp->tx_max_frame = min(transport_mtu, tx_size / 2);
880         qp->tx_max_entry = tx_size / qp->tx_max_frame;
881
882         if (ntb_query_debugfs(nt->ndev)) {
883                 char debugfs_name[8];
884
885                 snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
886                 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
887                                                  ntb_query_debugfs(nt->ndev));
888
889                 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
890                                                         qp->debugfs_dir, qp,
891                                                         &ntb_qp_debugfs_stats);
892         }
893
894         INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
895         INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
896
897         spin_lock_init(&qp->ntb_rx_pend_q_lock);
898         spin_lock_init(&qp->ntb_rx_free_q_lock);
899         spin_lock_init(&qp->ntb_tx_free_q_lock);
900
901         INIT_LIST_HEAD(&qp->rx_pend_q);
902         INIT_LIST_HEAD(&qp->rx_free_q);
903         INIT_LIST_HEAD(&qp->tx_free_q);
904
905         return 0;
906 }
907
908 int ntb_transport_init(struct pci_dev *pdev)
909 {
910         struct ntb_transport *nt;
911         int rc, i;
912
913         nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
914         if (!nt)
915                 return -ENOMEM;
916
917         nt->ndev = ntb_register_transport(pdev, nt);
918         if (!nt->ndev) {
919                 rc = -EIO;
920                 goto err;
921         }
922
923         nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
924                          GFP_KERNEL);
925         if (!nt->mw) {
926                 rc = -ENOMEM;
927                 goto err1;
928         }
929
930         if (max_num_clients)
931                 nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
932         else
933                 nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
934
935         nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
936                           GFP_KERNEL);
937         if (!nt->qps) {
938                 rc = -ENOMEM;
939                 goto err2;
940         }
941
942         nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
943
944         for (i = 0; i < nt->max_qps; i++) {
945                 rc = ntb_transport_init_queue(nt, i);
946                 if (rc)
947                         goto err3;
948         }
949
950         INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
951         INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
952
953         rc = ntb_register_event_callback(nt->ndev,
954                                          ntb_transport_event_callback);
955         if (rc)
956                 goto err3;
957
958         INIT_LIST_HEAD(&nt->client_devs);
959         rc = ntb_bus_init(nt);
960         if (rc)
961                 goto err4;
962
963         if (ntb_hw_link_status(nt->ndev))
964                 schedule_delayed_work(&nt->link_work, 0);
965
966         return 0;
967
968 err4:
969         ntb_unregister_event_callback(nt->ndev);
970 err3:
971         kfree(nt->qps);
972 err2:
973         kfree(nt->mw);
974 err1:
975         ntb_unregister_transport(nt->ndev);
976 err:
977         kfree(nt);
978         return rc;
979 }
980
981 void ntb_transport_free(void *transport)
982 {
983         struct ntb_transport *nt = transport;
984         struct ntb_device *ndev = nt->ndev;
985         int i;
986
987         ntb_transport_link_cleanup(nt);
988
989         /* verify that all the qps are freed */
990         for (i = 0; i < nt->max_qps; i++) {
991                 if (!test_bit(i, &nt->qp_bitmap))
992                         ntb_transport_free_queue(&nt->qps[i]);
993                 debugfs_remove_recursive(nt->qps[i].debugfs_dir);
994         }
995
996         ntb_bus_remove(nt);
997
998         cancel_delayed_work_sync(&nt->link_work);
999
1000         ntb_unregister_event_callback(ndev);
1001
1002         for (i = 0; i < ntb_max_mw(ndev); i++)
1003                 ntb_free_mw(nt, i);
1004
1005         kfree(nt->qps);
1006         kfree(nt->mw);
1007         ntb_unregister_transport(ndev);
1008         kfree(nt);
1009 }
1010
1011 static void ntb_rx_copy_callback(void *data)
1012 {
1013         struct ntb_queue_entry *entry = data;
1014         struct ntb_transport_qp *qp = entry->qp;
1015         void *cb_data = entry->cb_data;
1016         unsigned int len = entry->len;
1017         struct ntb_payload_header *hdr = entry->rx_hdr;
1018
1019         /* Ensure that the data is fully copied out before clearing the flag */
1020         wmb();
1021         hdr->flags = 0;
1022
1023         iowrite32(entry->index, &qp->rx_info->entry);
1024
1025         ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1026
1027         if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
1028                 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1029 }
1030
1031 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1032 {
1033         void *buf = entry->buf;
1034         size_t len = entry->len;
1035
1036         memcpy(buf, offset, len);
1037
1038         ntb_rx_copy_callback(entry);
1039 }
1040
1041 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1042                          size_t len)
1043 {
1044         struct dma_async_tx_descriptor *txd;
1045         struct ntb_transport_qp *qp = entry->qp;
1046         struct dma_chan *chan = qp->dma_chan;
1047         struct dma_device *device;
1048         size_t pay_off, buff_off;
1049         struct dmaengine_unmap_data *unmap;
1050         dma_cookie_t cookie;
1051         void *buf = entry->buf;
1052
1053         entry->len = len;
1054
1055         if (!chan)
1056                 goto err;
1057
1058         if (len < copy_bytes)
1059                 goto err_wait;
1060
1061         device = chan->device;
1062         pay_off = (size_t) offset & ~PAGE_MASK;
1063         buff_off = (size_t) buf & ~PAGE_MASK;
1064
1065         if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1066                 goto err_wait;
1067
1068         unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1069         if (!unmap)
1070                 goto err_wait;
1071
1072         unmap->len = len;
1073         unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
1074                                       pay_off, len, DMA_TO_DEVICE);
1075         if (dma_mapping_error(device->dev, unmap->addr[0]))
1076                 goto err_get_unmap;
1077
1078         unmap->to_cnt = 1;
1079
1080         unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
1081                                       buff_off, len, DMA_FROM_DEVICE);
1082         if (dma_mapping_error(device->dev, unmap->addr[1]))
1083                 goto err_get_unmap;
1084
1085         unmap->from_cnt = 1;
1086
1087         txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1088                                              unmap->addr[0], len,
1089                                              DMA_PREP_INTERRUPT);
1090         if (!txd)
1091                 goto err_get_unmap;
1092
1093         txd->callback = ntb_rx_copy_callback;
1094         txd->callback_param = entry;
1095         dma_set_unmap(txd, unmap);
1096
1097         cookie = dmaengine_submit(txd);
1098         if (dma_submit_error(cookie))
1099                 goto err_set_unmap;
1100
1101         dmaengine_unmap_put(unmap);
1102
1103         qp->last_cookie = cookie;
1104
1105         qp->rx_async++;
1106
1107         return;
1108
1109 err_set_unmap:
1110         dmaengine_unmap_put(unmap);
1111 err_get_unmap:
1112         dmaengine_unmap_put(unmap);
1113 err_wait:
1114         /* If the callbacks come out of order, the index writes to rx_info will
1115          * also be out of order and may stall the receiver forever, so wait for
1116          * the last submitted DMA copy to complete before falling back to memcpy.
1117          */
1118         dma_sync_wait(chan, qp->last_cookie);
1119 err:
1120         ntb_memcpy_rx(entry, offset);
1121         qp->rx_memcpy++;
1122 }
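/*
 * Summarizing the rx fast-path policy above: payloads shorter than
 * copy_bytes (1024 by default), payloads the DMA device cannot copy with
 * the given source/destination alignment, and any descriptor or mapping
 * failure all fall back to the synchronous memcpy path, so a DMA engine
 * remains an optimization rather than a requirement.
 */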
1123
1124 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1125 {
1126         struct ntb_payload_header *hdr;
1127         struct ntb_queue_entry *entry;
1128         void *offset;
1129
1130         offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1131         hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1132
1133         entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1134         if (!entry) {
1135                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1136                         "no buffer - HDR ver %u, len %d, flags %x\n",
1137                         hdr->ver, hdr->len, hdr->flags);
1138                 qp->rx_err_no_buf++;
1139                 return -ENOMEM;
1140         }
1141
1142         if (!(hdr->flags & DESC_DONE_FLAG)) {
1143                 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1144                              &qp->rx_pend_q);
1145                 qp->rx_ring_empty++;
1146                 return -EAGAIN;
1147         }
1148
1149         if (hdr->ver != (u32) qp->rx_pkts) {
1150                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1151                         "qp %d: version mismatch, expected %llu - got %u\n",
1152                         qp->qp_num, qp->rx_pkts, hdr->ver);
1153                 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1154                              &qp->rx_pend_q);
1155                 qp->rx_err_ver++;
1156                 return -EIO;
1157         }
1158
1159         if (hdr->flags & LINK_DOWN_FLAG) {
1160                 ntb_qp_link_down(qp);
1161
1162                 goto err;
1163         }
1164
1165         dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1166                 "rx offset %u, ver %u - %d payload received, buf size %d\n",
1167                 qp->rx_index, hdr->ver, hdr->len, entry->len);
1168
1169         qp->rx_bytes += hdr->len;
1170         qp->rx_pkts++;
1171
1172         if (hdr->len > entry->len) {
1173                 qp->rx_err_oflow++;
1174                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1175                         "RX overflow! Wanted %d got %d\n",
1176                         hdr->len, entry->len);
1177
1178                 goto err;
1179         }
1180
1181         entry->index = qp->rx_index;
1182         entry->rx_hdr = hdr;
1183
1184         ntb_async_rx(entry, offset, hdr->len);
1185
1186 out:
1187         qp->rx_index++;
1188         qp->rx_index %= qp->rx_max_entry;
1189
1190         return 0;
1191
1192 err:
1193         ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1194                      &qp->rx_pend_q);
1195         /* Ensure that the data is fully copied out before clearing the flag */
1196         wmb();
1197         hdr->flags = 0;
1198         iowrite32(qp->rx_index, &qp->rx_info->entry);
1199
1200         goto out;
1201 }
1202
1203 static int ntb_transport_rxc_db(void *data, int db_num)
1204 {
1205         struct ntb_transport_qp *qp = data;
1206         int rc, i;
1207
1208         dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1209                 __func__, db_num);
1210
1211         /* Limit the number of packets processed in a single interrupt to
1212          * provide fairness to others
1213          */
1214         for (i = 0; i < qp->rx_max_entry; i++) {
1215                 rc = ntb_process_rxc(qp);
1216                 if (rc)
1217                         break;
1218         }
1219
1220         if (qp->dma_chan)
1221                 dma_async_issue_pending(qp->dma_chan);
1222
1223         return i;
1224 }
1225
1226 static void ntb_tx_copy_callback(void *data)
1227 {
1228         struct ntb_queue_entry *entry = data;
1229         struct ntb_transport_qp *qp = entry->qp;
1230         struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1231
1232         /* Ensure that the data is fully copied out before setting the flags */
1233         wmb();
1234         iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1235
1236         ntb_ring_doorbell(qp->ndev, qp->qp_num);
1237
1238         /* The entry length can only be zero if the packet is intended to be a
1239          * "link down" or similar.  Since no payload is being sent in these
1240          * cases, there is nothing to add to the completion queue.
1241          */
1242         if (entry->len > 0) {
1243                 qp->tx_bytes += entry->len;
1244
1245                 if (qp->tx_handler)
1246                         qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1247                                        entry->len);
1248         }
1249
1250         ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1251 }
1252
1253 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1254 {
1255         memcpy_toio(offset, entry->buf, entry->len);
1256
1257         ntb_tx_copy_callback(entry);
1258 }
1259
1260 static void ntb_async_tx(struct ntb_transport_qp *qp,
1261                          struct ntb_queue_entry *entry)
1262 {
1263         struct ntb_payload_header __iomem *hdr;
1264         struct dma_async_tx_descriptor *txd;
1265         struct dma_chan *chan = qp->dma_chan;
1266         struct dma_device *device;
1267         size_t dest_off, buff_off;
1268         struct dmaengine_unmap_data *unmap;
1269         dma_addr_t dest;
1270         dma_cookie_t cookie;
1271         void __iomem *offset;
1272         size_t len = entry->len;
1273         void *buf = entry->buf;
1274
1275         offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1276         hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1277         entry->tx_hdr = hdr;
1278
1279         iowrite32(entry->len, &hdr->len);
1280         iowrite32((u32) qp->tx_pkts, &hdr->ver);
1281
1282         if (!chan)
1283                 goto err;
1284
1285         if (len < copy_bytes)
1286                 goto err;
1287
1288         device = chan->device;
1289         dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
1290         buff_off = (size_t) buf & ~PAGE_MASK;
1291         dest_off = (size_t) dest & ~PAGE_MASK;
1292
1293         if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1294                 goto err;
1295
1296         unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1297         if (!unmap)
1298                 goto err;
1299
1300         unmap->len = len;
1301         unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1302                                       buff_off, len, DMA_TO_DEVICE);
1303         if (dma_mapping_error(device->dev, unmap->addr[0]))
1304                 goto err_get_unmap;
1305
1306         unmap->to_cnt = 1;
1307
1308         txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1309                                              DMA_PREP_INTERRUPT);
1310         if (!txd)
1311                 goto err_get_unmap;
1312
1313         txd->callback = ntb_tx_copy_callback;
1314         txd->callback_param = entry;
1315         dma_set_unmap(txd, unmap);
1316
1317         cookie = dmaengine_submit(txd);
1318         if (dma_submit_error(cookie))
1319                 goto err_set_unmap;
1320
1321         dmaengine_unmap_put(unmap);
1322
1323         dma_async_issue_pending(chan);
1324         qp->tx_async++;
1325
1326         return;
1327 err_set_unmap:
1328         dmaengine_unmap_put(unmap);
1329 err_get_unmap:
1330         dmaengine_unmap_put(unmap);
1331 err:
1332         ntb_memcpy_tx(entry, offset);
1333         qp->tx_memcpy++;
1334 }
1335
1336 static int ntb_process_tx(struct ntb_transport_qp *qp,
1337                           struct ntb_queue_entry *entry)
1338 {
1339         dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
1340                 qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
1341                 entry->buf);
1342         if (qp->tx_index == qp->remote_rx_info->entry) {
1343                 qp->tx_ring_full++;
1344                 return -EAGAIN;
1345         }
1346
1347         if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1348                 if (qp->tx_handler)
1349                         qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1350
1351                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1352                              &qp->tx_free_q);
1353                 return 0;
1354         }
1355
1356         ntb_async_tx(qp, entry);
1357
1358         qp->tx_index++;
1359         qp->tx_index %= qp->tx_max_entry;
1360
1361         qp->tx_pkts++;
1362
1363         return 0;
1364 }
1365
1366 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1367 {
1368         struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1369         struct ntb_queue_entry *entry;
1370         int i, rc;
1371
1372         if (qp->qp_link == NTB_LINK_DOWN)
1373                 return;
1374
1375         qp->qp_link = NTB_LINK_DOWN;
1376         dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1377
1378         for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1379                 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1380                 if (entry)
1381                         break;
1382                 msleep(100);
1383         }
1384
1385         if (!entry)
1386                 return;
1387
1388         entry->cb_data = NULL;
1389         entry->buf = NULL;
1390         entry->len = 0;
1391         entry->flags = LINK_DOWN_FLAG;
1392
1393         rc = ntb_process_tx(qp, entry);
1394         if (rc)
1395                 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1396                         qp->qp_num);
1397 }
1398
1399 /**
1400  * ntb_transport_create_queue - Create a new NTB transport layer queue
1401  * @data: pointer for callback data
1402  * @pdev: PCI device the queue is created on
1403  * @handlers: pointer to a struct of rx, tx, and event callback functions
1404  *
1405  * Create a new NTB transport layer queue and provide the queue with a callback
1406  * routine for both transmit and receive.  The receive callback routine will be
1407  * used to pass up data when the transport has received it on the queue.  The
1408  * transmit callback routine will be called when the transport has completed the
1409  * transmission of the data on the queue and the data is ready to be freed.
1410  *
1411  * RETURNS: pointer to newly created ntb_queue, NULL on error.
1412  */
1413 struct ntb_transport_qp *
1414 ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1415                            const struct ntb_queue_handlers *handlers)
1416 {
1417         struct ntb_queue_entry *entry;
1418         struct ntb_transport_qp *qp;
1419         struct ntb_transport *nt;
1420         unsigned int free_queue;
1421         int rc, i;
1422
1423         nt = ntb_find_transport(pdev);
1424         if (!nt)
1425                 goto err;
1426
1427         free_queue = ffs(nt->qp_bitmap);
1428         if (!free_queue)
1429                 goto err;
1430
1431         /* decrement free_queue to make it zero based */
1432         free_queue--;
1433
1434         clear_bit(free_queue, &nt->qp_bitmap);
1435
1436         qp = &nt->qps[free_queue];
1437         qp->cb_data = data;
1438         qp->rx_handler = handlers->rx_handler;
1439         qp->tx_handler = handlers->tx_handler;
1440         qp->event_handler = handlers->event_handler;
1441
1442         dmaengine_get();
1443         qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1444         if (!qp->dma_chan) {
1445                 dmaengine_put();
1446                 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
1447         }
1448
1449         for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1450                 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1451                 if (!entry)
1452                         goto err1;
1453
1454                 entry->qp = qp;
1455                 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
1456                              &qp->rx_free_q);
1457         }
1458
1459         for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1460                 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1461                 if (!entry)
1462                         goto err2;
1463
1464                 entry->qp = qp;
1465                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1466                              &qp->tx_free_q);
1467         }
1468
1469         rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1470                                       ntb_transport_rxc_db);
1471         if (rc)
1472                 goto err2;
1473
1474         dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1475
1476         return qp;
1477
1478 err2:
1479         while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1480                 kfree(entry);
1481 err1:
1482         while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1483                 kfree(entry);
1484         if (qp->dma_chan)
1485                 dmaengine_put();
1486         set_bit(free_queue, &nt->qp_bitmap);
1487 err:
1488         return NULL;
1489 }
1490 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
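/*
 * Typical client usage, as an illustrative sketch (the "foo" names, foo_tx,
 * foo_event, and FOO_BUF_LEN are hypothetical):
 *
 *	static void foo_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			   void *data, int len)
 *	{
 *		// consume 'data', then repost the buffer
 *		ntb_transport_rx_enqueue(qp, data, data, FOO_BUF_LEN);
 *	}
 *
 *	static const struct ntb_queue_handlers foo_handlers = {
 *		.rx_handler	= foo_rx,
 *		.tx_handler	= foo_tx,
 *		.event_handler	= foo_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(foo_priv, pdev, &foo_handlers);
 *	// ...post initial rx buffers with ntb_transport_rx_enqueue()...
 *	ntb_transport_link_up(qp);
 */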
1491
1492 /**
1493  * ntb_transport_free_queue - Frees NTB transport queue
1494  * @qp: NTB queue to be freed
1495  *
1496  * Frees NTB transport queue
1497  */
1498 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1499 {
1500         struct pci_dev *pdev;
1501         struct ntb_queue_entry *entry;
1502
1503         if (!qp)
1504                 return;
1505
1506         pdev = ntb_query_pdev(qp->ndev);
1507
1508         if (qp->dma_chan) {
1509                 struct dma_chan *chan = qp->dma_chan;
1510                 /* Setting the dma_chan to NULL will force any new traffic to be
1511                  * processed by the CPU instead of the DMA engine
1512                  */
1513                 qp->dma_chan = NULL;
1514
1515                 /* Try to be nice and wait for any queued DMA engine
1516                  * transactions to process before smashing it with a rock
1517                  */
1518                 dma_sync_wait(chan, qp->last_cookie);
1519                 dmaengine_terminate_all(chan);
1520                 dmaengine_put();
1521         }
1522
1523         ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1524
1525         cancel_delayed_work_sync(&qp->link_work);
1526
1527         while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1528                 kfree(entry);
1529
1530         while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
1531                 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
1532                 kfree(entry);
1533         }
1534
1535         while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1536                 kfree(entry);
1537
1538         set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1539
1540         dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1541 }
1542 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1543
1544 /**
1545  * ntb_transport_rx_remove - Dequeues enqueued rx packet
1546  * @qp: NTB transport layer queue the entry is dequeued from
1547  * @len: pointer to a variable to receive the dequeued buffer's length
1548  *
1549  * Dequeues unused buffers from receive queue.  Should only be used during
1550  * shutdown of qp.
1551  *
1552  * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
1553  */
1554 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1555 {
1556         struct ntb_queue_entry *entry;
1557         void *buf;
1558
1559         if (!qp || qp->client_ready == NTB_LINK_UP)
1560                 return NULL;
1561
1562         entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1563         if (!entry)
1564                 return NULL;
1565
1566         buf = entry->cb_data;
1567         *len = entry->len;
1568
1569         ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1570
1571         return buf;
1572 }
1573 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
1574
1575 /**
1576  * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1577  * @qp: NTB transport layer queue the entry is to be enqueued on
1578  * @cb: per buffer pointer for callback function to use
1579  * @data: pointer to data buffer that incoming packets will be copied into
1580  * @len: length of the data buffer
1581  *
1582  * Enqueue a new receive buffer onto the transport queue into which an NTB
1583  * payload can be received.
1584  *
1585  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1586  */
1587 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1588                              unsigned int len)
1589 {
1590         struct ntb_queue_entry *entry;
1591
1592         if (!qp)
1593                 return -EINVAL;
1594
1595         entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1596         if (!entry)
1597                 return -ENOMEM;
1598
1599         entry->cb_data = cb;
1600         entry->buf = data;
1601         entry->len = len;
1602
1603         ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1604
1605         return 0;
1606 }
1607 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
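/*
 * Note that rx buffers are single-use: once the rx_handler has run for a
 * buffer, its entry has already moved back to rx_free_q, so a client that
 * wants to keep receiving typically reposts from inside its rx_handler,
 * e.g. (sketch): ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 */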
1608
1609 /**
1610  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1611  * @qp: NTB transport layer queue the entry is to be enqueued on
1612  * @cb: per buffer pointer for callback function to use
1613  * @data: pointer to data buffer that will be sent
1614  * @len: length of the data buffer
1615  *
1616  * Enqueue a new transmit buffer onto the transport queue from which an NTB
1617  * payload will be transmitted.  This assumes that a lock is being held to
1618  * serialize access to the qp.
1619  *
1620  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1621  */
1622 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1623                              unsigned int len)
1624 {
1625         struct ntb_queue_entry *entry;
1626         int rc;
1627
1628         if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1629                 return -EINVAL;
1630
1631         entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1632         if (!entry) {
1633                 qp->tx_err_no_buf++;
1634                 return -ENOMEM;
1635         }
1636
1637         entry->cb_data = cb;
1638         entry->buf = data;
1639         entry->len = len;
1640         entry->flags = 0;
1641
1642         rc = ntb_process_tx(qp, entry);
1643         if (rc)
1644                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1645                              &qp->tx_free_q);
1646
1647         return rc;
1648 }
1649 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
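/*
 * The data buffer must remain untouched until the tx_handler runs.  A
 * common pattern (sketch; skb handling is the caller's, not this driver's)
 * is to free the buffer from the callback, and only on enqueue failure here:
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 *	if (rc)
 *		dev_kfree_skb(skb);	// ring full or link down
 */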
1650
1651 /**
1652  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1653  * @qp: NTB transport layer queue to be enabled
1654  *
1655  * Notify NTB transport layer of client readiness to use queue
1656  */
1657 void ntb_transport_link_up(struct ntb_transport_qp *qp)
1658 {
1659         if (!qp)
1660                 return;
1661
1662         qp->client_ready = NTB_LINK_UP;
1663
1664         if (qp->transport->transport_link == NTB_LINK_UP)
1665                 schedule_delayed_work(&qp->link_work, 0);
1666 }
1667 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1668
1669 /**
1670  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1671  * @qp: NTB transport layer queue to be disabled
1672  *
1673  * Notify NTB transport layer of client's desire to no longer receive data on
1674  * transport queue specified.  It is the client's responsibility to ensure all
1675  * entries on queue are purged or otherwise handled appropriately.
1676  */
1677 void ntb_transport_link_down(struct ntb_transport_qp *qp)
1678 {
1679         struct pci_dev *pdev;
1680         int rc, val;
1681
1682         if (!qp)
1683                 return;
1684
1685         pdev = ntb_query_pdev(qp->ndev);
1686         qp->client_ready = NTB_LINK_DOWN;
1687
1688         rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1689         if (rc) {
1690                 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
1691                 return;
1692         }
1693
1694         rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1695                                    val & ~(1 << qp->qp_num));
1696         if (rc)
1697                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
1698                         val & ~(1 << qp->qp_num), QP_LINKS);
1699
1700         if (qp->qp_link == NTB_LINK_UP)
1701                 ntb_send_link_down(qp);
1702         else
1703                 cancel_delayed_work_sync(&qp->link_work);
1704 }
1705 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1706
1707 /**
1708  * ntb_transport_link_query - Query transport link state
1709  * @qp: NTB transport layer queue to be queried
1710  *
1711  * Query connectivity to the remote system of the NTB transport queue
1712  *
1713  * RETURNS: true for link up or false for link down
1714  */
1715 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1716 {
1717         if (!qp)
1718                 return false;
1719
1720         return qp->qp_link == NTB_LINK_UP;
1721 }
1722 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1723
1724 /**
1725  * ntb_transport_qp_num - Query the qp number
1726  * @qp: NTB transport layer queue to be queried
1727  *
1728  * Query qp number of the NTB transport queue
1729  *
1730  * RETURNS: a zero based number specifying the qp number
1731  */
1732 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1733 {
1734         if (!qp)
1735                 return 0;
1736
1737         return qp->qp_num;
1738 }
1739 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1740
1741 /**
1742  * ntb_transport_max_size - Query the max payload size of a qp
1743  * @qp: NTB transport layer queue to be queried
1744  *
1745  * Query the maximum payload size permissible on the given qp
1746  *
1747  * RETURNS: the max payload size of a qp
1748  */
1749 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1750 {
1751         unsigned int max;
1752
1753         if (!qp)
1754                 return 0;
1755
1756         if (!qp->dma_chan)
1757                 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1758
1759         /* If DMA engine usage is possible, try to find the max size for that */
1760         max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1761         max -= max % (1 << qp->dma_chan->device->copy_align);
1762
1763         return max;
1764 }
1765 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
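/*
 * Clients should size their buffers against this limit, e.g. (sketch):
 *
 *	unsigned int len = ntb_transport_max_size(qp);
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		ntb_transport_rx_enqueue(qp, buf, buf, len);
 */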