drivers/ntb/ntb_transport.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *
9  *   This program is free software; you can redistribute it and/or modify
10  *   it under the terms of version 2 of the GNU General Public License as
11  *   published by the Free Software Foundation.
12  *
13  *   BSD LICENSE
14  *
15  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
16  *
17  *   Redistribution and use in source and binary forms, with or without
18  *   modification, are permitted provided that the following conditions
19  *   are met:
20  *
21  *     * Redistributions of source code must retain the above copyright
22  *       notice, this list of conditions and the following disclaimer.
23  *     * Redistributions in binary form must reproduce the above copyright
24  *       notice, this list of conditions and the following disclaimer in
25  *       the documentation and/or other materials provided with the
26  *       distribution.
27  *     * Neither the name of Intel Corporation nor the names of its
28  *       contributors may be used to endorse or promote products derived
29  *       from this software without specific prior written permission.
30  *
31  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42  *
43  * Intel PCIe NTB Linux driver
44  *
45  * Contact Information:
46  * Jon Mason <jon.mason@intel.com>
47  */
48 #include <linux/debugfs.h>
49 #include <linux/delay.h>
50 #include <linux/dmaengine.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/errno.h>
53 #include <linux/export.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 #include <linux/pci.h>
57 #include <linux/slab.h>
58 #include <linux/types.h>
59 #include <linux/ntb.h>
60 #include "ntb_hw.h"
61
62 #define NTB_TRANSPORT_VERSION   3
63
64 static unsigned int transport_mtu = 0x401E;
65 module_param(transport_mtu, uint, 0644);
66 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
67
68 static unsigned char max_num_clients;
69 module_param(max_num_clients, byte, 0644);
70 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
71
72 static unsigned int copy_bytes = 1024;
73 module_param(copy_bytes, uint, 0644);
74 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
75
76 struct ntb_queue_entry {
77         /* ntb_queue list reference */
78         struct list_head entry;
79         /* pointers to data to be transferred */
80         void *cb_data;
81         void *buf;
82         unsigned int len;
83         unsigned int flags;
84
85         struct ntb_transport_qp *qp;
86         union {
87                 struct ntb_payload_header __iomem *tx_hdr;
88                 struct ntb_payload_header *rx_hdr;
89         };
90         unsigned int index;
91 };
92
93 struct ntb_rx_info {
94         unsigned int entry;
95 };
96
97 struct ntb_transport_qp {
98         struct ntb_transport *transport;
99         struct ntb_device *ndev;
100         void *cb_data;
101         struct dma_chan *dma_chan;
102
103         bool client_ready;
104         bool qp_link;
105         u8 qp_num;      /* Only 64 QPs are allowed.  0-63 */
106
107         struct ntb_rx_info __iomem *rx_info;
108         struct ntb_rx_info *remote_rx_info;
109
110         void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
111                             void *data, int len);
112         struct list_head tx_free_q;
113         spinlock_t ntb_tx_free_q_lock;
114         void __iomem *tx_mw;
115         dma_addr_t tx_mw_phys;
116         unsigned int tx_index;
117         unsigned int tx_max_entry;
118         unsigned int tx_max_frame;
119
120         void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
121                             void *data, int len);
122         struct tasklet_struct rx_work;
123         struct list_head rx_pend_q;
124         struct list_head rx_free_q;
125         spinlock_t ntb_rx_pend_q_lock;
126         spinlock_t ntb_rx_free_q_lock;
127         void *rx_buff;
128         unsigned int rx_index;
129         unsigned int rx_max_entry;
130         unsigned int rx_max_frame;
131         dma_cookie_t last_cookie;
132
133         void (*event_handler) (void *data, int status);
134         struct delayed_work link_work;
135         struct work_struct link_cleanup;
136
137         struct dentry *debugfs_dir;
138         struct dentry *debugfs_stats;
139
140         /* Stats */
141         u64 rx_bytes;
142         u64 rx_pkts;
143         u64 rx_ring_empty;
144         u64 rx_err_no_buf;
145         u64 rx_err_oflow;
146         u64 rx_err_ver;
147         u64 rx_memcpy;
148         u64 rx_async;
149         u64 tx_bytes;
150         u64 tx_pkts;
151         u64 tx_ring_full;
152         u64 tx_err_no_buf;
153         u64 tx_memcpy;
154         u64 tx_async;
155 };
156
157 struct ntb_transport_mw {
158         size_t size;
159         void *virt_addr;
160         dma_addr_t dma_addr;
161 };
162
163 struct ntb_transport_client_dev {
164         struct list_head entry;
165         struct device dev;
166 };
167
168 struct ntb_transport {
169         struct list_head entry;
170         struct list_head client_devs;
171
172         struct ntb_device *ndev;
173         struct ntb_transport_mw *mw;
174         struct ntb_transport_qp *qps;
175         unsigned int max_qps;
176         unsigned long qp_bitmap;
177         bool transport_link;
178         struct delayed_work link_work;
179         struct work_struct link_cleanup;
180 };
181
182 enum {
183         DESC_DONE_FLAG = 1 << 0,
184         LINK_DOWN_FLAG = 1 << 1,
185 };
186
187 struct ntb_payload_header {
188         unsigned int ver;
189         unsigned int len;
190         unsigned int flags;
191 };
192
193 enum {
194         VERSION = 0,
195         QP_LINKS,
196         NUM_QPS,
197         NUM_MWS,
198         MW0_SZ_HIGH,
199         MW0_SZ_LOW,
200         MW1_SZ_HIGH,
201         MW1_SZ_LOW,
202         MAX_SPAD,
203 };
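/*
 * Each memory window publishes its size through a HIGH/LOW scratchpad
 * pair: MW i uses spads MW0_SZ_HIGH + i * 2 and MW0_SZ_LOW + i * 2
 * (see ntb_transport_link_work() below).
 */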
204
205 #define QP_TO_MW(ndev, qp)      ((qp) % ntb_max_mw(ndev))
206 #define NTB_QP_DEF_NUM_ENTRIES  100
207 #define NTB_LINK_DOWN_TIMEOUT   10
208
209 static int ntb_match_bus(struct device *dev, struct device_driver *drv)
210 {
211         return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
212 }
213
214 static int ntb_client_probe(struct device *dev)
215 {
216         const struct ntb_client *drv = container_of(dev->driver,
217                                                     struct ntb_client, driver);
218         struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
219         int rc = -EINVAL;
220
221         get_device(dev);
222         if (drv && drv->probe)
223                 rc = drv->probe(pdev);
224         if (rc)
225                 put_device(dev);
226
227         return rc;
228 }
229
230 static int ntb_client_remove(struct device *dev)
231 {
232         const struct ntb_client *drv = container_of(dev->driver,
233                                                     struct ntb_client, driver);
234         struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
235
236         if (drv && drv->remove)
237                 drv->remove(pdev);
238
239         put_device(dev);
240
241         return 0;
242 }
243
244 static struct bus_type ntb_bus_type = {
245         .name = "ntb_bus",
246         .match = ntb_match_bus,
247         .probe = ntb_client_probe,
248         .remove = ntb_client_remove,
249 };
250
251 static LIST_HEAD(ntb_transport_list);
252
253 static int ntb_bus_init(struct ntb_transport *nt)
254 {
255         if (list_empty(&ntb_transport_list)) {
256                 int rc = bus_register(&ntb_bus_type);
257                 if (rc)
258                         return rc;
259         }
260
261         list_add(&nt->entry, &ntb_transport_list);
262
263         return 0;
264 }
265
266 static void ntb_bus_remove(struct ntb_transport *nt)
267 {
268         struct ntb_transport_client_dev *client_dev, *cd;
269
270         list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
271                 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
272                         dev_name(&client_dev->dev));
273                 list_del(&client_dev->entry);
274                 device_unregister(&client_dev->dev);
275         }
276
277         list_del(&nt->entry);
278
279         if (list_empty(&ntb_transport_list))
280                 bus_unregister(&ntb_bus_type);
281 }
282
283 static void ntb_client_release(struct device *dev)
284 {
285         struct ntb_transport_client_dev *client_dev;
286         client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
287
288         kfree(client_dev);
289 }
290
291 /**
292  * ntb_unregister_client_dev - Unregister NTB client device
293  * @device_name: Name of NTB client device
294  *
295  * Unregister an NTB client device from the NTB transport layer
296  */
297 void ntb_unregister_client_dev(char *device_name)
298 {
299         struct ntb_transport_client_dev *client, *cd;
300         struct ntb_transport *nt;
301
302         list_for_each_entry(nt, &ntb_transport_list, entry)
303                 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
304                         if (!strncmp(dev_name(&client->dev), device_name,
305                                      strlen(device_name))) {
306                                 list_del(&client->entry);
307                                 device_unregister(&client->dev);
308                         }
309 }
310 EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
311
312 /**
313  * ntb_register_client_dev - Register NTB client device
314  * @device_name: Name of NTB client device
315  *
316  * Register an NTB client device with the NTB transport layer
317  */
318 int ntb_register_client_dev(char *device_name)
319 {
320         struct ntb_transport_client_dev *client_dev;
321         struct ntb_transport *nt;
322         int rc, i = 0;
323
324         if (list_empty(&ntb_transport_list))
325                 return -ENODEV;
326
327         list_for_each_entry(nt, &ntb_transport_list, entry) {
328                 struct device *dev;
329
330                 client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
331                                      GFP_KERNEL);
332                 if (!client_dev) {
333                         rc = -ENOMEM;
334                         goto err;
335                 }
336
337                 dev = &client_dev->dev;
338
339                 /* setup and register client devices */
340                 dev_set_name(dev, "%s%d", device_name, i);
341                 dev->bus = &ntb_bus_type;
342                 dev->release = ntb_client_release;
343                 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
344
345                 rc = device_register(dev);
346                 if (rc) {
347                         kfree(client_dev);
348                         goto err;
349                 }
350
351                 list_add_tail(&client_dev->entry, &nt->client_devs);
352                 i++;
353         }
354
355         return 0;
356
357 err:
358         ntb_unregister_client_dev(device_name);
359
360         return rc;
361 }
362 EXPORT_SYMBOL_GPL(ntb_register_client_dev);
363
364 /**
365  * ntb_register_client - Register NTB client driver
366  * @drv: NTB client driver to be registered
367  *
368  * Register an NTB client driver with the NTB transport layer
369  *
370  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
371  */
372 int ntb_register_client(struct ntb_client *drv)
373 {
374         drv->driver.bus = &ntb_bus_type;
375
376         if (list_empty(&ntb_transport_list))
377                 return -ENODEV;
378
379         return driver_register(&drv->driver);
380 }
381 EXPORT_SYMBOL_GPL(ntb_register_client);
382
383 /**
384  * ntb_unregister_client - Unregister NTB client driver
385  * @drv: NTB client driver to be unregistered
386  *
387  * Unregister an NTB client driver from the NTB transport layer
390  */
391 void ntb_unregister_client(struct ntb_client *drv)
392 {
393         driver_unregister(&drv->driver);
394 }
395 EXPORT_SYMBOL_GPL(ntb_unregister_client);
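/*
 * A minimal client sketch, for illustration only (the "foo" names are
 * hypothetical, and the exact struct ntb_client prototypes live in the
 * NTB headers).  A client supplies probe/remove callbacks and registers
 * both a named device and a driver whose name matches it:
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		return 0;	- claim the device
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *	}
 *
 *	static struct ntb_client foo_client = {
 *		.driver	= { .name = "foo" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("foo");
 *	if (!rc)
 *		rc = ntb_register_client(&foo_client);
 */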
396
397 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
398                             loff_t *offp)
399 {
400         struct ntb_transport_qp *qp;
401         char *buf;
402         ssize_t ret, out_offset, out_count;
403
404         out_count = 1000;
405
406         buf = kmalloc(out_count, GFP_KERNEL);
407         if (!buf)
408                 return -ENOMEM;
409
410         qp = filp->private_data;
411         out_offset = 0;
412         out_offset += snprintf(buf + out_offset, out_count - out_offset,
413                                "NTB QP stats\n");
414         out_offset += snprintf(buf + out_offset, out_count - out_offset,
415                                "rx_bytes - \t%llu\n", qp->rx_bytes);
416         out_offset += snprintf(buf + out_offset, out_count - out_offset,
417                                "rx_pkts - \t%llu\n", qp->rx_pkts);
418         out_offset += snprintf(buf + out_offset, out_count - out_offset,
419                                "rx_memcpy - \t%llu\n", qp->rx_memcpy);
420         out_offset += snprintf(buf + out_offset, out_count - out_offset,
421                                "rx_async - \t%llu\n", qp->rx_async);
422         out_offset += snprintf(buf + out_offset, out_count - out_offset,
423                                "rx_ring_empty - %llu\n", qp->rx_ring_empty);
424         out_offset += snprintf(buf + out_offset, out_count - out_offset,
425                                "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
426         out_offset += snprintf(buf + out_offset, out_count - out_offset,
427                                "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
428         out_offset += snprintf(buf + out_offset, out_count - out_offset,
429                                "rx_err_ver - \t%llu\n", qp->rx_err_ver);
430         out_offset += snprintf(buf + out_offset, out_count - out_offset,
431                                "rx_buff - \t%p\n", qp->rx_buff);
432         out_offset += snprintf(buf + out_offset, out_count - out_offset,
433                                "rx_index - \t%u\n", qp->rx_index);
434         out_offset += snprintf(buf + out_offset, out_count - out_offset,
435                                "rx_max_entry - \t%u\n", qp->rx_max_entry);
436
437         out_offset += snprintf(buf + out_offset, out_count - out_offset,
438                                "tx_bytes - \t%llu\n", qp->tx_bytes);
439         out_offset += snprintf(buf + out_offset, out_count - out_offset,
440                                "tx_pkts - \t%llu\n", qp->tx_pkts);
441         out_offset += snprintf(buf + out_offset, out_count - out_offset,
442                                "tx_memcpy - \t%llu\n", qp->tx_memcpy);
443         out_offset += snprintf(buf + out_offset, out_count - out_offset,
444                                "tx_async - \t%llu\n", qp->tx_async);
445         out_offset += snprintf(buf + out_offset, out_count - out_offset,
446                                "tx_ring_full - \t%llu\n", qp->tx_ring_full);
447         out_offset += snprintf(buf + out_offset, out_count - out_offset,
448                                "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
449         out_offset += snprintf(buf + out_offset, out_count - out_offset,
450                                "tx_mw - \t%p\n", qp->tx_mw);
451         out_offset += snprintf(buf + out_offset, out_count - out_offset,
452                                "tx_index - \t%u\n", qp->tx_index);
453         out_offset += snprintf(buf + out_offset, out_count - out_offset,
454                                "tx_max_entry - \t%u\n", qp->tx_max_entry);
455
456         out_offset += snprintf(buf + out_offset, out_count - out_offset,
457                                "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
458                                "Up" : "Down");
459         if (out_offset > out_count)
460                 out_offset = out_count;
461
462         ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
463         kfree(buf);
464         return ret;
465 }
466
467 static const struct file_operations ntb_qp_debugfs_stats = {
468         .owner = THIS_MODULE,
469         .open = simple_open,
470         .read = debugfs_read,
471 };
472
473 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
474                          struct list_head *list)
475 {
476         unsigned long flags;
477
478         spin_lock_irqsave(lock, flags);
479         list_add_tail(entry, list);
480         spin_unlock_irqrestore(lock, flags);
481 }
482
483 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
484                                                 struct list_head *list)
485 {
486         struct ntb_queue_entry *entry;
487         unsigned long flags;
488
489         spin_lock_irqsave(lock, flags);
490         if (list_empty(list)) {
491                 entry = NULL;
492                 goto out;
493         }
494         entry = list_first_entry(list, struct ntb_queue_entry, entry);
495         list_del(&entry->entry);
496 out:
497         spin_unlock_irqrestore(lock, flags);
498
499         return entry;
500 }
501
502 static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
503                                       unsigned int qp_num)
504 {
505         struct ntb_transport_qp *qp = &nt->qps[qp_num];
506         unsigned int rx_size, num_qps_mw;
507         u8 mw_num, mw_max;
508         unsigned int i;
509
510         mw_max = ntb_max_mw(nt->ndev);
511         mw_num = QP_TO_MW(nt->ndev, qp_num);
512
513         WARN_ON(nt->mw[mw_num].virt_addr == NULL);
514
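	/*
	 * Queues are striped round-robin across the memory windows (see
	 * QP_TO_MW()); when max_qps does not divide evenly, the first
	 * max_qps % mw_max windows each carry one extra queue.  E.g. with
	 * 5 QPs on 2 MWs, MW0 serves qp0/qp2/qp4 and MW1 serves qp1/qp3.
	 */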
515         if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
516                 num_qps_mw = nt->max_qps / mw_max + 1;
517         else
518                 num_qps_mw = nt->max_qps / mw_max;
519
520         rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
521         qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
522         rx_size -= sizeof(struct ntb_rx_info);
523
524         qp->remote_rx_info = qp->rx_buff + rx_size;
525
526         /* Due to housekeeping, there must be at least 2 buffers */
527         qp->rx_max_frame = min(transport_mtu, rx_size / 2);
528         qp->rx_max_entry = rx_size / qp->rx_max_frame;
529         qp->rx_index = 0;
530
531         qp->remote_rx_info->entry = qp->rx_max_entry - 1;
532
533         /* zero out the payload header at the end of each frame */
534         for (i = 0; i < qp->rx_max_entry; i++) {
535                 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
536                                sizeof(struct ntb_payload_header);
537                 memset(offset, 0, sizeof(struct ntb_payload_header));
538         }
539
540         qp->rx_pkts = 0;
541         qp->tx_pkts = 0;
542         qp->tx_index = 0;
543 }
544
545 static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
546 {
547         struct ntb_transport_mw *mw = &nt->mw[num_mw];
548         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
549
550         if (!mw->virt_addr)
551                 return;
552
553         dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
554         mw->virt_addr = NULL;
555 }
556
557 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
558 {
559         struct ntb_transport_mw *mw = &nt->mw[num_mw];
560         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
561
562         /* No need to re-setup */
563         if (mw->size == ALIGN(size, 4096))
564                 return 0;
565
566         if (mw->size != 0)
567                 ntb_free_mw(nt, num_mw);
568
569         /* Alloc memory for receiving data.  Must be 4k aligned */
570         mw->size = ALIGN(size, 4096);
571
572         mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
573                                            GFP_KERNEL);
574         if (!mw->virt_addr) {
575                 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
576                        (int) size);
577                 mw->size = 0;
578                 return -ENOMEM;
579         }
580
581         /* Notify HW the memory location of the receive buffer */
582         ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
583
584         return 0;
585 }
586
587 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
588 {
589         struct ntb_transport *nt = qp->transport;
590         struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
591
592         if (qp->qp_link == NTB_LINK_DOWN) {
593                 cancel_delayed_work_sync(&qp->link_work);
594                 return;
595         }
596
597         if (qp->event_handler)
598                 qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
599
600         dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
601         qp->qp_link = NTB_LINK_DOWN;
602 }
603
604 static void ntb_qp_link_cleanup_work(struct work_struct *work)
605 {
606         struct ntb_transport_qp *qp = container_of(work,
607                                                    struct ntb_transport_qp,
608                                                    link_cleanup);
609         struct ntb_transport *nt = qp->transport;
610
611         ntb_qp_link_cleanup(qp);
612
613         if (nt->transport_link == NTB_LINK_UP)
614                 schedule_delayed_work(&qp->link_work,
615                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
616 }
617
618 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
619 {
620         schedule_work(&qp->link_cleanup);
621 }
622
623 static void ntb_transport_link_cleanup(struct ntb_transport *nt)
624 {
625         int i;
626
627         /* Pass along the info to any clients */
628         for (i = 0; i < nt->max_qps; i++)
629                 if (!test_bit(i, &nt->qp_bitmap))
630                         ntb_qp_link_cleanup(&nt->qps[i]);
631
632         if (nt->transport_link == NTB_LINK_DOWN)
633                 cancel_delayed_work_sync(&nt->link_work);
634         else
635                 nt->transport_link = NTB_LINK_DOWN;
636
637         /* The scratchpad registers retain their values if the remote side
638          * goes down; clear them now so they hold sane values the next
639          * time they are accessed.
640          */
641         for (i = 0; i < MAX_SPAD; i++)
642                 ntb_write_local_spad(nt->ndev, i, 0);
643 }
644
645 static void ntb_transport_link_cleanup_work(struct work_struct *work)
646 {
647         struct ntb_transport *nt = container_of(work, struct ntb_transport,
648                                                 link_cleanup);
649
650         ntb_transport_link_cleanup(nt);
651 }
652
653 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
654 {
655         struct ntb_transport *nt = data;
656
657         switch (event) {
658         case NTB_EVENT_HW_LINK_UP:
659                 schedule_delayed_work(&nt->link_work, 0);
660                 break;
661         case NTB_EVENT_HW_LINK_DOWN:
662                 schedule_work(&nt->link_cleanup);
663                 break;
664         default:
665                 BUG();
666         }
667 }
668
669 static void ntb_transport_link_work(struct work_struct *work)
670 {
671         struct ntb_transport *nt = container_of(work, struct ntb_transport,
672                                                 link_work.work);
673         struct ntb_device *ndev = nt->ndev;
674         struct pci_dev *pdev = ntb_query_pdev(ndev);
675         u32 val;
676         int rc, i;
677
678         /* send the local info, in the opposite order of the way we read it */
679         for (i = 0; i < ntb_max_mw(ndev); i++) {
680                 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
681                                            ntb_get_mw_size(ndev, i) >> 32);
682                 if (rc) {
683                         dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
684                                 (u32)(ntb_get_mw_size(ndev, i) >> 32),
685                                 MW0_SZ_HIGH + (i * 2));
686                         goto out;
687                 }
688
689                 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
690                                            (u32) ntb_get_mw_size(ndev, i));
691                 if (rc) {
692                         dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
693                                 (u32) ntb_get_mw_size(ndev, i),
694                                 MW0_SZ_LOW + (i * 2));
695                         goto out;
696                 }
697         }
698
699         rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
700         if (rc) {
701                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
702                         ntb_max_mw(ndev), NUM_MWS);
703                 goto out;
704         }
705
706         rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
707         if (rc) {
708                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
709                         nt->max_qps, NUM_QPS);
710                 goto out;
711         }
712
713         rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
714         if (rc) {
715                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
716                         NTB_TRANSPORT_VERSION, VERSION);
717                 goto out;
718         }
719
720         /* Query the remote side for its info */
721         rc = ntb_read_remote_spad(ndev, VERSION, &val);
722         if (rc) {
723                 dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
724                 goto out;
725         }
726
727         if (val != NTB_TRANSPORT_VERSION)
728                 goto out;
729         dev_dbg(&pdev->dev, "Remote version = %d\n", val);
730
731         rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
732         if (rc) {
733                 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
734                 goto out;
735         }
736
737         if (val != nt->max_qps)
738                 goto out;
739         dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
740
741         rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
742         if (rc) {
743                 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
744                 goto out;
745         }
746
747         if (val != ntb_max_mw(ndev))
748                 goto out;
749         dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
750
751         for (i = 0; i < ntb_max_mw(ndev); i++) {
752                 u64 val64;
753
754                 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
755                 if (rc) {
756                         dev_err(&pdev->dev, "Error reading remote spad %d\n",
757                                 MW0_SZ_HIGH + (i * 2));
758                         goto out1;
759                 }
760
761                 val64 = (u64) val << 32;
762
763                 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
764                 if (rc) {
765                         dev_err(&pdev->dev, "Error reading remote spad %d\n",
766                                 MW0_SZ_LOW + (i * 2));
767                         goto out1;
768                 }
769
770                 val64 |= val;
771
772                 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
773
774                 rc = ntb_set_mw(nt, i, val64);
775                 if (rc)
776                         goto out1;
777         }
778
779         nt->transport_link = NTB_LINK_UP;
780
781         for (i = 0; i < nt->max_qps; i++) {
782                 struct ntb_transport_qp *qp = &nt->qps[i];
783
784                 ntb_transport_setup_qp_mw(nt, i);
785
786                 if (qp->client_ready == NTB_LINK_UP)
787                         schedule_delayed_work(&qp->link_work, 0);
788         }
789
790         return;
791
792 out1:
793         for (i = 0; i < ntb_max_mw(ndev); i++)
794                 ntb_free_mw(nt, i);
795 out:
796         if (ntb_hw_link_status(ndev))
797                 schedule_delayed_work(&nt->link_work,
798                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
799 }
800
801 static void ntb_qp_link_work(struct work_struct *work)
802 {
803         struct ntb_transport_qp *qp = container_of(work,
804                                                    struct ntb_transport_qp,
805                                                    link_work.work);
806         struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
807         struct ntb_transport *nt = qp->transport;
808         int rc, val;
809
810         WARN_ON(nt->transport_link != NTB_LINK_UP);
811
812         rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
813         if (rc) {
814                 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
815                 return;
816         }
817
818         rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
819         if (rc)
820                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
821                         val | 1 << qp->qp_num, QP_LINKS);
822
823         /* query remote spad for qp ready bits */
824         rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
825         if (rc)
826                 dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
827
828         dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
829
830         /* See if the remote side is up */
831         if (1 << qp->qp_num & val) {
832                 qp->qp_link = NTB_LINK_UP;
833
834                 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
835                 if (qp->event_handler)
836                         qp->event_handler(qp->cb_data, NTB_LINK_UP);
837         } else if (nt->transport_link == NTB_LINK_UP)
838                 schedule_delayed_work(&qp->link_work,
839                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
840 }
841
842 static int ntb_transport_init_queue(struct ntb_transport *nt,
843                                      unsigned int qp_num)
844 {
845         struct ntb_transport_qp *qp;
846         unsigned int num_qps_mw, tx_size;
847         u8 mw_num, mw_max;
848         u64 qp_offset;
849
850         mw_max = ntb_max_mw(nt->ndev);
851         mw_num = QP_TO_MW(nt->ndev, qp_num);
852
853         qp = &nt->qps[qp_num];
854         qp->qp_num = qp_num;
855         qp->transport = nt;
856         qp->ndev = nt->ndev;
857         qp->qp_link = NTB_LINK_DOWN;
858         qp->client_ready = NTB_LINK_DOWN;
859         qp->event_handler = NULL;
860
861         if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
862                 num_qps_mw = nt->max_qps / mw_max + 1;
863         else
864                 num_qps_mw = nt->max_qps / mw_max;
865
866         tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
867         qp_offset = qp_num / mw_max * tx_size;
868         qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
869         if (!qp->tx_mw)
870                 return -EINVAL;
871
872         qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
873         if (!qp->tx_mw_phys)
874                 return -EINVAL;
875
876         tx_size -= sizeof(struct ntb_rx_info);
877         qp->rx_info = qp->tx_mw + tx_size;
878
879         /* Due to housekeeping, there must be at least 2 buffers */
880         qp->tx_max_frame = min(transport_mtu, tx_size / 2);
881         qp->tx_max_entry = tx_size / qp->tx_max_frame;
882
883         if (ntb_query_debugfs(nt->ndev)) {
884                 char debugfs_name[8];
885
886                 snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
887                 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
888                                                  ntb_query_debugfs(nt->ndev));
889
890                 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
891                                                         qp->debugfs_dir, qp,
892                                                         &ntb_qp_debugfs_stats);
893         }
894
895         INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
896         INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
897
898         spin_lock_init(&qp->ntb_rx_pend_q_lock);
899         spin_lock_init(&qp->ntb_rx_free_q_lock);
900         spin_lock_init(&qp->ntb_tx_free_q_lock);
901
902         INIT_LIST_HEAD(&qp->rx_pend_q);
903         INIT_LIST_HEAD(&qp->rx_free_q);
904         INIT_LIST_HEAD(&qp->tx_free_q);
905
906         return 0;
907 }
908
909 int ntb_transport_init(struct pci_dev *pdev)
910 {
911         struct ntb_transport *nt;
912         int rc, i;
913
914         nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
915         if (!nt)
916                 return -ENOMEM;
917
918         nt->ndev = ntb_register_transport(pdev, nt);
919         if (!nt->ndev) {
920                 rc = -EIO;
921                 goto err;
922         }
923
924         nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
925                          GFP_KERNEL);
926         if (!nt->mw) {
927                 rc = -ENOMEM;
928                 goto err1;
929         }
930
931         if (max_num_clients)
932                 nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
933         else
934                 nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
935
936         nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
937                           GFP_KERNEL);
938         if (!nt->qps) {
939                 rc = -ENOMEM;
940                 goto err2;
941         }
942
943         nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
944
945         for (i = 0; i < nt->max_qps; i++) {
946                 rc = ntb_transport_init_queue(nt, i);
947                 if (rc)
948                         goto err3;
949         }
950
951         INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
952         INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
953
954         rc = ntb_register_event_callback(nt->ndev,
955                                          ntb_transport_event_callback);
956         if (rc)
957                 goto err3;
958
959         INIT_LIST_HEAD(&nt->client_devs);
960         rc = ntb_bus_init(nt);
961         if (rc)
962                 goto err4;
963
964         if (ntb_hw_link_status(nt->ndev))
965                 schedule_delayed_work(&nt->link_work, 0);
966
967         return 0;
968
969 err4:
970         ntb_unregister_event_callback(nt->ndev);
971 err3:
972         kfree(nt->qps);
973 err2:
974         kfree(nt->mw);
975 err1:
976         ntb_unregister_transport(nt->ndev);
977 err:
978         kfree(nt);
979         return rc;
980 }
981
982 void ntb_transport_free(void *transport)
983 {
984         struct ntb_transport *nt = transport;
985         struct ntb_device *ndev = nt->ndev;
986         int i;
987
988         ntb_transport_link_cleanup(nt);
989
990         /* verify that all the qp's are freed */
991         for (i = 0; i < nt->max_qps; i++) {
992                 if (!test_bit(i, &nt->qp_bitmap))
993                         ntb_transport_free_queue(&nt->qps[i]);
994                 debugfs_remove_recursive(nt->qps[i].debugfs_dir);
995         }
996
997         ntb_bus_remove(nt);
998
999         cancel_delayed_work_sync(&nt->link_work);
1000
1001         ntb_unregister_event_callback(ndev);
1002
1003         for (i = 0; i < ntb_max_mw(ndev); i++)
1004                 ntb_free_mw(nt, i);
1005
1006         kfree(nt->qps);
1007         kfree(nt->mw);
1008         ntb_unregister_transport(ndev);
1009         kfree(nt);
1010 }
1011
1012 static void ntb_rx_copy_callback(void *data)
1013 {
1014         struct ntb_queue_entry *entry = data;
1015         struct ntb_transport_qp *qp = entry->qp;
1016         void *cb_data = entry->cb_data;
1017         unsigned int len = entry->len;
1018         struct ntb_payload_header *hdr = entry->rx_hdr;
1019
1020         /* Ensure that the data is fully copied out before clearing the flag */
1021         wmb();
1022         hdr->flags = 0;
1023
1024         iowrite32(entry->index, &qp->rx_info->entry);
1025
1026         ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1027
1028         if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
1029                 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1030 }
1031
1032 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1033 {
1034         void *buf = entry->buf;
1035         size_t len = entry->len;
1036
1037         memcpy(buf, offset, len);
1038
1039         ntb_rx_copy_callback(entry);
1040 }
1041
1042 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1043                          size_t len)
1044 {
1045         struct dma_async_tx_descriptor *txd;
1046         struct ntb_transport_qp *qp = entry->qp;
1047         struct dma_chan *chan = qp->dma_chan;
1048         struct dma_device *device;
1049         size_t pay_off, buff_off;
1050         dma_addr_t src, dest;
1051         dma_cookie_t cookie;
1052         void *buf = entry->buf;
1053         unsigned long flags;
1054
1055         entry->len = len;
1056
1057         if (!chan)
1058                 goto err;
1059
1060         if (len < copy_bytes)
1061                 goto err1;
1062
1063         device = chan->device;
1064         pay_off = (size_t) offset & ~PAGE_MASK;
1065         buff_off = (size_t) buf & ~PAGE_MASK;
1066
1067         if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1068                 goto err1;
1069
1070         dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
1071         if (dma_mapping_error(device->dev, dest))
1072                 goto err1;
1073
1074         src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
1075         if (dma_mapping_error(device->dev, src))
1076                 goto err2;
1077
1078         flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
1079                 DMA_PREP_INTERRUPT;
1080         txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
1081         if (!txd)
1082                 goto err3;
1083
1084         txd->callback = ntb_rx_copy_callback;
1085         txd->callback_param = entry;
1086
1087         cookie = dmaengine_submit(txd);
1088         if (dma_submit_error(cookie))
1089                 goto err3;
1090
1091         qp->last_cookie = cookie;
1092
1093         qp->rx_async++;
1094
1095         return;
1096
1097 err3:
1098         dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
1099 err2:
1100         dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
1101 err1:
1102         /* If the callbacks complete out of order, the writes of the index of
1103          * the last completed entry will also be out of order, which may stall
1104          * the receiver forever, so wait for the pending DMA to finish first.
1105          */
1106         dma_sync_wait(chan, qp->last_cookie);
1107 err:
1108         ntb_memcpy_rx(entry, offset);
1109         qp->rx_memcpy++;
1110 }
1111
1112 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1113 {
1114         struct ntb_payload_header *hdr;
1115         struct ntb_queue_entry *entry;
1116         void *offset;
1117
1118         offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1119         hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1120
1121         entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1122         if (!entry) {
1123                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1124                         "no buffer - HDR ver %u, len %d, flags %x\n",
1125                         hdr->ver, hdr->len, hdr->flags);
1126                 qp->rx_err_no_buf++;
1127                 return -ENOMEM;
1128         }
1129
1130         if (!(hdr->flags & DESC_DONE_FLAG)) {
1131                 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1132                              &qp->rx_pend_q);
1133                 qp->rx_ring_empty++;
1134                 return -EAGAIN;
1135         }
1136
1137         if (hdr->ver != (u32) qp->rx_pkts) {
1138                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1139                         "qp %d: version mismatch, expected %llu - got %u\n",
1140                         qp->qp_num, qp->rx_pkts, hdr->ver);
1141                 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1142                              &qp->rx_pend_q);
1143                 qp->rx_err_ver++;
1144                 return -EIO;
1145         }
1146
1147         if (hdr->flags & LINK_DOWN_FLAG) {
1148                 ntb_qp_link_down(qp);
1149
1150                 goto err;
1151         }
1152
1153         dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1154                 "rx offset %u, ver %u - %d payload received, buf size %d\n",
1155                 qp->rx_index, hdr->ver, hdr->len, entry->len);
1156
1157         qp->rx_bytes += hdr->len;
1158         qp->rx_pkts++;
1159
1160         if (hdr->len > entry->len) {
1161                 qp->rx_err_oflow++;
1162                 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1163                         "RX overflow! Wanted %d got %d\n",
1164                         hdr->len, entry->len);
1165
1166                 goto err;
1167         }
1168
1169         entry->index = qp->rx_index;
1170         entry->rx_hdr = hdr;
1171
1172         ntb_async_rx(entry, offset, hdr->len);
1173
1174 out:
1175         qp->rx_index++;
1176         qp->rx_index %= qp->rx_max_entry;
1177
1178         return 0;
1179
1180 err:
1181         ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1182                      &qp->rx_pend_q);
1183         /* Ensure that the data is fully copied out before clearing the flag */
1184         wmb();
1185         hdr->flags = 0;
1186         iowrite32(qp->rx_index, &qp->rx_info->entry);
1187
1188         goto out;
1189 }
1190
1191 static void ntb_transport_rx(unsigned long data)
1192 {
1193         struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1194         int rc, i;
1195
1196         /* Limit the number of packets processed in a single interrupt to
1197          * provide fairness to others
1198          */
1199         for (i = 0; i < qp->rx_max_entry; i++) {
1200                 rc = ntb_process_rxc(qp);
1201                 if (rc)
1202                         break;
1203         }
1204
1205         if (qp->dma_chan)
1206                 dma_async_issue_pending(qp->dma_chan);
1207 }
1208
1209 static void ntb_transport_rxc_db(void *data, int db_num)
1210 {
1211         struct ntb_transport_qp *qp = data;
1212
1213         dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1214                 __func__, db_num);
1215
1216         tasklet_schedule(&qp->rx_work);
1217 }
1218
1219 static void ntb_tx_copy_callback(void *data)
1220 {
1221         struct ntb_queue_entry *entry = data;
1222         struct ntb_transport_qp *qp = entry->qp;
1223         struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1224
1225         /* Ensure that the data is fully copied out before setting the flags */
1226         wmb();
1227         iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1228
1229         ntb_ring_doorbell(qp->ndev, qp->qp_num);
1230
1231         /* The entry length can only be zero if the packet is intended to be a
1232          * "link down" or similar.  Since no payload is being sent in these
1233          * cases, there is nothing to add to the completion queue.
1234          */
1235         if (entry->len > 0) {
1236                 qp->tx_bytes += entry->len;
1237
1238                 if (qp->tx_handler)
1239                         qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1240                                        entry->len);
1241         }
1242
1243         ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1244 }
1245
1246 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1247 {
1248         memcpy_toio(offset, entry->buf, entry->len);
1249
1250         ntb_tx_copy_callback(entry);
1251 }
1252
1253 static void ntb_async_tx(struct ntb_transport_qp *qp,
1254                          struct ntb_queue_entry *entry)
1255 {
1256         struct ntb_payload_header __iomem *hdr;
1257         struct dma_async_tx_descriptor *txd;
1258         struct dma_chan *chan = qp->dma_chan;
1259         struct dma_device *device;
1260         size_t dest_off, buff_off;
1261         dma_addr_t src, dest;
1262         dma_cookie_t cookie;
1263         void __iomem *offset;
1264         size_t len = entry->len;
1265         void *buf = entry->buf;
1266         unsigned long flags;
1267
1268         offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1269         hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1270         entry->tx_hdr = hdr;
1271
1272         iowrite32(entry->len, &hdr->len);
1273         iowrite32((u32) qp->tx_pkts, &hdr->ver);
1274
1275         if (!chan)
1276                 goto err;
1277
1278         if (len < copy_bytes)
1279                 goto err;
1280
1281         device = chan->device;
1282         dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
1283         buff_off = (size_t) buf & ~PAGE_MASK;
1284         dest_off = (size_t) dest & ~PAGE_MASK;
1285
1286         if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1287                 goto err;
1288
1289         src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
1290         if (dma_mapping_error(device->dev, src))
1291                 goto err;
1292
1293         flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
1294         txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
1295         if (!txd)
1296                 goto err1;
1297
1298         txd->callback = ntb_tx_copy_callback;
1299         txd->callback_param = entry;
1300
1301         cookie = dmaengine_submit(txd);
1302         if (dma_submit_error(cookie))
1303                 goto err1;
1304
1305         dma_async_issue_pending(chan);
1306         qp->tx_async++;
1307
1308         return;
1309 err1:
1310         dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
1311 err:
1312         ntb_memcpy_tx(entry, offset);
1313         qp->tx_memcpy++;
1314 }
1315
1316 static int ntb_process_tx(struct ntb_transport_qp *qp,
1317                           struct ntb_queue_entry *entry)
1318 {
1319         dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
1320                 qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
1321                 entry->buf);
1322         if (qp->tx_index == qp->remote_rx_info->entry) {
1323                 qp->tx_ring_full++;
1324                 return -EAGAIN;
1325         }
1326
1327         if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1328                 if (qp->tx_handler)
1329                         qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1330
1331                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1332                              &qp->tx_free_q);
1333                 return 0;
1334         }
1335
1336         ntb_async_tx(qp, entry);
1337
1338         qp->tx_index++;
1339         qp->tx_index %= qp->tx_max_entry;
1340
1341         qp->tx_pkts++;
1342
1343         return 0;
1344 }
1345
1346 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1347 {
1348         struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1349         struct ntb_queue_entry *entry;
1350         int i, rc;
1351
1352         if (qp->qp_link == NTB_LINK_DOWN)
1353                 return;
1354
1355         qp->qp_link = NTB_LINK_DOWN;
1356         dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1357
1358         for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1359                 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1360                 if (entry)
1361                         break;
1362                 msleep(100);
1363         }
1364
1365         if (!entry)
1366                 return;
1367
1368         entry->cb_data = NULL;
1369         entry->buf = NULL;
1370         entry->len = 0;
1371         entry->flags = LINK_DOWN_FLAG;
1372
1373         rc = ntb_process_tx(qp, entry);
1374         if (rc)
1375                 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1376                         qp->qp_num);
1377 }
1378
1379 /**
1380  * ntb_transport_create_queue - Create a new NTB transport layer queue
1381  * @data: pointer to data to pass to the queue's callbacks
1382  * @pdev: PCI device on which the queue is to be created
1383  * @handlers: receive, transmit, and event callback functions
1384  *
1385  * Create a new NTB transport layer queue and provide the queue with a callback
1386  * routine for both transmit and receive.  The receive callback routine will be
1387  * used to pass up data when the transport has received it on the queue.  The
1388  * transmit callback routine will be called when the transport has completed the
1389  * transmission of the data on the queue and the data is ready to be freed.
1390  *
1391  * RETURNS: pointer to newly created ntb_queue, NULL on error.
1392  */
1393 struct ntb_transport_qp *
1394 ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1395                            const struct ntb_queue_handlers *handlers)
1396 {
1397         struct ntb_queue_entry *entry;
1398         struct ntb_transport_qp *qp;
1399         struct ntb_transport *nt;
1400         unsigned int free_queue;
1401         int rc, i;
1402
1403         nt = ntb_find_transport(pdev);
1404         if (!nt)
1405                 goto err;
1406
1407         free_queue = ffs(nt->qp_bitmap);
1408         if (!free_queue)
1409                 goto err;
1410
1411         /* decrement free_queue to make it zero based */
1412         free_queue--;
1413
1414         clear_bit(free_queue, &nt->qp_bitmap);
1415
1416         qp = &nt->qps[free_queue];
1417         qp->cb_data = data;
1418         qp->rx_handler = handlers->rx_handler;
1419         qp->tx_handler = handlers->tx_handler;
1420         qp->event_handler = handlers->event_handler;
1421
1422         dmaengine_get();
1423         qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1424         if (!qp->dma_chan) {
1425                 dmaengine_put();
1426                 dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
1427         }
1428
1429         for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1430                 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1431                 if (!entry)
1432                         goto err1;
1433
1434                 entry->qp = qp;
1435                 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
1436                              &qp->rx_free_q);
1437         }
1438
1439         for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1440                 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1441                 if (!entry)
1442                         goto err2;
1443
1444                 entry->qp = qp;
1445                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1446                              &qp->tx_free_q);
1447         }
1448
1449         tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1450
1451         rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1452                                       ntb_transport_rxc_db);
1453         if (rc)
1454                 goto err3;
1455
1456         dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1457
1458         return qp;
1459
1460 err3:
1461         tasklet_disable(&qp->rx_work);
1462 err2:
1463         while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1464                 kfree(entry);
1465 err1:
1466         while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1467                 kfree(entry);
1468         if (qp->dma_chan)
1469                 dmaengine_put();
1470         set_bit(free_queue, &nt->qp_bitmap);
1471 err:
1472         return NULL;
1473 }
1474 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
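/*
 * A minimal usage sketch, for illustration only (all "foo" names are
 * hypothetical).  A client supplies its callbacks via struct
 * ntb_queue_handlers, posts receive buffers, then signals readiness:
 *
 *	static void foo_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			   void *data, int len)
 *	{
 *		- consume the payload, then repost the buffer
 *		- with ntb_transport_rx_enqueue()
 *	}
 *
 *	static void foo_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			   void *data, int len)
 *	{
 *		- the transmit buffer may be freed or reused here
 *	}
 *
 *	static void foo_event(void *data, int status)
 *	{
 *		- status is NTB_LINK_UP or NTB_LINK_DOWN
 *	}
 *
 *	static const struct ntb_queue_handlers foo_handlers = {
 *		.rx_handler	= foo_rx,
 *		.tx_handler	= foo_tx,
 *		.event_handler	= foo_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &foo_handlers);
 *	if (!qp)
 *		goto fail;
 *	ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 *	ntb_transport_link_up(qp);
 */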
1475
1476 /**
1477  * ntb_transport_free_queue - Frees NTB transport queue
1478  * @qp: NTB queue to be freed
1479  *
1480  * Frees NTB transport queue
1481  */
1482 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1483 {
1484         struct pci_dev *pdev;
1485         struct ntb_queue_entry *entry;
1486
1487         if (!qp)
1488                 return;
1489
1490         pdev = ntb_query_pdev(qp->ndev);
1491
1492         if (qp->dma_chan) {
1493                 struct dma_chan *chan = qp->dma_chan;
1494                 /* Setting dma_chan to NULL will force any new traffic to be
1495                  * processed by the CPU instead of the DMA engine
1496                  */
1497                 qp->dma_chan = NULL;
1498
1499                 /* Try to be nice and wait for any queued DMA engine
1500                  * transactions to complete before smashing it with a rock
1501                  */
1502                 dma_sync_wait(chan, qp->last_cookie);
1503                 dmaengine_terminate_all(chan);
1504                 dmaengine_put();
1505         }
1506
1507         ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1508         tasklet_disable(&qp->rx_work);
1509
1510         cancel_delayed_work_sync(&qp->link_work);
1511
1512         while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1513                 kfree(entry);
1514
1515         while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
1516                 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
1517                 kfree(entry);
1518         }
1519
1520         while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1521                 kfree(entry);
1522
1523         set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1524
1525         dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1526 }
1527 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1528
1529 /**
1530  * ntb_transport_rx_remove - Dequeues enqueued rx packet
1531  * @qp: NTB queue from which to remove the buffer
1532  * @len: pointer to variable to write the dequeued buffer's length
1533  *
1534  * Dequeues unused buffers from the receive queue.  Should only be used during
1535  * shutdown of the qp.
1536  *
1537  * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
1538  */
1539 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1540 {
1541         struct ntb_queue_entry *entry;
1542         void *buf;
1543
1544         if (!qp || qp->client_ready == NTB_LINK_UP)
1545                 return NULL;
1546
1547         entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1548         if (!entry)
1549                 return NULL;
1550
1551         buf = entry->cb_data;
1552         *len = entry->len;
1553
1554         ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1555
1556         return buf;
1557 }
1558 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
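/*
 * A typical shutdown sequence, sketched: take the queue link down, drain
 * any receive buffers still pending, then free the queue.
 *
 *	ntb_transport_link_down(qp);
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		- free the client's buffer
 *	ntb_transport_free_queue(qp);
 */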
1559
1560 /**
1561  * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1562  * @qp: NTB transport layer queue the entry is to be enqueued on
1563  * @cb: per buffer pointer for callback function to use
1564  * @data: pointer to data buffer that incoming packets will be copied into
1565  * @len: length of the data buffer
1566  *
1567  * Enqueue a new receive buffer onto the transport queue into which an NTB
1568  * payload can be received.
1569  *
1570  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1571  */
1572 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1573                              unsigned int len)
1574 {
1575         struct ntb_queue_entry *entry;
1576
1577         if (!qp)
1578                 return -EINVAL;
1579
1580         entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1581         if (!entry)
1582                 return -ENOMEM;
1583
1584         entry->cb_data = cb;
1585         entry->buf = data;
1586         entry->len = len;
1587
1588         ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1589
1590         return 0;
1591 }
1592 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
1593
1594 /**
1595  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1596  * @qp: NTB transport layer queue the entry is to be enqueued on
1597  * @cb: per buffer pointer for callback function to use
1598  * @data: pointer to data buffer that will be sent
1599  * @len: length of the data buffer
1600  *
1601  * Enqueue a new transmit buffer onto the transport queue from which an NTB
1602  * payload will be transmitted.  This assumes that a lock is being held to
1603  * serialize access to the qp.
1604  *
1605  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1606  */
1607 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1608                              unsigned int len)
1609 {
1610         struct ntb_queue_entry *entry;
1611         int rc;
1612
1613         if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1614                 return -EINVAL;
1615
1616         entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1617         if (!entry) {
1618                 qp->tx_err_no_buf++;
1619                 return -ENOMEM;
1620         }
1621
1622         entry->cb_data = cb;
1623         entry->buf = data;
1624         entry->len = len;
1625         entry->flags = 0;
1626
1627         rc = ntb_process_tx(qp, entry);
1628         if (rc)
1629                 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1630                              &qp->tx_free_q);
1631
1632         return rc;
1633 }
1634 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
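/*
 * A typical transmit path, sketched: bound the payload by
 * ntb_transport_max_size() and back off on -EAGAIN, which indicates the
 * transmit ring is currently full:
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	if (rc == -EAGAIN)
 *		- stop the queue and retry after a tx completion
 */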
1635
1636 /**
1637  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1638  * @qp: NTB transport layer queue to be enabled
1639  *
1640  * Notify NTB transport layer of client readiness to use queue
1641  */
1642 void ntb_transport_link_up(struct ntb_transport_qp *qp)
1643 {
1644         if (!qp)
1645                 return;
1646
1647         qp->client_ready = NTB_LINK_UP;
1648
1649         if (qp->transport->transport_link == NTB_LINK_UP)
1650                 schedule_delayed_work(&qp->link_work, 0);
1651 }
1652 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1653
1654 /**
1655  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1656  * @qp: NTB transport layer queue to be disabled
1657  *
1658  * Notify the NTB transport layer of the client's desire to no longer receive
1659  * data on the specified transport queue.  It is the client's responsibility to
1660  * ensure all entries on the queue are purged or otherwise handled appropriately.
1661  */
1662 void ntb_transport_link_down(struct ntb_transport_qp *qp)
1663 {
1664         struct pci_dev *pdev;
1665         int rc, val;
1666
1667         if (!qp)
1668                 return;
1669
1670         pdev = ntb_query_pdev(qp->ndev);
1671         qp->client_ready = NTB_LINK_DOWN;
1672
1673         rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1674         if (rc) {
1675                 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
1676                 return;
1677         }
1678
1679         rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1680                                    val & ~(1 << qp->qp_num));
1681         if (rc)
1682                 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
1683                         val & ~(1 << qp->qp_num), QP_LINKS);
1684
1685         if (qp->qp_link == NTB_LINK_UP)
1686                 ntb_send_link_down(qp);
1687         else
1688                 cancel_delayed_work_sync(&qp->link_work);
1689 }
1690 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1691
1692 /**
1693  * ntb_transport_link_query - Query transport link state
1694  * @qp: NTB transport layer queue to be queried
1695  *
1696  * Query connectivity to the remote system of the NTB transport queue
1697  *
1698  * RETURNS: true for link up or false for link down
1699  */
1700 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1701 {
1702         if (!qp)
1703                 return false;
1704
1705         return qp->qp_link == NTB_LINK_UP;
1706 }
1707 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1708
1709 /**
1710  * ntb_transport_qp_num - Query the qp number
1711  * @qp: NTB transport layer queue to be queried
1712  *
1713  * Query qp number of the NTB transport queue
1714  *
1715  * RETURNS: a zero-based number specifying the qp number
1716  */
1717 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1718 {
1719         if (!qp)
1720                 return 0;
1721
1722         return qp->qp_num;
1723 }
1724 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1725
1726 /**
1727  * ntb_transport_max_size - Query the max payload size of a qp
1728  * @qp: NTB transport layer queue to be queried
1729  *
1730  * Query the maximum payload size permissible on the given qp
1731  *
1732  * RETURNS: the max payload size of a qp
1733  */
1734 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1735 {
1736         unsigned int max;
1737
1738         if (!qp)
1739                 return 0;
1740
1741         if (!qp->dma_chan)
1742                 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1743
1744         /* If DMA engine usage is possible, try to find the max size for that */
1745         max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1746         max -= max % (1 << qp->dma_chan->device->copy_align);
1747
1748         return max;
1749 }
1750 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
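/*
 * Worked example (assuming the ring is large enough that tx_max_frame
 * equals the default transport_mtu of 0x401E, i.e. 16414 bytes, and a DMA
 * copy_align of 6, i.e. 64-byte alignment): max starts at 16414 minus the
 * 12-byte ntb_payload_header, giving 16402, and is then rounded down to
 * 16402 - (16402 % 64) = 16384 bytes.
 */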