/* drivers/infiniband/ulp/ipoib/ipoib_ib.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        vah = ib_create_ah(pd, attr);
        if (IS_ERR(vah)) {
                kfree(ah);
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}

void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}
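
/*
 * Callers never free an AH directly: they drop their reference, and the
 * reaper work destroys the AH once the hardware is done with it.  A
 * minimal sketch of the caller side (not compiled here; ipoib_put_ah()
 * is assumed to be the inline wrapper in ipoib.h):
 */
#if 0
static inline void ipoib_put_ah(struct ipoib_ah *ah)
{
        kref_put(&ah->ref, ipoib_free_ah);      /* queues ah on dead_ahs */
}
#endif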

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
                                    DMA_FROM_DEVICE);
                ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
                                  DMA_FROM_DEVICE);
        } else
                ib_dma_unmap_single(priv->ca, mapping[0],
                                    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                                    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
                                   struct sk_buff *skb,
                                   unsigned int length)
{
        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
                unsigned int size;
                /*
                 * Only two buffers are needed for max_payload = 4K;
                 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
                 */
                skb->tail += IPOIB_UD_HEAD_SIZE;
                skb->len  += length;

                size = length - IPOIB_UD_HEAD_SIZE;

                skb_frag_size_set(frag, size);
                skb->data_len += size;
                skb->truesize += PAGE_SIZE;
        } else
                skb_put(skb, length);
}
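
/*
 * Worked example for the split above (assuming 4 KB pages and the
 * definitions in ipoib.h): with a 4K IB MTU, IPOIB_UD_BUF_SIZE =
 * 4096 + 40 (GRH) = 4136 > PAGE_SIZE, so ipoib_ud_need_sg() is true
 * and a receive is split into an IPOIB_UD_HEAD_SIZE linear part plus
 * one PAGE_SIZE fragment.  With a 2K MTU the whole 2088-byte buffer
 * fits in the linear part and no fragment page is needed.
 */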

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        int tailroom;
        u64 *mapping;

        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                buf_size = IPOIB_UD_HEAD_SIZE;
                tailroom = 128; /* reserve some tailroom for IP/TCP headers */
        } else {
                buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
                tailroom = 0;
        }

        skb = dev_alloc_skb(buf_size + tailroom + 4);
        if (unlikely(!skb))
                return NULL;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                struct page *page = alloc_page(GFP_ATOMIC);
                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
                mapping[1] =
                        ib_dma_map_page(priv->ca, page,
                                        0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
                        goto partial_error;
        }

        priv->rx_ring[id].skb = skb;
        return skb;

partial_error:
        ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
        dev_kfree_skb_any(skb);
        return NULL;
}
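
/*
 * Offset arithmetic behind the skb_reserve(skb, 4) above: the data
 * pointer starts at offset 4, the 40-byte GRH brings it to 44, and the
 * 4-byte IPoIB header to 48, so the IP header lands on a 16-byte
 * boundary.
 */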

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb  = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        /*
         * Drop packets that this interface sent, i.e. multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);
        ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        if ((dev->features & NETIF_F_RXCSUM) &&
                        likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}

static int ipoib_dma_map_tx(struct ib_device *ca,
                            struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                 skb_frag_page(frag),
                                                 frag->page_offset, skb_frag_size(frag),
                                                 DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}
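
/*
 * Layout of tx_req->mapping produced above (mirrored into tx_sge[] by
 * post_send() below): slot 0 holds the linear head when skb_headlen()
 * is nonzero (off = 1), and slots off..off+nr_frags-1 hold the page
 * fragments.  In the unwind loop, mapping[i - !off] is the slot that
 * fragment i - 1 was mapped into, for either value of off.
 */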

static void ipoib_dma_unmap_tx(struct ib_device *ca,
                               struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
                                  DMA_TO_DEVICE);
        }
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR)
                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i)
                ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

        return n == MAX_SEND_CQE;
}
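
/*
 * poll_tx() returns nonzero when it drained a full batch of
 * MAX_SEND_CQE completions, i.e. the send CQ may still hold more work;
 * callers such as drain_tx_cq() and ipoib_send() loop until it
 * returns 0.
 */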

int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done  = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else
                                ipoib_cm_handle_tx_wc(priv->dev, wc);
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}
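
/*
 * The re-arm above closes the usual NAPI race: with
 * IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() returns a positive
 * value if completions arrived between the last poll and the re-arm.
 * In that case, if napi_reschedule() wins, the poll loop is re-entered
 * instead of leaving those completions stranded until the next
 * interrupt.
 */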

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_tx_lock(dev);
        while (poll_tx(priv))
                ; /* nothing */

        if (netif_queue_stopped(dev))
                mod_timer(&priv->poll_timer, jiffies + 1);

        netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

        mod_timer(&priv->poll_timer, jiffies);
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        int i, off;
        struct sk_buff *skb = tx_req->skb;
        skb_frag_t *frags = skb_shinfo(skb)->frags;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        u64 *mapping = tx_req->mapping;

        if (skb_headlen(skb)) {
                priv->tx_sge[0].addr         = mapping[0];
                priv->tx_sge[0].length       = skb_headlen(skb);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < nr_frags; ++i) {
                priv->tx_sge[i + off].addr = mapping[i + off];
                priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
        }
        priv->tx_wr.num_sge          = nr_frags + off;
        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;

        if (head) {
                priv->tx_wr.wr.ud.mss    = skb_shinfo(skb)->gso_size;
                priv->tx_wr.wr.ud.header = head;
                priv->tx_wr.wr.ud.hlen   = hlen;
                priv->tx_wr.opcode       = IB_WR_LSO;
        } else
                priv->tx_wr.opcode       = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv->ca, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;
        }

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
}
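
/*
 * A minimal caller sketch (not compiled here): the xmit path passes
 * the cached address handle and extracts the remote QPN from the
 * 20-byte IPoIB hardware address.  IPOIB_QPN() and the neighbour
 * lookup (neigh, daddr) are assumed from ipoib.h / ipoib_main.c.
 */
#if 0
        if (neigh->ah)
                ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(daddr));
#endif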

static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
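
/*
 * The signed subtraction above is the usual sequence-counter idiom: an
 * AH is destroyed only once tx_tail has passed its last_send mark, and
 * the int casts keep the comparison correct across 32-bit wraparound
 * (e.g. tx_tail = 5 after wrapping and last_send = 0xfffffffe give a
 * positive difference of 7).
 */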

void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
        drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
                           (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
                return -1;
        }

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                goto dev_stop;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                goto dev_stop;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);

        return 0;
dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
        ipoib_ib_dev_stop(dev, flush);
        return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!(priv->pkey & 0x7fff) ||
            ib_find_pkey(priv->ca, priv->port, priv->pkey,
                         &priv->pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;

        /*
         * We call completion handling routines that expect to be
         * called from the BH-disabled NAPI poll context, so disable
         * BHs here too.
         */
        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else
                                ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}

int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize it
         * when all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv->ca, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        if (flush)
                flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
                    (unsigned long) dev);

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev, 1)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}

/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
        int result;
        u16 prev_pkey;

        prev_pkey = priv->pkey;
        result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
        if (result) {
                ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
                           priv->port, result);
                return result;
        }

        priv->pkey |= 0x8000;

        if (prev_pkey != priv->pkey) {
                ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
                          prev_pkey, priv->pkey);
                /*
                 * Update the pkey in the broadcast address, while making sure to set
                 * the full membership bit, so that we join the right broadcast group.
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
                return 0;
        }

        return 1;
}
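
/*
 * Membership-bit example (hypothetical values): if the SM places the
 * limited-membership P_Key 0x0001 in index 0, ORing in 0x8000 yields
 * 0x8001, and the broadcast address is patched to broadcast[8] = 0x80,
 * broadcast[9] = 0x01, i.e. the P_Key field of the IPoIB broadcast
 * MGID ff12:401b:8001::ffff:ffff.
 */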

/*
 * Returns 0 if the pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
        u16 old_index = priv->pkey_index;

        priv->pkey_index = 0;
        ipoib_pkey_dev_check_presence(priv->dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            (old_index == priv->pkey_index))
                return 1;
        return 0;
}

static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                enum ipoib_flush_level level)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;

        down_read(&priv->vlan_rwsem);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level);

        up_read(&priv->vlan_rwsem);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
            level != IPOIB_FLUSH_HEAVY) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                /* interface is down; update pkey and leave. */
                if (level == IPOIB_FLUSH_HEAVY) {
                        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
                                update_parent_pkey(priv);
                        else
                                update_child_pkey(priv);
                }
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                /* child devices chase their origin pkey value, while non-child
                 * (parent) devices should always take whatever is present in
                 * pkey index 0.
                 */
                if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                        result = update_child_pkey(priv);
                        if (result) {
                                /* restart QP only if P_Key index is changed */
                                ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
                        }

                } else {
                        result = update_parent_pkey(priv);
                        /* restart QP only if P_Key value changed */
                        if (result) {
                                ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
                                return;
                        }
                }
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                ipoib_mark_paths_invalid(dev);
                ipoib_mcast_dev_flush(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev, 0);

        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev, 0);
                if (ipoib_ib_dev_open(dev, 0) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here; don't bring it back up if it's not configured up.
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");
        /*
         * We must make sure there are no more (path) completions
         * that may wish to touch priv fields that are no longer valid.
         */
        ipoib_flush_paths(dev);

        ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);

        ipoib_transport_dev_cleanup(dev);
}