2 * Linux driver for VMware's vmxnet3 ethernet NIC.
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
30 #include "vmxnet3_int.h"
32 char vmxnet3_driver_name[] = "vmxnet3";
33 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
37 * Last entry must be all 0s
39 static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
44 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46 static atomic_t devices_found;
48 static int enable_mq = 1;
49 static int irq_share_mode;
52 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
55 * Enable/Disable the given intr
58 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
60 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
65 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
67 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
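/* Note: each interrupt vector has its own IMR register in BAR0, spaced 8 bytes
 * apart; writing 0 unmasks (enables) the vector and writing 1 masks (disables) it. */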
72 * Enable/Disable all intrs used by the device
75 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
79 for (i = 0; i < adapter->intr.num_intrs; i++)
80 vmxnet3_enable_intr(adapter, i);
81 adapter->shared->devRead.intrConf.intrCtrl &=
82 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
87 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
91 adapter->shared->devRead.intrConf.intrCtrl |=
92 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
93 for (i = 0; i < adapter->intr.num_intrs; i++)
94 vmxnet3_disable_intr(adapter, i);
99 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
101 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
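/* Writing the event bits back to VMXNET3_REG_ECR acknowledges them on the device
 * side; vmxnet3_process_events() reads the same bits from shared->ecr. */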
106 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
113 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
116 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
121 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
124 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
129 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
133 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
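/* The start/wake/stop helpers above derive the subqueue index from the queue's
 * offset within adapter->tx_queue[], so the tx queues must sit in one contiguous
 * array. */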
138 * Check the link state. This may start or stop the tx queue.
141 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
147 spin_lock_irqsave(&adapter->cmd_lock, flags);
148 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
149 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
150 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
152 adapter->link_speed = ret >> 16;
153 if (ret & 1) { /* Link is up. */
154 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
155 adapter->link_speed);
156 if (!netif_carrier_ok(adapter->netdev))
157 netif_carrier_on(adapter->netdev);
160 for (i = 0; i < adapter->num_tx_queues; i++)
161 vmxnet3_tq_start(&adapter->tx_queue[i],
165 netdev_info(adapter->netdev, "NIC Link is Down\n");
166 if (netif_carrier_ok(adapter->netdev))
167 netif_carrier_off(adapter->netdev);
170 for (i = 0; i < adapter->num_tx_queues; i++)
171 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
177 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
181 u32 events = le32_to_cpu(adapter->shared->ecr);
185 vmxnet3_ack_events(adapter, events);
187 /* Check if link state has changed */
188 if (events & VMXNET3_ECR_LINK)
189 vmxnet3_check_link(adapter, true);
191 /* Check if there is an error on xmit/recv queues */
192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
193 spin_lock_irqsave(&adapter->cmd_lock, flags);
194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
195 VMXNET3_CMD_GET_QUEUE_STATUS);
196 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
198 for (i = 0; i < adapter->num_tx_queues; i++)
199 if (adapter->tqd_start[i].status.stopped)
200 dev_err(&adapter->netdev->dev,
201 "%s: tq[%d] error 0x%x\n",
202 adapter->netdev->name, i, le32_to_cpu(
203 adapter->tqd_start[i].status.error));
204 for (i = 0; i < adapter->num_rx_queues; i++)
205 if (adapter->rqd_start[i].status.stopped)
206 dev_err(&adapter->netdev->dev,
207 "%s: rq[%d] error 0x%x\n",
208 adapter->netdev->name, i,
209 adapter->rqd_start[i].status.error);
211 schedule_work(&adapter->work);
215 #ifdef __BIG_ENDIAN_BITFIELD
217 * The device expects the bitfields in shared structures to be written in
218 * little endian. When CPU is big endian, the following routines are used to
219 * correctly read and write into ABI.
220 * The general technique used here is: double word bitfields are defined in the
221 * opposite order for big endian architectures. Then, before the driver reads
222 * them, the complete double word is translated using le32_to_cpu. Similarly,
223 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
224 * the double words into the required format.
225 * To avoid touching bits in the shared structure more than once, temporary
226 * descriptors are used. These are passed as srcDesc to the following functions.
228 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
229 struct Vmxnet3_RxDesc *dstDesc)
231 u32 *src = (u32 *)srcDesc + 2;
232 u32 *dst = (u32 *)dstDesc + 2;
233 dstDesc->addr = le64_to_cpu(srcDesc->addr);
234 *dst = le32_to_cpu(*src);
235 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
238 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
239 struct Vmxnet3_TxDesc *dstDesc)
242 u32 *src = (u32 *)(srcDesc + 1);
243 u32 *dst = (u32 *)(dstDesc + 1);
245 /* Working backwards so that the gen bit is set at the end. */
246 for (i = 2; i > 0; i--) {
249 *dst = cpu_to_le32(*src);
254 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
255 struct Vmxnet3_RxCompDesc *dstDesc)
258 u32 *src = (u32 *)srcDesc;
259 u32 *dst = (u32 *)dstDesc;
260 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
261 *dst = le32_to_cpu(*src);
268 /* Used to read bitfield values from double words. */
269 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
271 u32 temp = le32_to_cpu(*bitfield);
272 u32 mask = ((1 << size) - 1) << pos;
280 #endif /* __BIG_ENDIAN_BITFIELD */
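/* The accessor macros below hide this: on big endian hosts descriptor fields are
 * pulled out with get_bitfield32() (or whole descriptors converted with the helpers
 * above), while on little endian hosts the bitfield members are read directly and
 * the getRxComp/getRxDesc macros are plain aliases. */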
282 #ifdef __BIG_ENDIAN_BITFIELD
284 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
285 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
286 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
287 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
288 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
289 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
290 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
291 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
292 VMXNET3_TCD_GEN_SIZE)
293 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
294 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
295 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
297 vmxnet3_RxCompToCPU((rcd), (tmp)); \
299 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
301 vmxnet3_RxDescToCPU((rxd), (tmp)); \
306 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
307 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
308 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
309 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
310 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
311 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
313 #endif /* __BIG_ENDIAN_BITFIELD */
317 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
318 struct pci_dev *pdev)
320 if (tbi->map_type == VMXNET3_MAP_SINGLE)
321 pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
323 else if (tbi->map_type == VMXNET3_MAP_PAGE)
324 pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
327 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
329 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
334 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
335 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
340 /* no out of order completion */
341 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
342 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
344 skb = tq->buf_info[eop_idx].skb;
346 tq->buf_info[eop_idx].skb = NULL;
348 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
350 while (tq->tx_ring.next2comp != eop_idx) {
351 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
354 /* update next2comp w/o tx_lock. Since we are marking more,
355 * not fewer, tx ring entries available, the worst case is
356 * that the tx routine incorrectly re-queues a pkt due to
357 * insufficient tx ring entries.
359 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
363 dev_kfree_skb_any(skb);
369 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
370 struct vmxnet3_adapter *adapter)
373 union Vmxnet3_GenericDesc *gdesc;
375 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
376 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
377 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
378 &gdesc->tcd), tq, adapter->pdev,
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
386 spin_lock(&tq->tx_lock);
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
390 netif_carrier_ok(adapter->netdev))) {
391 vmxnet3_tq_wake(tq, adapter);
393 spin_unlock(&tq->tx_lock);
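/* The queue is restarted only if it was actually stopped, more than
 * VMXNET3_WAKE_QUEUE_THRESHOLD descriptors are now free and the carrier is up,
 * so the stack is not woken just to be stopped again immediately. */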
400 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
401 struct vmxnet3_adapter *adapter)
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
406 struct vmxnet3_tx_buf_info *tbi;
408 tbi = tq->buf_info + tq->tx_ring.next2comp;
410 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
412 dev_kfree_skb_any(tbi->skb);
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
418 /* sanity check, verify all buffers are indeed unmapped and freed */
419 for (i = 0; i < tq->tx_ring.size; i++) {
420 BUG_ON(tq->buf_info[i].skb != NULL ||
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
424 tq->tx_ring.gen = VMXNET3_INIT_GEN;
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
427 tq->comp_ring.gen = VMXNET3_INIT_GEN;
428 tq->comp_ring.next2proc = 0;
433 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
434 struct vmxnet3_adapter *adapter)
436 if (tq->tx_ring.base) {
437 pci_free_consistent(adapter->pdev, tq->tx_ring.size *
438 sizeof(struct Vmxnet3_TxDesc),
439 tq->tx_ring.base, tq->tx_ring.basePA);
440 tq->tx_ring.base = NULL;
442 if (tq->data_ring.base) {
443 pci_free_consistent(adapter->pdev, tq->data_ring.size *
444 sizeof(struct Vmxnet3_TxDataDesc),
445 tq->data_ring.base, tq->data_ring.basePA);
446 tq->data_ring.base = NULL;
448 if (tq->comp_ring.base) {
449 pci_free_consistent(adapter->pdev, tq->comp_ring.size *
450 sizeof(struct Vmxnet3_TxCompDesc),
451 tq->comp_ring.base, tq->comp_ring.basePA);
452 tq->comp_ring.base = NULL;
459 /* Destroy all tx queues */
461 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
465 for (i = 0; i < adapter->num_tx_queues; i++)
466 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
471 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
472 struct vmxnet3_adapter *adapter)
476 /* reset the tx ring contents to 0 and reset the tx ring states */
477 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
478 sizeof(struct Vmxnet3_TxDesc));
479 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->tx_ring.gen = VMXNET3_INIT_GEN;
482 memset(tq->data_ring.base, 0, tq->data_ring.size *
483 sizeof(struct Vmxnet3_TxDataDesc));
485 /* reset the tx comp ring contents to 0 and reset comp ring states */
486 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
487 sizeof(struct Vmxnet3_TxCompDesc));
488 tq->comp_ring.next2proc = 0;
489 tq->comp_ring.gen = VMXNET3_INIT_GEN;
491 /* reset the bookkeeping data */
492 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
493 for (i = 0; i < tq->tx_ring.size; i++)
494 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
496 /* stats are not reset */
501 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
502 struct vmxnet3_adapter *adapter)
504 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
505 tq->comp_ring.base || tq->buf_info);
507 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
508 * sizeof(struct Vmxnet3_TxDesc),
509 &tq->tx_ring.basePA);
510 if (!tq->tx_ring.base) {
511 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
515 tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
517 sizeof(struct Vmxnet3_TxDataDesc),
518 &tq->data_ring.basePA);
519 if (!tq->data_ring.base) {
520 netdev_err(adapter->netdev, "failed to allocate data ring\n");
524 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
526 sizeof(struct Vmxnet3_TxCompDesc),
527 &tq->comp_ring.basePA);
528 if (!tq->comp_ring.base) {
529 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
533 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
541 vmxnet3_tq_destroy(tq, adapter);
546 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
550 for (i = 0; i < adapter->num_tx_queues; i++)
551 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
555 * starting from ring->next2fill, allocate rx buffers for the given ring
556 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
557 * are allocated or allocation fails
561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
562 int num_to_alloc, struct vmxnet3_adapter *adapter)
564 int num_allocated = 0;
565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
569 while (num_allocated <= num_to_alloc) {
570 struct vmxnet3_rx_buf_info *rbi;
571 union Vmxnet3_GenericDesc *gd;
573 rbi = rbi_base + ring->next2fill;
574 gd = ring->base + ring->next2fill;
576 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
577 if (rbi->skb == NULL) {
578 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
581 if (unlikely(rbi->skb == NULL)) {
582 rq->stats.rx_buf_alloc_failure++;
586 rbi->dma_addr = pci_map_single(adapter->pdev,
587 rbi->skb->data, rbi->len,
590 /* rx buffer skipped by the device */
592 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
594 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
595 rbi->len != PAGE_SIZE);
597 if (rbi->page == NULL) {
598 rbi->page = alloc_page(GFP_ATOMIC);
599 if (unlikely(rbi->page == NULL)) {
600 rq->stats.rx_buf_alloc_failure++;
603 rbi->dma_addr = pci_map_page(adapter->pdev,
604 rbi->page, 0, PAGE_SIZE,
607 /* rx buffers skipped by the device */
609 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
612 BUG_ON(rbi->dma_addr == 0);
613 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
614 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
617 /* Fill the last buffer but don't mark it ready, or else the
618 * device will think that the queue is full */
619 if (num_allocated == num_to_alloc)
622 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
624 vmxnet3_cmd_ring_adv_next2fill(ring);
627 netdev_dbg(adapter->netdev,
628 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
629 num_allocated, ring->next2fill, ring->next2comp);
631 /* so that the device can distinguish a full ring from an empty ring */
632 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
634 return num_allocated;
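/* The final buffer is filled in but intentionally left not-ready (its GEN bit keeps
 * the inverted value and next2fill is not advanced past it), so the ring can never
 * reach the ambiguous next2fill == next2comp state while buffers are outstanding. */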
639 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
640 struct vmxnet3_rx_buf_info *rbi)
642 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
643 skb_shinfo(skb)->nr_frags;
645 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
647 __skb_frag_set_page(frag, rbi->page);
648 frag->page_offset = 0;
649 skb_frag_size_set(frag, rcd->len);
650 skb->data_len += rcd->len;
651 skb->truesize += PAGE_SIZE;
652 skb_shinfo(skb)->nr_frags++;
657 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
658 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
659 struct vmxnet3_adapter *adapter)
662 unsigned long buf_offset;
664 union Vmxnet3_GenericDesc *gdesc;
665 struct vmxnet3_tx_buf_info *tbi = NULL;
667 BUG_ON(ctx->copy_size > skb_headlen(skb));
669 /* use the previous gen bit for the SOP desc */
670 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
672 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
673 gdesc = ctx->sop_txd; /* both loops below can be skipped */
675 /* no need to map the buffer if headers are copied */
676 if (ctx->copy_size) {
677 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
678 tq->tx_ring.next2fill *
679 sizeof(struct Vmxnet3_TxDataDesc));
680 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
681 ctx->sop_txd->dword[3] = 0;
683 tbi = tq->buf_info + tq->tx_ring.next2fill;
684 tbi->map_type = VMXNET3_MAP_NONE;
686 netdev_dbg(adapter->netdev,
687 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
688 tq->tx_ring.next2fill,
689 le64_to_cpu(ctx->sop_txd->txd.addr),
690 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
691 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
693 /* use the right gen for non-SOP desc */
694 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
697 /* linear part can use multiple tx desc if it's big */
698 len = skb_headlen(skb) - ctx->copy_size;
699 buf_offset = ctx->copy_size;
703 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
707 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
708 /* spec says that for TxDesc.len, 0 == 2^14 */
711 tbi = tq->buf_info + tq->tx_ring.next2fill;
712 tbi->map_type = VMXNET3_MAP_SINGLE;
713 tbi->dma_addr = pci_map_single(adapter->pdev,
714 skb->data + buf_offset, buf_size,
719 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
720 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
722 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
723 gdesc->dword[2] = cpu_to_le32(dw2);
726 netdev_dbg(adapter->netdev,
727 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
728 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
729 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
730 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
731 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
734 buf_offset += buf_size;
737 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
738 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
742 len = skb_frag_size(frag);
744 tbi = tq->buf_info + tq->tx_ring.next2fill;
745 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
749 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
750 /* spec says that for TxDesc.len, 0 == 2^14 */
752 tbi->map_type = VMXNET3_MAP_PAGE;
753 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
754 buf_offset, buf_size,
759 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
760 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
762 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
763 gdesc->dword[2] = cpu_to_le32(dw2);
766 netdev_dbg(adapter->netdev,
767 "txd[%u]: 0x%llx 0x%x 0x%x\n",
768 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
769 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
770 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
771 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
774 buf_offset += buf_size;
778 ctx->eop_txd = gdesc;
780 /* set the last buf_info for the pkt */
782 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
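/* The SOP descriptor was written with the previous generation bit (gen ^ 0x1), so
 * the device ignores the whole packet until vmxnet3_tq_xmit() fills in the header
 * fields and flips that bit as its last step. */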
786 /* Init all tx queues */
788 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
792 for (i = 0; i < adapter->num_tx_queues; i++)
793 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
798 * parse and copy relevant protocol headers:
799 * For a tso pkt, relevant headers are L2/3/4 including options
800 * For a pkt requesting csum offloading, they are L2/3 and may include L4
801 * if it's a TCP/UDP pkt
804 * -1: an error occurred during parsing
805 * 0: protocol headers parsed, but too big to be copied
806 * 1: protocol headers parsed and copied
809 * 1. related *ctx fields are updated.
810 * 2. ctx->copy_size is # of bytes copied
811 * 3. the portion copied is guaranteed to be in the linear part
815 vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
816 struct vmxnet3_tx_ctx *ctx,
817 struct vmxnet3_adapter *adapter)
819 struct Vmxnet3_TxDataDesc *tdd;
821 if (ctx->mss) { /* TSO */
822 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
823 ctx->l4_hdr_size = tcp_hdrlen(skb);
824 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
826 if (skb->ip_summed == CHECKSUM_PARTIAL) {
827 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
830 const struct iphdr *iph = ip_hdr(skb);
832 if (iph->protocol == IPPROTO_TCP)
833 ctx->l4_hdr_size = tcp_hdrlen(skb);
834 else if (iph->protocol == IPPROTO_UDP)
835 ctx->l4_hdr_size = sizeof(struct udphdr);
837 ctx->l4_hdr_size = 0;
839 /* for simplicity, don't copy L4 headers */
840 ctx->l4_hdr_size = 0;
842 ctx->copy_size = min(ctx->eth_ip_hdr_size +
843 ctx->l4_hdr_size, skb->len);
845 ctx->eth_ip_hdr_size = 0;
846 ctx->l4_hdr_size = 0;
847 /* copy as much as allowed */
848 ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
852 /* make sure headers are accessible directly */
853 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
857 if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
858 tq->stats.oversized_hdr++;
863 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
865 memcpy(tdd->data, skb->data, ctx->copy_size);
866 netdev_dbg(adapter->netdev,
867 "copy %u bytes to dataRing[%u]\n",
868 ctx->copy_size, tq->tx_ring.next2fill);
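/* The copied headers land in the data-ring slot that shares its index with
 * tx_ring.next2fill; vmxnet3_map_pkt() later points the SOP descriptor at that
 * slot instead of DMA-mapping the header bytes separately. */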
877 vmxnet3_prepare_tso(struct sk_buff *skb,
878 struct vmxnet3_tx_ctx *ctx)
880 struct tcphdr *tcph = tcp_hdr(skb);
883 struct iphdr *iph = ip_hdr(skb);
886 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
889 struct ipv6hdr *iph = ipv6_hdr(skb);
891 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
896 static int txd_estimate(const struct sk_buff *skb)
898 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
901 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
902 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
904 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
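/* txd_estimate() counts one descriptor per VMXNET3_MAX_TX_BUF_SIZE-sized chunk of
 * the linear area and of each page fragment (via VMXNET3_TXD_NEEDED), plus one
 * extra, presumably for the copied-header descriptor set up by vmxnet3_map_pkt(). */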
910 * Transmits a pkt through a given tq
912 * NETDEV_TX_OK: descriptors were set up successfully
913 * NETDEV_TX_OK: an error occurred, the pkt is dropped
914 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
917 * 1. tx ring may be changed
918 * 2. tq stats may be updated accordingly
919 * 3. shared->txNumDeferred may be updated
923 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
924 struct vmxnet3_adapter *adapter, struct net_device *netdev)
929 struct vmxnet3_tx_ctx ctx;
930 union Vmxnet3_GenericDesc *gdesc;
931 #ifdef __BIG_ENDIAN_BITFIELD
932 /* Use temporary descriptor to avoid touching bits multiple times */
933 union Vmxnet3_GenericDesc tempTxDesc;
936 count = txd_estimate(skb);
938 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
940 ctx.mss = skb_shinfo(skb)->gso_size;
942 if (skb_header_cloned(skb)) {
943 if (unlikely(pskb_expand_head(skb, 0, 0,
945 tq->stats.drop_tso++;
948 tq->stats.copy_skb_header++;
950 vmxnet3_prepare_tso(skb, &ctx);
952 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
954 /* non-tso pkts must not use more than
955 * VMXNET3_MAX_TXD_PER_PKT entries
957 if (skb_linearize(skb) != 0) {
958 tq->stats.drop_too_many_frags++;
961 tq->stats.linearized++;
963 /* recalculate the # of descriptors to use */
964 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
968 spin_lock_irqsave(&tq->tx_lock, flags);
970 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
971 tq->stats.tx_ring_full++;
972 netdev_dbg(adapter->netdev,
973 "tx queue stopped on %s, next2comp %u"
974 " next2fill %u\n", adapter->netdev->name,
975 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
977 vmxnet3_tq_stop(tq, adapter);
978 spin_unlock_irqrestore(&tq->tx_lock, flags);
979 return NETDEV_TX_BUSY;
983 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
985 BUG_ON(ret <= 0 && ctx.copy_size != 0);
986 /* hdrs parsed, check against other limits */
988 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
989 VMXNET3_MAX_TX_BUF_SIZE)) {
993 if (skb->ip_summed == CHECKSUM_PARTIAL) {
994 if (unlikely(ctx.eth_ip_hdr_size +
996 VMXNET3_MAX_CSUM_OFFSET)) {
1002 tq->stats.drop_hdr_inspect_err++;
1003 goto unlock_drop_pkt;
1006 /* fill tx descs related to addr & len */
1007 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
1009 /* setup the EOP desc */
1010 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1012 /* setup the SOP desc */
1013 #ifdef __BIG_ENDIAN_BITFIELD
1014 gdesc = &tempTxDesc;
1015 gdesc->dword[2] = ctx.sop_txd->dword[2];
1016 gdesc->dword[3] = ctx.sop_txd->dword[3];
1018 gdesc = ctx.sop_txd;
1021 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1022 gdesc->txd.om = VMXNET3_OM_TSO;
1023 gdesc->txd.msscof = ctx.mss;
1024 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1025 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1027 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1028 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1029 gdesc->txd.om = VMXNET3_OM_CSUM;
1030 gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1034 gdesc->txd.msscof = 0;
1036 le32_add_cpu(&tq->shared->txNumDeferred, 1);
1039 if (vlan_tx_tag_present(skb)) {
1041 gdesc->txd.tci = vlan_tx_tag_get(skb);
1044 /* finally flips the GEN bit of the SOP desc. */
1045 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1047 #ifdef __BIG_ENDIAN_BITFIELD
1048 /* Finished updating in bitfields of Tx Desc, so write them in original
1051 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1052 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1053 gdesc = ctx.sop_txd;
1055 netdev_dbg(adapter->netdev,
1056 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1058 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1059 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1061 spin_unlock_irqrestore(&tq->tx_lock, flags);
1063 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1064 le32_to_cpu(tq->shared->txThreshold)) {
1065 tq->shared->txNumDeferred = 0;
1066 VMXNET3_WRITE_BAR0_REG(adapter,
1067 VMXNET3_REG_TXPROD + tq->qid * 8,
1068 tq->tx_ring.next2fill);
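/* The TXPROD doorbell above is written only once txNumDeferred crosses the
 * device-supplied txThreshold, so descriptor notifications are batched rather than
 * issued for every single packet. */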
1071 return NETDEV_TX_OK;
1074 tq->stats.drop_oversized_hdr++;
1076 spin_unlock_irqrestore(&tq->tx_lock, flags);
1078 tq->stats.drop_total++;
1080 return NETDEV_TX_OK;
1085 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1087 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1089 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1090 return vmxnet3_tq_xmit(skb,
1091 &adapter->tx_queue[skb->queue_mapping],
1097 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1098 struct sk_buff *skb,
1099 union Vmxnet3_GenericDesc *gdesc)
1101 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1102 /* typical case: TCP/UDP over IP and both csums are correct */
1103 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
1104 VMXNET3_RCD_CSUM_OK) {
1105 skb->ip_summed = CHECKSUM_UNNECESSARY;
1106 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1107 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1108 BUG_ON(gdesc->rcd.frg);
1110 if (gdesc->rcd.csum) {
1111 skb->csum = htons(gdesc->rcd.csum);
1112 skb->ip_summed = CHECKSUM_PARTIAL;
1114 skb_checksum_none_assert(skb);
1118 skb_checksum_none_assert(skb);
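/* Summary: checksums are only trusted when rx csum offload is enabled and the
 * "checksum not calculated" bit is clear; a fully validated packet is marked
 * CHECKSUM_UNNECESSARY, a raw device-supplied checksum value is passed up as
 * CHECKSUM_PARTIAL, and anything else is left without checksum information. */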
1124 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1125 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1127 rq->stats.drop_err++;
1129 rq->stats.drop_fcs++;
1131 rq->stats.drop_total++;
1134 * We do not unmap and chain the rx buffer to the skb.
1135 * We basically pretend this buffer is not used and will be recycled
1136 * by vmxnet3_rq_alloc_rx_buf()
1140 * ctx->skb may be NULL if this is the first and the only one
1144 dev_kfree_skb_irq(ctx->skb);
1151 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1152 struct vmxnet3_adapter *adapter, int quota)
1154 static const u32 rxprod_reg[2] = {
1155 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1158 bool skip_page_frags = false;
1159 struct Vmxnet3_RxCompDesc *rcd;
1160 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1161 #ifdef __BIG_ENDIAN_BITFIELD
1162 struct Vmxnet3_RxDesc rxCmdDesc;
1163 struct Vmxnet3_RxCompDesc rxComp;
1165 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1167 while (rcd->gen == rq->comp_ring.gen) {
1168 struct vmxnet3_rx_buf_info *rbi;
1169 struct sk_buff *skb, *new_skb = NULL;
1170 struct page *new_page = NULL;
1172 struct Vmxnet3_RxDesc *rxd;
1174 struct vmxnet3_cmd_ring *ring = NULL;
1175 if (num_rxd >= quota) {
1176 /* we may stop even before we see the EOP desc of the current pkt */
1182 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1184 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1185 ring = rq->rx_ring + ring_idx;
1186 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1188 rbi = rq->buf_info[ring_idx] + idx;
1190 BUG_ON(rxd->addr != rbi->dma_addr ||
1191 rxd->len != rbi->len);
1193 if (unlikely(rcd->eop && rcd->err)) {
1194 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1198 if (rcd->sop) { /* first buf of the pkt */
1199 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1200 rcd->rqID != rq->qid);
1202 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1203 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1205 if (unlikely(rcd->len == 0)) {
1206 /* Pretend the rx buffer is skipped. */
1207 BUG_ON(!(rcd->sop && rcd->eop));
1208 netdev_dbg(adapter->netdev,
1209 "rxRing[%u][%u] 0 length\n",
1214 skip_page_frags = false;
1215 ctx->skb = rbi->skb;
1216 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1218 if (new_skb == NULL) {
1219 /* Skb allocation failed, do not hand this skb
1220 * over to the stack. Reuse it. Drop the existing pkt
1222 rq->stats.rx_buf_alloc_failure++;
1224 rq->stats.drop_total++;
1225 skip_page_frags = true;
1229 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
1230 PCI_DMA_FROMDEVICE);
1232 skb_put(ctx->skb, rcd->len);
1234 /* Immediate refill */
1236 rbi->dma_addr = pci_map_single(adapter->pdev,
1237 rbi->skb->data, rbi->len,
1238 PCI_DMA_FROMDEVICE);
1239 rxd->addr = cpu_to_le64(rbi->dma_addr);
1240 rxd->len = rbi->len;
1243 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1245 /* non SOP buffer must be type 1 in most cases */
1246 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1247 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1249 /* If an sop buffer was dropped, skip all
1250 * following non-sop fragments. They will be reused.
1252 if (skip_page_frags)
1255 new_page = alloc_page(GFP_ATOMIC);
1256 if (unlikely(new_page == NULL)) {
1257 /* Replacement page frag could not be allocated.
1258 * Reuse this page. Drop the pkt and free the
1259 * skb which contained this page as a frag. Skip
1260 * processing all the following non-sop frags.
1262 rq->stats.rx_buf_alloc_failure++;
1263 dev_kfree_skb(ctx->skb);
1265 skip_page_frags = true;
1270 pci_unmap_page(adapter->pdev,
1271 rbi->dma_addr, rbi->len,
1272 PCI_DMA_FROMDEVICE);
1274 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1277 /* Immediate refill */
1278 rbi->page = new_page;
1279 rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
1281 PCI_DMA_FROMDEVICE);
1282 rxd->addr = cpu_to_le64(rbi->dma_addr);
1283 rxd->len = rbi->len;
1289 skb->len += skb->data_len;
1291 vmxnet3_rx_csum(adapter, skb,
1292 (union Vmxnet3_GenericDesc *)rcd);
1293 skb->protocol = eth_type_trans(skb, adapter->netdev);
1295 if (unlikely(rcd->ts))
1296 __vlan_hwaccel_put_tag(skb, rcd->tci);
1298 if (adapter->netdev->features & NETIF_F_LRO)
1299 netif_receive_skb(skb);
1301 napi_gro_receive(&rq->napi, skb);
1307 /* device may have skipped some rx descs */
1308 ring->next2comp = idx;
1309 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1310 ring = rq->rx_ring + ring_idx;
1311 while (num_to_alloc) {
1312 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1316 /* Recv desc is ready to be used by the device */
1317 rxd->gen = ring->gen;
1318 vmxnet3_cmd_ring_adv_next2fill(ring);
1322 /* if needed, update the register */
1323 if (unlikely(rq->shared->updateRxProd)) {
1324 VMXNET3_WRITE_BAR0_REG(adapter,
1325 rxprod_reg[ring_idx] + rq->qid * 8,
1329 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1330 vmxnet3_getRxComp(rcd,
1331 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
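/* Loop recap: completion descriptors are consumed while their GEN bit matches the
 * completion ring's. An SOP completion starts a new skb from the head buffer and
 * immediately refills that slot; non-SOP completions are appended as page frags; on
 * EOP the skb gets csum/VLAN treatment and goes to GRO (or netif_receive_skb when
 * LRO is enabled). Skipped rx descriptors are then re-armed and RXPROD is written
 * only when the device asks for it via shared->updateRxProd. */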
1339 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1340 struct vmxnet3_adapter *adapter)
1343 struct Vmxnet3_RxDesc *rxd;
1345 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1346 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1347 #ifdef __BIG_ENDIAN_BITFIELD
1348 struct Vmxnet3_RxDesc rxDesc;
1350 vmxnet3_getRxDesc(rxd,
1351 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1353 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1354 rq->buf_info[ring_idx][i].skb) {
1355 pci_unmap_single(adapter->pdev, rxd->addr,
1356 rxd->len, PCI_DMA_FROMDEVICE);
1357 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1358 rq->buf_info[ring_idx][i].skb = NULL;
1359 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1360 rq->buf_info[ring_idx][i].page) {
1361 pci_unmap_page(adapter->pdev, rxd->addr,
1362 rxd->len, PCI_DMA_FROMDEVICE);
1363 put_page(rq->buf_info[ring_idx][i].page);
1364 rq->buf_info[ring_idx][i].page = NULL;
1368 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1369 rq->rx_ring[ring_idx].next2fill =
1370 rq->rx_ring[ring_idx].next2comp = 0;
1373 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1374 rq->comp_ring.next2proc = 0;
1379 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1383 for (i = 0; i < adapter->num_rx_queues; i++)
1384 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1388 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1389 struct vmxnet3_adapter *adapter)
1394 /* all rx buffers must have already been freed */
1395 for (i = 0; i < 2; i++) {
1396 if (rq->buf_info[i]) {
1397 for (j = 0; j < rq->rx_ring[i].size; j++)
1398 BUG_ON(rq->buf_info[i][j].page != NULL);
1403 kfree(rq->buf_info[0]);
1405 for (i = 0; i < 2; i++) {
1406 if (rq->rx_ring[i].base) {
1407 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1408 * sizeof(struct Vmxnet3_RxDesc),
1409 rq->rx_ring[i].base,
1410 rq->rx_ring[i].basePA);
1411 rq->rx_ring[i].base = NULL;
1413 rq->buf_info[i] = NULL;
1416 if (rq->comp_ring.base) {
1417 pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1418 sizeof(struct Vmxnet3_RxCompDesc),
1419 rq->comp_ring.base, rq->comp_ring.basePA);
1420 rq->comp_ring.base = NULL;
1426 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1427 struct vmxnet3_adapter *adapter)
1431 /* initialize buf_info */
1432 for (i = 0; i < rq->rx_ring[0].size; i++) {
1434 /* 1st buf for a pkt is skbuff */
1435 if (i % adapter->rx_buf_per_pkt == 0) {
1436 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1437 rq->buf_info[0][i].len = adapter->skb_buf_size;
1438 } else { /* subsequent bufs for a pkt are frags */
1439 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1440 rq->buf_info[0][i].len = PAGE_SIZE;
1443 for (i = 0; i < rq->rx_ring[1].size; i++) {
1444 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1445 rq->buf_info[1][i].len = PAGE_SIZE;
1448 /* reset internal state and allocate buffers for both rings */
1449 for (i = 0; i < 2; i++) {
1450 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1452 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1453 sizeof(struct Vmxnet3_RxDesc));
1454 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1456 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1458 /* need at least 1 rx buffer for the 1st ring */
1461 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1463 /* reset the comp ring */
1464 rq->comp_ring.next2proc = 0;
1465 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1466 sizeof(struct Vmxnet3_RxCompDesc));
1467 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1470 rq->rx_ctx.skb = NULL;
1472 /* stats are not reset */
1478 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1482 for (i = 0; i < adapter->num_rx_queues; i++) {
1483 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1484 if (unlikely(err)) {
1485 dev_err(&adapter->netdev->dev, "%s: failed to "
1486 "initialize rx queue%i\n",
1487 adapter->netdev->name, i);
1497 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1501 struct vmxnet3_rx_buf_info *bi;
1503 for (i = 0; i < 2; i++) {
1505 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1506 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1507 &rq->rx_ring[i].basePA);
1508 if (!rq->rx_ring[i].base) {
1509 netdev_err(adapter->netdev,
1510 "failed to allocate rx ring %d\n", i);
1515 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1516 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1517 &rq->comp_ring.basePA);
1518 if (!rq->comp_ring.base) {
1519 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1523 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1524 rq->rx_ring[1].size);
1525 bi = kzalloc(sz, GFP_KERNEL);
1529 rq->buf_info[0] = bi;
1530 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1535 vmxnet3_rq_destroy(rq, adapter);
1541 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1545 for (i = 0; i < adapter->num_rx_queues; i++) {
1546 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1547 if (unlikely(err)) {
1548 dev_err(&adapter->netdev->dev,
1549 "%s: failed to create rx queue%i\n",
1550 adapter->netdev->name, i);
1556 vmxnet3_rq_destroy_all(adapter);
1561 /* Multiple queue aware polling function for tx and rx */
1564 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1566 int rcd_done = 0, i;
1567 if (unlikely(adapter->shared->ecr))
1568 vmxnet3_process_events(adapter);
1569 for (i = 0; i < adapter->num_tx_queues; i++)
1570 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1572 for (i = 0; i < adapter->num_rx_queues; i++)
1573 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1580 vmxnet3_poll(struct napi_struct *napi, int budget)
1582 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1583 struct vmxnet3_rx_queue, napi);
1586 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1588 if (rxd_done < budget) {
1589 napi_complete(napi);
1590 vmxnet3_enable_all_intrs(rx_queue->adapter);
1596 * NAPI polling function for MSI-X mode with multiple Rx queues
1597 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1601 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1603 struct vmxnet3_rx_queue *rq = container_of(napi,
1604 struct vmxnet3_rx_queue, napi);
1605 struct vmxnet3_adapter *adapter = rq->adapter;
1608 /* When sharing interrupt with corresponding tx queue, process
1609 * tx completions in that queue as well
1611 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1612 struct vmxnet3_tx_queue *tq =
1613 &adapter->tx_queue[rq - adapter->rx_queue];
1614 vmxnet3_tq_tx_complete(tq, adapter);
1617 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1619 if (rxd_done < budget) {
1620 napi_complete(napi);
1621 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1627 #ifdef CONFIG_PCI_MSI
1630 * Handle completion interrupts on tx queues
1631 * Returns whether or not the intr is handled
1635 vmxnet3_msix_tx(int irq, void *data)
1637 struct vmxnet3_tx_queue *tq = data;
1638 struct vmxnet3_adapter *adapter = tq->adapter;
1640 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1641 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1643 /* Handle the case where only one irq is allocated for all tx queues */
1644 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1646 for (i = 0; i < adapter->num_tx_queues; i++) {
1647 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1648 vmxnet3_tq_tx_complete(txq, adapter);
1651 vmxnet3_tq_tx_complete(tq, adapter);
1653 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1660 * Handle completion interrupts on rx queues. Returns whether or not the
1665 vmxnet3_msix_rx(int irq, void *data)
1667 struct vmxnet3_rx_queue *rq = data;
1668 struct vmxnet3_adapter *adapter = rq->adapter;
1670 /* disable intr if needed */
1671 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1672 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1673 napi_schedule(&rq->napi);
1679 *----------------------------------------------------------------------------
1681 * vmxnet3_msix_event --
1683 * vmxnet3 msix event intr handler
1685 * Result:
1686 * whether or not the intr is handled
1688 *----------------------------------------------------------------------------
1692 vmxnet3_msix_event(int irq, void *data)
1694 struct net_device *dev = data;
1695 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1697 /* disable intr if needed */
1698 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1699 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1701 if (adapter->shared->ecr)
1702 vmxnet3_process_events(adapter);
1704 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1709 #endif /* CONFIG_PCI_MSI */
1712 /* Interrupt handler for vmxnet3 */
1714 vmxnet3_intr(int irq, void *dev_id)
1716 struct net_device *dev = dev_id;
1717 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1719 if (adapter->intr.type == VMXNET3_IT_INTX) {
1720 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1721 if (unlikely(icr == 0))
1727 /* disable intr if needed */
1728 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1729 vmxnet3_disable_all_intrs(adapter);
1731 napi_schedule(&adapter->rx_queue[0].napi);
1736 #ifdef CONFIG_NET_POLL_CONTROLLER
1738 /* netpoll callback. */
1740 vmxnet3_netpoll(struct net_device *netdev)
1742 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1744 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1745 vmxnet3_disable_all_intrs(adapter);
1747 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1748 vmxnet3_enable_all_intrs(adapter);
1751 #endif /* CONFIG_NET_POLL_CONTROLLER */
1754 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1756 struct vmxnet3_intr *intr = &adapter->intr;
1760 #ifdef CONFIG_PCI_MSI
1761 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1762 for (i = 0; i < adapter->num_tx_queues; i++) {
1763 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1764 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1765 adapter->netdev->name, vector);
1767 intr->msix_entries[vector].vector,
1769 adapter->tx_queue[i].name,
1770 &adapter->tx_queue[i]);
1772 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1773 adapter->netdev->name, vector);
1776 dev_err(&adapter->netdev->dev,
1777 "Failed to request irq for MSIX, %s, "
1779 adapter->tx_queue[i].name, err);
1783 /* Handle the case where only 1 MSIx was allocated for all tx queues */
1785 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1786 for (; i < adapter->num_tx_queues; i++)
1787 adapter->tx_queue[i].comp_ring.intr_idx
1792 adapter->tx_queue[i].comp_ring.intr_idx
1796 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1799 for (i = 0; i < adapter->num_rx_queues; i++) {
1800 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1801 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1802 adapter->netdev->name, vector);
1804 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1805 adapter->netdev->name, vector);
1806 err = request_irq(intr->msix_entries[vector].vector,
1808 adapter->rx_queue[i].name,
1809 &(adapter->rx_queue[i]));
1811 netdev_err(adapter->netdev,
1812 "Failed to request irq for MSIX, "
1814 adapter->rx_queue[i].name, err);
1818 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1821 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1822 adapter->netdev->name, vector);
1823 err = request_irq(intr->msix_entries[vector].vector,
1824 vmxnet3_msix_event, 0,
1825 intr->event_msi_vector_name, adapter->netdev);
1826 intr->event_intr_idx = vector;
1828 } else if (intr->type == VMXNET3_IT_MSI) {
1829 adapter->num_rx_queues = 1;
1830 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1831 adapter->netdev->name, adapter->netdev);
1834 adapter->num_rx_queues = 1;
1835 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1836 IRQF_SHARED, adapter->netdev->name,
1838 #ifdef CONFIG_PCI_MSI
1841 intr->num_intrs = vector + 1;
1843 netdev_err(adapter->netdev,
1844 "Failed to request irq (intr type:%d), error %d\n",
1847 /* Number of rx queues will not change after this */
1848 for (i = 0; i < adapter->num_rx_queues; i++) {
1849 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1851 rq->qid2 = i + adapter->num_rx_queues;
1856 /* init our intr settings */
1857 for (i = 0; i < intr->num_intrs; i++)
1858 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1859 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1860 adapter->intr.event_intr_idx = 0;
1861 for (i = 0; i < adapter->num_tx_queues; i++)
1862 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1863 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1866 netdev_info(adapter->netdev,
1867 "intr type %u, mode %u, %u vectors allocated\n",
1868 intr->type, intr->mask_mode, intr->num_intrs);
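/* In MSI-X mode each tx and rx queue normally gets its own vector plus one event
 * vector; VMXNET3_INTR_TXSHARE collapses all tx queues onto one vector and
 * VMXNET3_INTR_BUDDYSHARE lets each rx vector also service its buddy tx queue.
 * MSI and INTx fall back to a single vector with a single rx queue. */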
1876 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1878 struct vmxnet3_intr *intr = &adapter->intr;
1879 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1881 switch (intr->type) {
1882 #ifdef CONFIG_PCI_MSI
1883 case VMXNET3_IT_MSIX:
1887 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1888 for (i = 0; i < adapter->num_tx_queues; i++) {
1889 free_irq(intr->msix_entries[vector++].vector,
1890 &(adapter->tx_queue[i]));
1891 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1896 for (i = 0; i < adapter->num_rx_queues; i++) {
1897 free_irq(intr->msix_entries[vector++].vector,
1898 &(adapter->rx_queue[i]));
1901 free_irq(intr->msix_entries[vector].vector,
1903 BUG_ON(vector >= intr->num_intrs);
1907 case VMXNET3_IT_MSI:
1908 free_irq(adapter->pdev->irq, adapter->netdev);
1910 case VMXNET3_IT_INTX:
1911 free_irq(adapter->pdev->irq, adapter->netdev);
1920 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1922 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1925 /* allow untagged pkts */
1926 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1928 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1929 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1934 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1936 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1938 if (!(netdev->flags & IFF_PROMISC)) {
1939 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1940 unsigned long flags;
1942 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1943 spin_lock_irqsave(&adapter->cmd_lock, flags);
1944 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1946 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1949 set_bit(vid, adapter->active_vlans);
1956 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1958 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1960 if (!(netdev->flags & IFF_PROMISC)) {
1961 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1962 unsigned long flags;
1964 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1965 spin_lock_irqsave(&adapter->cmd_lock, flags);
1966 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1967 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1968 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1971 clear_bit(vid, adapter->active_vlans);
1978 vmxnet3_copy_mc(struct net_device *netdev)
1981 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1983 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1985 /* We may be called with BH disabled */
1986 buf = kmalloc(sz, GFP_ATOMIC);
1988 struct netdev_hw_addr *ha;
1991 netdev_for_each_mc_addr(ha, netdev)
1992 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2001 vmxnet3_set_mc(struct net_device *netdev)
2003 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2004 unsigned long flags;
2005 struct Vmxnet3_RxFilterConf *rxConf =
2006 &adapter->shared->devRead.rxFilterConf;
2007 u8 *new_table = NULL;
2008 u32 new_mode = VMXNET3_RXM_UCAST;
2010 if (netdev->flags & IFF_PROMISC) {
2011 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2012 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2014 new_mode |= VMXNET3_RXM_PROMISC;
2016 vmxnet3_restore_vlan(adapter);
2019 if (netdev->flags & IFF_BROADCAST)
2020 new_mode |= VMXNET3_RXM_BCAST;
2022 if (netdev->flags & IFF_ALLMULTI)
2023 new_mode |= VMXNET3_RXM_ALL_MULTI;
2025 if (!netdev_mc_empty(netdev)) {
2026 new_table = vmxnet3_copy_mc(netdev);
2028 new_mode |= VMXNET3_RXM_MCAST;
2029 rxConf->mfTableLen = cpu_to_le16(
2030 netdev_mc_count(netdev) * ETH_ALEN);
2031 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2034 netdev_info(netdev, "failed to copy mcast list"
2035 ", setting ALL_MULTI\n");
2036 new_mode |= VMXNET3_RXM_ALL_MULTI;
2041 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2042 rxConf->mfTableLen = 0;
2043 rxConf->mfTablePA = 0;
2046 spin_lock_irqsave(&adapter->cmd_lock, flags);
2047 if (new_mode != rxConf->rxMode) {
2048 rxConf->rxMode = cpu_to_le32(new_mode);
2049 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2050 VMXNET3_CMD_UPDATE_RX_MODE);
2051 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2052 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2055 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2056 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2057 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2063 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2067 for (i = 0; i < adapter->num_rx_queues; i++)
2068 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2073 * Set up driver_shared based on settings in adapter.
2077 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2079 struct Vmxnet3_DriverShared *shared = adapter->shared;
2080 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2081 struct Vmxnet3_TxQueueConf *tqc;
2082 struct Vmxnet3_RxQueueConf *rqc;
2085 memset(shared, 0, sizeof(*shared));
2087 /* driver settings */
2088 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2089 devRead->misc.driverInfo.version = cpu_to_le32(
2090 VMXNET3_DRIVER_VERSION_NUM);
2091 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2092 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2093 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2094 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2095 *((u32 *)&devRead->misc.driverInfo.gos));
2096 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2097 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2099 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2100 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2102 /* set up feature flags */
2103 if (adapter->netdev->features & NETIF_F_RXCSUM)
2104 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2106 if (adapter->netdev->features & NETIF_F_LRO) {
2107 devRead->misc.uptFeatures |= UPT1_F_LRO;
2108 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2110 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2111 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2113 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2114 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2115 devRead->misc.queueDescLen = cpu_to_le32(
2116 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2117 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2119 /* tx queue settings */
2120 devRead->misc.numTxQueues = adapter->num_tx_queues;
2121 for (i = 0; i < adapter->num_tx_queues; i++) {
2122 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2123 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2124 tqc = &adapter->tqd_start[i].conf;
2125 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2126 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2127 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2128 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
2129 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2130 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2131 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2132 tqc->ddLen = cpu_to_le32(
2133 sizeof(struct vmxnet3_tx_buf_info) *
2135 tqc->intrIdx = tq->comp_ring.intr_idx;
2138 /* rx queue settings */
2139 devRead->misc.numRxQueues = adapter->num_rx_queues;
2140 for (i = 0; i < adapter->num_rx_queues; i++) {
2141 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2142 rqc = &adapter->rqd_start[i].conf;
2143 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2144 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2145 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2146 rqc->ddPA = cpu_to_le64(virt_to_phys(
2148 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2149 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2150 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2151 rqc->ddLen = cpu_to_le32(
2152 sizeof(struct vmxnet3_rx_buf_info) *
2153 (rqc->rxRingSize[0] +
2154 rqc->rxRingSize[1]));
2155 rqc->intrIdx = rq->comp_ring.intr_idx;
2159 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2162 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2163 devRead->misc.uptFeatures |= UPT1_F_RSS;
2164 devRead->misc.numRxQueues = adapter->num_rx_queues;
2165 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2166 UPT1_RSS_HASH_TYPE_IPV4 |
2167 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2168 UPT1_RSS_HASH_TYPE_IPV6;
2169 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2170 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2171 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2172 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2173 for (i = 0; i < rssConf->indTableSize; i++)
2174 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2175 i, adapter->num_rx_queues);
2177 devRead->rssConfDesc.confVer = 1;
2178 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2179 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
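/* The RSS configuration handed to the device uses a random Toeplitz hash key and a
 * default indirection table that spreads flows across all num_rx_queues queues. */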
2182 #endif /* VMXNET3_RSS */
2185 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2187 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2188 for (i = 0; i < adapter->intr.num_intrs; i++)
2189 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2191 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2192 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2194 /* rx filter settings */
2195 devRead->rxFilterConf.rxMode = 0;
2196 vmxnet3_restore_vlan(adapter);
2197 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2199 /* the rest are already zeroed */
2204 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2208 unsigned long flags;
2210 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2211 " ring sizes %u %u %u\n", adapter->netdev->name,
2212 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2213 adapter->tx_queue[0].tx_ring.size,
2214 adapter->rx_queue[0].rx_ring[0].size,
2215 adapter->rx_queue[0].rx_ring[1].size);
2217 vmxnet3_tq_init_all(adapter);
2218 err = vmxnet3_rq_init_all(adapter);
2220 netdev_err(adapter->netdev,
2221 "Failed to init rx queue error %d\n", err);
2225 err = vmxnet3_request_irqs(adapter);
2227 netdev_err(adapter->netdev,
2228 "Failed to setup irq for error %d\n", err);
2232 vmxnet3_setup_driver_shared(adapter);
2234 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2235 adapter->shared_pa));
2236 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2237 adapter->shared_pa));
2238 spin_lock_irqsave(&adapter->cmd_lock, flags);
2239 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2240 VMXNET3_CMD_ACTIVATE_DEV);
2241 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2242 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2245 netdev_err(adapter->netdev,
2246 "Failed to activate dev: error %u\n", ret);
2251 for (i = 0; i < adapter->num_rx_queues; i++) {
2252 VMXNET3_WRITE_BAR0_REG(adapter,
2253 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2254 adapter->rx_queue[i].rx_ring[0].next2fill);
2255 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2256 (i * VMXNET3_REG_ALIGN)),
2257 adapter->rx_queue[i].rx_ring[1].next2fill);
2260 /* Apply the rx filter settings last. */
2261 vmxnet3_set_mc(adapter->netdev);
2264 * Check link state when first activating device. It will start the
2265 * tx queue if the link is up.
2267 vmxnet3_check_link(adapter, true);
2268 for (i = 0; i < adapter->num_rx_queues; i++)
2269 napi_enable(&adapter->rx_queue[i].napi);
2270 vmxnet3_enable_all_intrs(adapter);
2271 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2275 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2276 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2277 vmxnet3_free_irqs(adapter);
2280 /* free up buffers we allocated */
2281 vmxnet3_rq_cleanup_all(adapter);
2287 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2289 unsigned long flags;
2290 spin_lock_irqsave(&adapter->cmd_lock, flags);
2291 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2292 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2297 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2300 unsigned long flags;
2301 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2305 spin_lock_irqsave(&adapter->cmd_lock, flags);
2306 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2307 VMXNET3_CMD_QUIESCE_DEV);
2308 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2309 vmxnet3_disable_all_intrs(adapter);
2311 for (i = 0; i < adapter->num_rx_queues; i++)
2312 napi_disable(&adapter->rx_queue[i].napi);
2313 netif_tx_disable(adapter->netdev);
2314 adapter->link_speed = 0;
2315 netif_carrier_off(adapter->netdev);
2317 vmxnet3_tq_cleanup_all(adapter);
2318 vmxnet3_rq_cleanup_all(adapter);
2319 vmxnet3_free_irqs(adapter);
2325 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2330 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2332 tmp = (mac[5] << 8) | mac[4];
2333 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2338 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2340 struct sockaddr *addr = p;
2341 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2343 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2344 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2350 /* ==================== initialization and cleanup routines ============ */
2353 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2356 unsigned long mmio_start, mmio_len;
2357 struct pci_dev *pdev = adapter->pdev;
2359 err = pci_enable_device(pdev);
2361 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2365 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2366 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2368 "pci_set_consistent_dma_mask failed\n");
2374 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2376 "pci_set_dma_mask failed\n");
2383 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2384 vmxnet3_driver_name);
2387 "Failed to request region for adapter: error %d\n", err);
2391 pci_set_master(pdev);
2393 mmio_start = pci_resource_start(pdev, 0);
2394 mmio_len = pci_resource_len(pdev, 0);
2395 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2396 if (!adapter->hw_addr0) {
2397 dev_err(&pdev->dev, "Failed to map bar0\n");
2402 mmio_start = pci_resource_start(pdev, 1);
2403 mmio_len = pci_resource_len(pdev, 1);
2404 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2405 if (!adapter->hw_addr1) {
2406 dev_err(&pdev->dev, "Failed to map bar1\n");
2413 iounmap(adapter->hw_addr0);
2415 pci_release_selected_regions(pdev, (1 << 2) - 1);
2417 pci_disable_device(pdev);
2423 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2425 BUG_ON(!adapter->pdev);
2427 iounmap(adapter->hw_addr0);
2428 iounmap(adapter->hw_addr1);
2429 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2430 pci_disable_device(adapter->pdev);
2435 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2437 size_t sz, i, ring0_size, ring1_size, comp_size;
2438 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2441 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2442 VMXNET3_MAX_ETH_HDR_SIZE) {
2443 adapter->skb_buf_size = adapter->netdev->mtu +
2444 VMXNET3_MAX_ETH_HDR_SIZE;
2445 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2446 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2448 adapter->rx_buf_per_pkt = 1;
2450 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2451 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2452 VMXNET3_MAX_ETH_HDR_SIZE;
2453 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
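/* Worked example with illustrative values (the real constants are defined in
 * vmxnet3_int.h): for an MTU of 9000, a 3072-byte VMXNET3_MAX_SKB_BUF_SIZE and
 * a 22-byte VMXNET3_MAX_ETH_HDR_SIZE, sz = 9000 - 3072 + 22 = 5950; with 4 KB
 * pages that rounds up to 2 page-sized buffers, so rx_buf_per_pkt = 3
 * (one skb buffer plus two page-sized buffers per packet).
 */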
2457 * for simplicity, force the ring0 size to be a multiple of
2458 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2460 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2461 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2462 ring0_size = (ring0_size + sz - 1) / sz * sz;
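/* e.g. (illustrative numbers) with rx_buf_per_pkt = 3 and a ring alignment of
 * 32, sz = 96, so a requested ring0 size of 256 is rounded up to 288, the next
 * multiple of 96.
 */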
2463 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2465 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2466 comp_size = ring0_size + ring1_size;
2468 for (i = 0; i < adapter->num_rx_queues; i++) {
2469 rq = &adapter->rx_queue[i];
2470 rq->rx_ring[0].size = ring0_size;
2471 rq->rx_ring[1].size = ring1_size;
2472 rq->comp_ring.size = comp_size;
2478 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2479 u32 rx_ring_size, u32 rx_ring2_size)
2483 for (i = 0; i < adapter->num_tx_queues; i++) {
2484 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2485 tq->tx_ring.size = tx_ring_size;
2486 tq->data_ring.size = tx_ring_size;
2487 tq->comp_ring.size = tx_ring_size;
2488 tq->shared = &adapter->tqd_start[i].ctrl;
2490 tq->adapter = adapter;
2492 err = vmxnet3_tq_create(tq, adapter);
2494 * Too late to change num_tx_queues. We cannot make do with
2495 * fewer queues than we asked for.
2501 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2502 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2503 vmxnet3_adjust_rx_ring_size(adapter);
2504 for (i = 0; i < adapter->num_rx_queues; i++) {
2505 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2506 /* qid and qid2 for rx queues will be assigned later, once the
2507 * number of rx queues is finalized after allocating intrs */
2508 rq->shared = &adapter->rqd_start[i].ctrl;
2509 rq->adapter = adapter;
2510 err = vmxnet3_rq_create(rq, adapter);
2513 netdev_err(adapter->netdev,
2514 "Could not allocate any rx queues. "
2518 netdev_info(adapter->netdev,
2519 "Number of rx queues changed "
2521 adapter->num_rx_queues = i;
2529 vmxnet3_tq_destroy_all(adapter);
2534 vmxnet3_open(struct net_device *netdev)
2536 struct vmxnet3_adapter *adapter;
2539 adapter = netdev_priv(netdev);
2541 for (i = 0; i < adapter->num_tx_queues; i++)
2542 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2544 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2545 VMXNET3_DEF_RX_RING_SIZE,
2546 VMXNET3_DEF_RX_RING_SIZE);
2550 err = vmxnet3_activate_dev(adapter);
2557 vmxnet3_rq_destroy_all(adapter);
2558 vmxnet3_tq_destroy_all(adapter);
2565 vmxnet3_close(struct net_device *netdev)
2567 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2570 * Reset_work may be in the middle of resetting the device, wait for its
2573 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2576 vmxnet3_quiesce_dev(adapter);
2578 vmxnet3_rq_destroy_all(adapter);
2579 vmxnet3_tq_destroy_all(adapter);
2581 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2589 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2594 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2595 * vmxnet3_close() will deadlock.
2597 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2599 /* we need to enable NAPI, otherwise dev_close will deadlock */
2600 for (i = 0; i < adapter->num_rx_queues; i++)
2601 napi_enable(&adapter->rx_queue[i].napi);
2602 dev_close(adapter->netdev);
2607 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2609 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2612 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2615 netdev->mtu = new_mtu;
2618 * Reset_work may be in the middle of resetting the device, wait for its
2621 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2624 if (netif_running(netdev)) {
2625 vmxnet3_quiesce_dev(adapter);
2626 vmxnet3_reset_dev(adapter);
2628 /* we need to re-create the rx queues based on the new mtu */
2629 vmxnet3_rq_destroy_all(adapter);
2630 vmxnet3_adjust_rx_ring_size(adapter);
2631 err = vmxnet3_rq_create_all(adapter);
2634 "failed to re-create rx queues, "
2635 " error %d. Closing it.\n", err);
2639 err = vmxnet3_activate_dev(adapter);
2642 "failed to re-activate, error %d. "
2643 "Closing it\n", err);
2649 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2651 vmxnet3_force_close(adapter);
2658 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2660 struct net_device *netdev = adapter->netdev;
2662 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2663 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
2664 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
2667 netdev->hw_features |= NETIF_F_HIGHDMA;
2668 netdev->vlan_features = netdev->hw_features &
2669 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2670 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
2675 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2679 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2682 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2683 mac[4] = tmp & 0xff;
2684 mac[5] = (tmp >> 8) & 0xff;
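/* The device keeps the station address split across two registers: MACL
 * carries the first four bytes and MACH the remaining two, matching
 * vmxnet3_write_mac_addr() above, which writes mac[4] and mac[5] to MACH.
 */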
2687 #ifdef CONFIG_PCI_MSI
2690 * Enable MSIx vectors.
2692 * 0 on successful enabling of required vectors,
2693 * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
2695 * number of vectors which can be enabled otherwise (this number is smaller
2696 * than VMXNET3_LINUX_MIN_MSIX_VECT)
2700 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2703 int err = 0, vector_threshold;
2704 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
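/* pci_enable_msix() returns 0 on success, a negative errno on a hard failure,
 * or a positive count of vectors that could have been allocated when the
 * request was too large; the loop below uses that to fall back to the minimum
 * vector count before giving up.
 */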
2706 while (vectors >= vector_threshold) {
2707 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2710 adapter->intr.num_intrs = vectors;
2712 } else if (err < 0) {
2713 dev_err(&adapter->netdev->dev,
2714 "Failed to enable MSI-X, error: %d\n", err);
2716 } else if (err < vector_threshold) {
2719 /* If we fail to enable the required number of MSI-X vectors,
2720 * try enabling the minimum number of vectors required.
2722 dev_err(&adapter->netdev->dev,
2723 "Failed to enable %d MSI-X, trying %d instead\n",
2724 vectors, vector_threshold);
2725 vectors = vector_threshold;
2729 dev_info(&adapter->pdev->dev,
2730 "Number of MSI-X interrupts which can be allocated "
2731 "is lower than min threshold required.\n");
2736 #endif /* CONFIG_PCI_MSI */
2739 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2742 unsigned long flags;
2745 spin_lock_irqsave(&adapter->cmd_lock, flags);
2746 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2747 VMXNET3_CMD_GET_CONF_INTR);
2748 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2749 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2750 adapter->intr.type = cfg & 0x3;
2751 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2753 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2754 adapter->intr.type = VMXNET3_IT_MSIX;
2757 #ifdef CONFIG_PCI_MSI
2758 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2759 int vector, err = 0;
2761 adapter->intr.num_intrs = (adapter->share_intr ==
2762 VMXNET3_INTR_TXSHARE) ? 1 :
2763 adapter->num_tx_queues;
2764 adapter->intr.num_intrs += (adapter->share_intr ==
2765 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2766 adapter->num_rx_queues;
2767 adapter->intr.num_intrs += 1; /* for link event */
2769 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2770 VMXNET3_LINUX_MIN_MSIX_VECT
2771 ? adapter->intr.num_intrs :
2772 VMXNET3_LINUX_MIN_MSIX_VECT);
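/* Illustrative example: with 4 tx queues, 4 rx queues and no interrupt
 * sharing this requests 4 + 4 + 1 = 9 MSI-X vectors (the extra one is for
 * link events), then clamps the request up to at least
 * VMXNET3_LINUX_MIN_MSIX_VECT.
 */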
2774 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2775 adapter->intr.msix_entries[vector].entry = vector;
2777 err = vmxnet3_acquire_msix_vectors(adapter,
2778 adapter->intr.num_intrs);
2779 /* If we cannot allocate one MSI-X vector per queue,
2780 * then limit the number of rx queues to 1.
2782 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2783 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2784 || adapter->num_rx_queues != 1) {
2785 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2786 netdev_err(adapter->netdev,
2787 "Number of rx queues : 1\n");
2788 adapter->num_rx_queues = 1;
2789 adapter->intr.num_intrs =
2790 VMXNET3_LINUX_MIN_MSIX_VECT;
2797 /* If we cannot allocate MSI-X vectors, use only one rx queue */
2798 dev_info(&adapter->pdev->dev,
2799 "Failed to enable MSI-X, error %d. "
2800 "Limiting #rx queues to 1, try MSI.\n", err);
2802 adapter->intr.type = VMXNET3_IT_MSI;
2805 if (adapter->intr.type == VMXNET3_IT_MSI) {
2807 err = pci_enable_msi(adapter->pdev);
2809 adapter->num_rx_queues = 1;
2810 adapter->intr.num_intrs = 1;
2814 #endif /* CONFIG_PCI_MSI */
2816 adapter->num_rx_queues = 1;
2817 dev_info(&adapter->netdev->dev,
2818 "Using INTx interrupt, #Rx queues: 1.\n");
2819 adapter->intr.type = VMXNET3_IT_INTX;
2821 /* INT-X related setting */
2822 adapter->intr.num_intrs = 1;
2827 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2829 if (adapter->intr.type == VMXNET3_IT_MSIX)
2830 pci_disable_msix(adapter->pdev);
2831 else if (adapter->intr.type == VMXNET3_IT_MSI)
2832 pci_disable_msi(adapter->pdev);
2834 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2839 vmxnet3_tx_timeout(struct net_device *netdev)
2841 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2842 adapter->tx_timeout_count++;
2844 netdev_err(adapter->netdev, "tx hang\n");
2845 schedule_work(&adapter->work);
2846 netif_wake_queue(adapter->netdev);
2851 vmxnet3_reset_work(struct work_struct *data)
2853 struct vmxnet3_adapter *adapter;
2855 adapter = container_of(data, struct vmxnet3_adapter, work);
2857 /* if another thread is resetting the device, no need to proceed */
2858 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2861 /* if the device is closed, we must leave it alone */
2863 if (netif_running(adapter->netdev)) {
2864 netdev_notice(adapter->netdev, "resetting\n");
2865 vmxnet3_quiesce_dev(adapter);
2866 vmxnet3_reset_dev(adapter);
2867 vmxnet3_activate_dev(adapter);
2869 netdev_info(adapter->netdev, "already closed\n");
2873 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2878 vmxnet3_probe_device(struct pci_dev *pdev,
2879 const struct pci_device_id *id)
2881 static const struct net_device_ops vmxnet3_netdev_ops = {
2882 .ndo_open = vmxnet3_open,
2883 .ndo_stop = vmxnet3_close,
2884 .ndo_start_xmit = vmxnet3_xmit_frame,
2885 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2886 .ndo_change_mtu = vmxnet3_change_mtu,
2887 .ndo_set_features = vmxnet3_set_features,
2888 .ndo_get_stats64 = vmxnet3_get_stats64,
2889 .ndo_tx_timeout = vmxnet3_tx_timeout,
2890 .ndo_set_rx_mode = vmxnet3_set_mc,
2891 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2892 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2893 #ifdef CONFIG_NET_POLL_CONTROLLER
2894 .ndo_poll_controller = vmxnet3_netpoll,
2898 bool dma64 = false; /* initialized only to quiet a gcc "may be used uninitialized" warning */
2900 struct net_device *netdev;
2901 struct vmxnet3_adapter *adapter;
2907 if (!pci_msi_enabled())
2912 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2913 (int)num_online_cpus());
2917 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
2920 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2921 (int)num_online_cpus());
2925 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
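/* e.g. (illustrative) a guest with 6 online CPUs and a device maximum of 8
 * queues ends up with min(8, 6) = 6, rounded down to the nearest power of
 * two: 4 tx and 4 rx queues.
 */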
2926 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2927 max(num_tx_queues, num_rx_queues));
2928 dev_info(&pdev->dev,
2929 "# of Tx queues : %d, # of Rx queues : %d\n",
2930 num_tx_queues, num_rx_queues);
2935 pci_set_drvdata(pdev, netdev);
2936 adapter = netdev_priv(netdev);
2937 adapter->netdev = netdev;
2938 adapter->pdev = pdev;
2940 spin_lock_init(&adapter->cmd_lock);
2941 adapter->shared = pci_alloc_consistent(adapter->pdev,
2942 sizeof(struct Vmxnet3_DriverShared),
2943 &adapter->shared_pa);
2944 if (!adapter->shared) {
2945 dev_err(&pdev->dev, "Failed to allocate memory\n");
2947 goto err_alloc_shared;
2950 adapter->num_rx_queues = num_rx_queues;
2951 adapter->num_tx_queues = num_tx_queues;
2953 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2954 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2955 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2956 &adapter->queue_desc_pa);
2958 if (!adapter->tqd_start) {
2959 dev_err(&pdev->dev, "Failed to allocate memory\n");
2961 goto err_alloc_queue_desc;
2963 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2964 adapter->num_tx_queues);
2966 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2967 if (adapter->pm_conf == NULL) {
2974 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2975 if (adapter->rss_conf == NULL) {
2979 #endif /* VMXNET3_RSS */
2981 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2985 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2987 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2990 "Incompatible h/w version (0x%x) for adapter\n", ver);
2995 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2997 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3000 "Incompatible upt version (0x%x) for adapter\n", ver);
3005 SET_NETDEV_DEV(netdev, &pdev->dev);
3006 vmxnet3_declare_features(adapter, dma64);
3008 adapter->dev_number = atomic_read(&devices_found);
3010 adapter->share_intr = irq_share_mode;
3011 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
3012 adapter->num_tx_queues != adapter->num_rx_queues)
3013 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
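/* Buddy sharing pairs each tx queue with the rx queue of the same index on
 * one vector, so it is only usable when the tx and rx queue counts match;
 * otherwise fall back to dedicated vectors.
 */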
3015 vmxnet3_alloc_intr_resources(adapter);
3018 if (adapter->num_rx_queues > 1 &&
3019 adapter->intr.type == VMXNET3_IT_MSIX) {
3020 adapter->rss = true;
3021 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3023 adapter->rss = false;
3027 vmxnet3_read_mac_addr(adapter, mac);
3028 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3030 netdev->netdev_ops = &vmxnet3_netdev_ops;
3031 vmxnet3_set_ethtool_ops(netdev);
3032 netdev->watchdog_timeo = 5 * HZ;
3034 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3035 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3037 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3039 for (i = 0; i < adapter->num_rx_queues; i++) {
3040 netif_napi_add(adapter->netdev,
3041 &adapter->rx_queue[i].napi,
3042 vmxnet3_poll_rx_only, 64);
3045 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3049 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3050 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3052 err = register_netdev(netdev);
3055 dev_err(&pdev->dev, "Failed to register adapter\n");
3059 vmxnet3_check_link(adapter, false);
3060 atomic_inc(&devices_found);
3064 vmxnet3_free_intr_resources(adapter);
3066 vmxnet3_free_pci_resources(adapter);
3069 kfree(adapter->rss_conf);
3072 kfree(adapter->pm_conf);
3074 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3075 adapter->queue_desc_pa);
3076 err_alloc_queue_desc:
3077 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3078 adapter->shared, adapter->shared_pa);
3080 pci_set_drvdata(pdev, NULL);
3081 free_netdev(netdev);
3087 vmxnet3_remove_device(struct pci_dev *pdev)
3089 struct net_device *netdev = pci_get_drvdata(pdev);
3090 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3096 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3097 (int)num_online_cpus());
3101 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3103 cancel_work_sync(&adapter->work);
3105 unregister_netdev(netdev);
3107 vmxnet3_free_intr_resources(adapter);
3108 vmxnet3_free_pci_resources(adapter);
3110 kfree(adapter->rss_conf);
3112 kfree(adapter->pm_conf);
3114 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3115 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3116 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3117 adapter->queue_desc_pa);
3118 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3119 adapter->shared, adapter->shared_pa);
3120 free_netdev(netdev);
3127 vmxnet3_suspend(struct device *device)
3129 struct pci_dev *pdev = to_pci_dev(device);
3130 struct net_device *netdev = pci_get_drvdata(pdev);
3131 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3132 struct Vmxnet3_PMConf *pmConf;
3133 struct ethhdr *ehdr;
3134 struct arphdr *ahdr;
3136 struct in_device *in_dev;
3137 struct in_ifaddr *ifa;
3138 unsigned long flags;
3141 if (!netif_running(netdev))
3144 for (i = 0; i < adapter->num_rx_queues; i++)
3145 napi_disable(&adapter->rx_queue[i].napi);
3147 vmxnet3_disable_all_intrs(adapter);
3148 vmxnet3_free_irqs(adapter);
3149 vmxnet3_free_intr_resources(adapter);
3151 netif_device_detach(netdev);
3152 netif_tx_stop_all_queues(netdev);
3154 /* Create wake-up filters. */
3155 pmConf = adapter->pm_conf;
3156 memset(pmConf, 0, sizeof(*pmConf));
3158 if (adapter->wol & WAKE_UCAST) {
3159 pmConf->filters[i].patternSize = ETH_ALEN;
3160 pmConf->filters[i].maskSize = 1;
3161 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3162 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3164 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3168 if (adapter->wol & WAKE_ARP) {
3169 in_dev = in_dev_get(netdev);
3173 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3177 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3178 sizeof(struct arphdr) + /* ARP header */
3179 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3180 2 * sizeof(u32); /* 2 IPv4 addresses */
3181 pmConf->filters[i].maskSize =
3182 (pmConf->filters[i].patternSize - 1) / 8 + 1;
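/* i.e. DIV_ROUND_UP(patternSize, 8): the wake-up filter mask carries one bit
 * per pattern byte (cf. the 0x3F mask covering the 6-byte MAC pattern above),
 * so eight pattern bytes fit in each mask byte.
 */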
3184 /* ETH_P_ARP in Ethernet header. */
3185 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3186 ehdr->h_proto = htons(ETH_P_ARP);
3188 /* ARPOP_REQUEST in ARP header. */
3189 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3190 ahdr->ar_op = htons(ARPOP_REQUEST);
3191 arpreq = (u8 *)(ahdr + 1);
3193 /* The Unicast IPv4 address in 'tip' field. */
3194 arpreq += 2 * ETH_ALEN + sizeof(u32);
3195 *(u32 *)arpreq = ifa->ifa_address;
3197 /* The mask for the relevant bits. */
3198 pmConf->filters[i].mask[0] = 0x00;
3199 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3200 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3201 pmConf->filters[i].mask[3] = 0x00;
3202 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3203 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
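/* With one mask bit per pattern byte, the bits set above select pattern
 * bytes 12-13 (the ARP EtherType), 20-21 (the ARP opcode) and 38-41 (the
 * target IP of the request), matching the layout of an ARP request over
 * Ethernet.
 */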
3206 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3211 if (adapter->wol & WAKE_MAGIC)
3212 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3214 pmConf->numFilters = i;
3216 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3217 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3219 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3222 spin_lock_irqsave(&adapter->cmd_lock, flags);
3223 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3224 VMXNET3_CMD_UPDATE_PMCFG);
3225 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3227 pci_save_state(pdev);
3228 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3230 pci_disable_device(pdev);
3231 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3238 vmxnet3_resume(struct device *device)
3241 unsigned long flags;
3242 struct pci_dev *pdev = to_pci_dev(device);
3243 struct net_device *netdev = pci_get_drvdata(pdev);
3244 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3245 struct Vmxnet3_PMConf *pmConf;
3247 if (!netif_running(netdev))
3250 /* Destroy wake-up filters. */
3251 pmConf = adapter->pm_conf;
3252 memset(pmConf, 0, sizeof(*pmConf));
3254 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3255 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3257 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3260 netif_device_attach(netdev);
3261 pci_set_power_state(pdev, PCI_D0);
3262 pci_restore_state(pdev);
3263 err = pci_enable_device_mem(pdev);
3267 pci_enable_wake(pdev, PCI_D0, 0);
3269 spin_lock_irqsave(&adapter->cmd_lock, flags);
3270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3271 VMXNET3_CMD_UPDATE_PMCFG);
3272 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3273 vmxnet3_alloc_intr_resources(adapter);
3274 vmxnet3_request_irqs(adapter);
3275 for (i = 0; i < adapter->num_rx_queues; i++)
3276 napi_enable(&adapter->rx_queue[i].napi);
3277 vmxnet3_enable_all_intrs(adapter);
3282 static const struct dev_pm_ops vmxnet3_pm_ops = {
3283 .suspend = vmxnet3_suspend,
3284 .resume = vmxnet3_resume,
3288 static struct pci_driver vmxnet3_driver = {
3289 .name = vmxnet3_driver_name,
3290 .id_table = vmxnet3_pciid_table,
3291 .probe = vmxnet3_probe_device,
3292 .remove = vmxnet3_remove_device,
3294 .driver.pm = &vmxnet3_pm_ops,
3300 vmxnet3_init_module(void)
3302 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3303 VMXNET3_DRIVER_VERSION_REPORT);
3304 return pci_register_driver(&vmxnet3_driver);
3307 module_init(vmxnet3_init_module);
3311 vmxnet3_exit_module(void)
3313 pci_unregister_driver(&vmxnet3_driver);
3316 module_exit(vmxnet3_exit_module);
3318 MODULE_AUTHOR("VMware, Inc.");
3319 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3320 MODULE_LICENSE("GPL v2");
3321 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);