Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[linux-drm-fsl-dcu.git] / drivers/net/bnx2.c
index 381887ba677c36b39e422e7f76273a5dbfa4f2ff..ab26bbc2a1d39c7a331990a22c2c4b75f36d4a98 100644
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.0.8"
-#define DRV_MODULE_RELDATE     "Feb 15, 2010"
+#define DRV_MODULE_VERSION     "2.0.9"
+#define DRV_MODULE_RELDATE     "April 27, 2010"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static void bnx2_init_napi(struct bnx2 *bp);
+
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
        u32 diff;
@@ -649,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 {
-       bnx2_cnic_stop(bp);
+       if (stop_cnic)
+               bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;
 
@@ -669,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 {
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
-                       bnx2_cnic_start(bp);
+                       if (start_cnic)
+                               bnx2_cnic_start(bp);
                }
        }
 }
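
Note: the new flag lets callers that only need to quiesce the netdev skip bouncing the cnic (iSCSI offload) sub-driver; the full-reset paths below pass true, while the VLAN-group update passes false. A minimal sketch of the resulting call pattern, where bnx2_reconfigure_hw() is a hypothetical stand-in for whatever reconfiguration the caller performs:

static void bnx2_reconfigure(struct bnx2 *bp, bool touches_cnic)
{
        /* Quiesce the fast path; stop the cnic sub-driver only when
         * the reconfiguration actually disturbs its state. */
        bnx2_netif_stop(bp, touches_cnic);
        bnx2_reconfigure_hw(bp);                /* hypothetical */
        bnx2_netif_start(bp, touches_cnic);
}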
@@ -2668,7 +2672,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        }
 
        rx_pg->page = page;
-       pci_unmap_addr_set(rx_pg, mapping, mapping);
+       dma_unmap_addr_set(rx_pg, mapping, mapping);
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
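
Note: this and the following hunks convert the driver from the PCI-specific pci_unmap_addr() helpers to the generic dma_unmap_addr() family from <linux/dma-mapping.h>. Both read and write a field declared with DEFINE_DMA_UNMAP_ADDR(), which expands to a real dma_addr_t only on platforms that need the address at unmap time and to nothing elsewhere. A minimal sketch with an illustrative struct rather than the driver's own:

#include <linux/dma-mapping.h>

struct rx_slot {
        struct page *page;
        DEFINE_DMA_UNMAP_ADDR(mapping); /* dma_addr_t, or compiled out */
};

static void rx_slot_save(struct rx_slot *s, dma_addr_t addr)
{
        dma_unmap_addr_set(s, mapping, addr);   /* keep for later unmap */
}

static dma_addr_t rx_slot_addr(struct rx_slot *s)
{
        return dma_unmap_addr(s, mapping);      /* 0 when compiled out */
}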
@@ -2683,7 +2687,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        if (!page)
                return;
 
-       pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+       pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
                       PCI_DMA_FROMDEVICE);
 
        __free_page(page);
@@ -2715,7 +2719,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        }
 
        rx_buf->skb = skb;
-       pci_unmap_addr_set(rx_buf, mapping, mapping);
+       dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2814,7 +2818,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        }
                }
 
-               pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+               pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);
 
                tx_buf->skb = NULL;
@@ -2824,7 +2828,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        sw_cons = NEXT_TX_BD(sw_cons);
 
                        pci_unmap_page(bp->pdev,
-                               pci_unmap_addr(
+                               dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
@@ -2906,8 +2910,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
-                       pci_unmap_addr_set(prod_rx_pg, mapping,
-                               pci_unmap_addr(cons_rx_pg, mapping));
+                       dma_unmap_addr_set(prod_rx_pg, mapping,
+                               dma_unmap_addr(cons_rx_pg, mapping));
 
                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2931,7 +2935,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        prod_rx_buf = &rxr->rx_buf_ring[prod];
 
        pci_dma_sync_single_for_device(bp->pdev,
-               pci_unmap_addr(cons_rx_buf, mapping),
+               dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
@@ -2941,8 +2945,8 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        if (cons == prod)
                return;
 
-       pci_unmap_addr_set(prod_rx_buf, mapping,
-                       pci_unmap_addr(cons_rx_buf, mapping));
+       dma_unmap_addr_set(prod_rx_buf, mapping,
+                       dma_unmap_addr(cons_rx_buf, mapping));
 
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3015,7 +3019,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
-                       mapping_old = pci_unmap_addr(rx_pg, mapping);
+                       mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;
 
@@ -3096,7 +3100,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                rx_buf->skb = NULL;
 
-               dma_addr = pci_unmap_addr(rx_buf, mapping);
+               dma_addr = dma_unmap_addr(rx_buf, mapping);
 
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
@@ -3544,7 +3548,6 @@ bnx2_set_rx_mode(struct net_device *dev)
        }
        else {
                /* Accept one or more multicast(s). */
-               struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
@@ -3552,8 +3555,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 
                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
 
-               netdev_for_each_mc_addr(mclist, dev) {
-                       crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+               netdev_for_each_mc_addr(ha, dev) {
+                       crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
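
Note: the old dev_mc_list walk becomes a netdev_for_each_mc_addr() walk over struct netdev_hw_addr entries; the `ha` iterator is declared near the top of bnx2_set_rx_mode() (it already serves the unicast list), so only the removal of `mclist` shows in this hunk. Condensed from the code above, with the index arithmetic annotated (`mc_filter` and `dev` come from the surrounding function):

struct netdev_hw_addr *ha;              /* declared earlier in the function */

netdev_for_each_mc_addr(ha, dev) {
        u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
        u32 bit = crc & 0xff;           /* 256-bit hash -> bit index */
        u32 regidx = (bit & 0xe0) >> 5; /* high 3 bits pick one of 8 regs */

        bit &= 0x1f;                    /* low 5 bits pick the bit */
        mc_filter[regidx] |= (1 << bit);
}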
@@ -4757,8 +4760,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
                rc = bnx2_alloc_bad_rbuf(bp);
        }
 
-       if (bp->flags & BNX2_FLAG_USING_MSIX)
+       if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
+       /* Prevent MSIX table reads and writes from timing out */
+               REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+                       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+       }
 
        return rc;
 }
@@ -5310,7 +5317,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        }
 
                        pci_unmap_single(bp->pdev,
-                                        pci_unmap_addr(tx_buf, mapping),
+                                        dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);
 
@@ -5321,7 +5328,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        for (k = 0; k < last; k++, j++) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                pci_unmap_page(bp->pdev,
-                                       pci_unmap_addr(tx_buf, mapping),
+                                       dma_unmap_addr(tx_buf, mapping),
                                        skb_shinfo(skb)->frags[k].size,
                                        PCI_DMA_TODEVICE);
                        }
@@ -5351,7 +5358,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
                                continue;
 
                        pci_unmap_single(bp->pdev,
-                                        pci_unmap_addr(rx_buf, mapping),
+                                        dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
 
@@ -5761,7 +5768,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        skb_reserve(rx_skb, BNX2_RX_OFFSET);
 
        pci_dma_sync_single_for_cpu(bp->pdev,
-               pci_unmap_addr(rx_buf, mapping),
+               dma_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
        if (rx_hdr->l2_fhdr_status &
@@ -6197,6 +6204,7 @@ bnx2_open(struct net_device *dev)
        bnx2_disable_int(bp);
 
        bnx2_setup_int_mode(bp, disable_msi);
+       bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
@@ -6270,12 +6278,12 @@ bnx2_reset_task(struct work_struct *work)
                return;
        }
 
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
 
        bnx2_init_nic(bp, 1);
 
        atomic_set(&bp->intr_sem, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        rtnl_unlock();
 }
 
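Note: bnx2_netif_start() only restarts the datapath when atomic_dec_and_test(&bp->intr_sem) reaches zero, so the reset task primes the counter to 1 to guarantee that exactly the start call that follows fires. A sketch (hypothetical helper name) of the gate, condensed from the two functions above:

static void bnx2_gate_restart(struct bnx2 *bp)
{
        atomic_set(&bp->intr_sem, 1);   /* prime: exactly one start fires */

        if (atomic_dec_and_test(&bp->intr_sem)) {
                /* count reached zero: last enabler re-enables IRQs
                 * and wakes the TX queues */
        }
}
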
@@ -6317,7 +6325,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
        struct bnx2 *bp = netdev_priv(dev);
 
        if (netif_running(dev))
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, false);
 
        bp->vlgrp = vlgrp;
 
@@ -6328,7 +6336,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, false);
 }
 #endif
 
@@ -6420,7 +6428,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
-       pci_unmap_addr_set(tx_buf, mapping, mapping);
+       dma_unmap_addr_set(tx_buf, mapping, mapping);
 
        txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6445,7 +6453,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
-               pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+               dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);
 
                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6482,7 +6490,7 @@ dma_error:
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
-       pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+       pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
 
        /* unmap remaining mapped pages */
@@ -6490,7 +6498,7 @@ dma_error:
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
-               pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+               pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }
@@ -7048,9 +7056,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
        if (netif_running(bp->dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_init_nic(bp, 0);
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
 
        return 0;
@@ -7080,7 +7088,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);
 
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
@@ -7108,7 +7116,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
 #endif
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
        return 0;
 }
@@ -7361,7 +7369,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;
 
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);
 
@@ -7380,7 +7388,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
-                       bnx2_netif_start(bp);
+                       bnx2_netif_start(bp, true);
                }
 
                /* wait for link up */
@@ -7643,9 +7651,11 @@ poll_bnx2(struct net_device *dev)
        int i;
 
        for (i = 0; i < bp->irq_nvecs; i++) {
-               disable_irq(bp->irq_tbl[i].vector);
-               bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-               enable_irq(bp->irq_tbl[i].vector);
+               struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+               disable_irq(irq->vector);
+               irq->handler(irq->vector, &bp->bnx2_napi[i]);
+               enable_irq(irq->vector);
        }
 }
 #endif
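
Note: with MSI-X the per-vector handler is not always bnx2_interrupt (the MSI variants differ), so netpoll now invokes the handler recorded in bp->irq_tbl when each vector was requested. The table-driven dispatch in isolation; all names except the pattern itself are illustrative:

#include <linux/interrupt.h>

struct irq_slot {
        unsigned int vector;
        irq_handler_t handler;          /* recorded at request_irq() time */
};

static void poll_vectors(struct irq_slot *tbl, int nvecs, void **ctx)
{
        int i;

        for (i = 0; i < nvecs; i++) {
                disable_irq(tbl[i].vector);     /* mask, then run by hand */
                tbl[i].handler(tbl[i].vector, ctx[i]);
                enable_irq(tbl[i].vector);
        }
}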
@@ -8207,7 +8217,7 @@ bnx2_init_napi(struct bnx2 *bp)
 {
        int i;
 
-       for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+       for (i = 0; i < bp->irq_nvecs; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                int (*poll)(struct napi_struct *, int);
 
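Note: bounding the loop by bp->irq_nvecs, which is known only after bnx2_setup_int_mode() has negotiated the vector count, is what forces the call to move from probe time into bnx2_open() above. Condensed, the registration now does roughly:

/* Condensed from bnx2_init_napi(): one NAPI context per granted
 * vector; vector 0 keeps the combined poll routine. */
for (i = 0; i < bp->irq_nvecs; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

        netif_napi_add(bp->dev, &bnapi->napi,
                       i ? bnx2_poll_msix : bnx2_poll, 64);
        bnapi->bp = bp;
}
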
@@ -8276,7 +8286,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->ethtool_ops = &bnx2_ethtool_ops;
 
        bp = netdev_priv(dev);
-       bnx2_init_napi(bp);
 
        pci_set_drvdata(pdev, dev);
 
@@ -8373,7 +8382,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
                return 0;
 
        flush_scheduled_work();
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
@@ -8395,7 +8404,7 @@ bnx2_resume(struct pci_dev *pdev)
        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        return 0;
 }
 
@@ -8422,7 +8431,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
        }
 
        if (netif_running(dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }
@@ -8479,7 +8488,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
        if (netif_running(dev))
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
 
        netif_device_attach(dev);
        rtnl_unlock();