diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 26ee1791aa0215cf8aa72f028e5e8fcec937a235..da1019451ccbe68185af1db8b7db3bb30a305c8f 100644 (file)
@@ -50,10 +50,12 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-       struct netpoll_info *npinfo = p;
+       struct netpoll_info *npinfo =
+               container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
+       unsigned long flags;
 
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
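
The hunk above is the 2.6.20 workqueue conversion: work handlers now take a struct work_struct * instead of a void * context, and recover their object with container_of(). Since tx_work becomes a struct delayed_work (see the INIT_DELAYED_WORK hunk further down), the embedded work_struct lives at tx_work.work. A minimal sketch of the pattern, with hypothetical my_info/my_work_fn names:

#include <linux/workqueue.h>
#include <linux/skbuff.h>

struct my_info {
        struct sk_buff_head txq;
        struct delayed_work tx_work;    /* embeds a struct work_struct */
};

static void my_work_fn(struct work_struct *work)
{
        /* recover the enclosing object from the embedded member */
        struct my_info *info =
                container_of(work, struct my_info, tx_work.work);
        /* ... drain info->txq here ... */
}

Setup pairs INIT_DELAYED_WORK(&info->tx_work, my_work_fn) with schedule_delayed_work(&info->tx_work, delay); no context pointer is passed, because the context is implied by the embedding.
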
@@ -63,22 +65,24 @@ static void queue_process(void *p)
                        continue;
                }
 
-               netif_tx_lock_bh(dev);
+               local_irq_save(flags);
+               netif_tx_lock(dev);
                if (netif_queue_stopped(dev) ||
                    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
-                       netif_tx_unlock_bh(dev);
+                       netif_tx_unlock(dev);
+                       local_irq_restore(flags);
 
                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
-
-               netif_tx_unlock_bh(dev);
+               netif_tx_unlock(dev);
+               local_irq_restore(flags);
        }
 }
 
-static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-                       unsigned short ulen, __be32 saddr, __be32 daddr)
+static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+                           unsigned short ulen, __be32 saddr, __be32 daddr)
 {
        __wsum psum;
 
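
Switching the return type from int to __sum16 lets sparse type-check the checksum arithmetic: __wsum is the 32-bit unfolded accumulator, __sum16 the folded 16-bit one's-complement value that actually appears on the wire. A sketch of how the two relate (illustrative, not the body of checksum_udp):

#include <net/checksum.h>

static __sum16 fold_example(const unsigned char *buf, int len)
{
        __wsum partial = csum_partial(buf, len, 0); /* 32-bit running sum */
        return csum_fold(partial);  /* fold to the 16-bit wire format;
                                     * when verifying, 0 means "good" */
}
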
@@ -233,37 +237,43 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
-       struct net_device *dev = np->dev;
-       struct netpoll_info *npinfo = np->dev->npinfo;
+       struct net_device *dev = np->dev;
+       struct netpoll_info *npinfo = np->dev->npinfo;
 
-       if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
-               __kfree_skb(skb);
-               return;
-       }
+       if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+               __kfree_skb(skb);
+               return;
+       }
 
        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 &&
-           npinfo->poll_owner != smp_processor_id() &&
-           netif_tx_trylock(dev)) {
-               /* try until next clock tick */
-               for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
-                       if (!netif_queue_stopped(dev))
-                               status = dev->hard_start_xmit(skb, dev);
+                   npinfo->poll_owner != smp_processor_id()) {
+               unsigned long flags;
 
-                       if (status == NETDEV_TX_OK)
-                               break;
+               local_irq_save(flags);
+               if (netif_tx_trylock(dev)) {
+                       /* try until next clock tick */
+                       for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+                                       tries > 0; --tries) {
+                               if (!netif_queue_stopped(dev))
+                                       status = dev->hard_start_xmit(skb, dev);
 
-                       /* tickle device maybe there is some cleanup */
-                       netpoll_poll(np);
+                               if (status == NETDEV_TX_OK)
+                                       break;
+
+                               /* tickle device maybe there is some cleanup */
+                               netpoll_poll(np);
 
-                       udelay(USEC_PER_POLL);
+                               udelay(USEC_PER_POLL);
+                       }
+                       netif_tx_unlock(dev);
                }
-               netif_tx_unlock(dev);
+               local_irq_restore(flags);
        }
 
        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
-               schedule_work(&npinfo->tx_work);
+               schedule_delayed_work(&npinfo->tx_work, 0);
        }
 }
 
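
netpoll_send_skb() can be entered from hard-irq context (the netconsole printk path, for instance), where the _bh lock variants are unsafe: netif_tx_unlock_bh() ends in local_bh_enable(), which may run softirqs. Hence the explicit irq disable around the plain trylock above. The idiom in isolation (a sketch of the pattern, not a new API):

unsigned long flags;

local_irq_save(flags);          /* safe even if irqs are already off */
if (netif_tx_trylock(dev)) {
        /* ... attempt dev->hard_start_xmit() here ... */
        netif_tx_unlock(dev);
}
local_irq_restore(flags);

Note also that schedule_delayed_work(&npinfo->tx_work, 0) queues the work immediately; the zero delay exists only because tx_work is now a delayed_work rather than a plain work_struct.
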
@@ -296,7 +306,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
                                        udp_len, IPPROTO_UDP,
                                        csum_partial((unsigned char *)udph, udp_len, 0));
        if (udph->check == 0)
-               udph->check = -1;
+               udph->check = CSUM_MANGLED_0;
 
        skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 
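
RFC 768 reserves a transmitted UDP checksum of zero to mean "no checksum computed", so a genuinely zero result must be sent as all-ones. CSUM_MANGLED_0, defined as ((__force __sum16)0xffff) in <net/checksum.h>, names what the old bare -1 spelled by accident. In context (saddr/daddr stand in for the address arguments elided from this hunk):

udph->check = csum_tcpudp_magic(saddr, daddr, udp_len, IPPROTO_UDP,
                                csum_partial((unsigned char *)udph,
                                             udp_len, 0));
if (udph->check == 0)
        udph->check = CSUM_MANGLED_0;   /* 0 on the wire would mean "none" */
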
@@ -331,6 +341,7 @@ static void arp_reply(struct sk_buff *skb)
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
+       unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;
 
@@ -357,9 +368,14 @@ static void arp_reply(struct sk_buff *skb)
            arp->ar_op != htons(ARPOP_REQUEST))
                return;
 
-       arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
+       arp_ptr = (unsigned char *)(arp+1);
+       /* save the location of the src hw addr */
+       sha = arp_ptr;
+       arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
-       arp_ptr += 4 + skb->dev->addr_len;
+       arp_ptr += 4;
+       /* if we actually cared about dst hw addr, it would get copied here */
+       arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);
 
        /* Should we ignore arp? */
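
The pointer walk above follows the fixed ARP payload layout; the patch now records the sender hardware address (sha) instead of skipping past it, so the reply can be addressed to whoever actually asked. For ARP over Ethernet/IPv4 the payload after struct arphdr is:

/*
 *      sender hw addr  ar_hln (6) bytes   <- sha points here
 *      sender IP addr  4 bytes            -> copied to sip
 *      target hw addr  ar_hln (6) bytes   (not needed, skipped)
 *      target IP addr  4 bytes            -> copied to tip
 */
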
@@ -382,7 +398,7 @@ static void arp_reply(struct sk_buff *skb)
 
        if (np->dev->hard_header &&
            np->dev->hard_header(send_skb, skb->dev, ptype,
-                                np->remote_mac, np->local_mac,
+                                sha, np->local_mac,
                                 send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
@@ -406,7 +422,7 @@ static void arp_reply(struct sk_buff *skb)
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
-       memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
+       memcpy(arp_ptr, sha, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);
 
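
In the reply the roles swap: the box answers as the owner of tip, and both the Ethernet header and the ARP target fields are now addressed to the captured sha rather than the statically configured np->remote_mac, so requests from hosts other than the configured log receiver get correct replies. Field order of the reply body as built above (a sketch; the sender-hw copy sits just before this hunk's context):

/*
 *      sender hw  = our dev_addr       (written just above this hunk)
 *      sender IP  = tip                (the address that was queried)
 *      target hw  = sha                (was np->remote_mac)
 *      target IP  = sip                (the requester's address)
 */
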
@@ -628,7 +644,7 @@ int netpoll_setup(struct netpoll *np)
                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
-               INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
                atomic_set(&npinfo->refcnt, 1);
        } else {
@@ -660,7 +676,7 @@ int netpoll_setup(struct netpoll *np)
                }
 
                atleast = jiffies + HZ/10;
-               atmost = jiffies + 4*HZ;
+               atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
@@ -756,9 +772,9 @@ void netpoll_cleanup(struct netpoll *np)
                        np->dev->npinfo = NULL;
                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                skb_queue_purge(&npinfo->arp_tx);
-                               skb_queue_purge(&npinfo->txq);
+                               skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);
-                               flush_scheduled_work();
+                               flush_scheduled_work();
 
                                kfree(npinfo);
                        }
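
Teardown has to cope with queue_process() re-arming itself (the schedule_delayed_work(..., HZ/10) retry above): purge the queue so nothing can be re-queued, cancel the self-rearming delayed work, then flush the workqueue so no handler is still running when the structure it points into is freed. The ordering in isolation (sketch):

skb_queue_purge(&npinfo->txq);                  /* nothing left to retransmit */
cancel_rearming_delayed_work(&npinfo->tx_work); /* cancels until it stays cancelled */
flush_scheduled_work();                         /* wait for any running handler */
kfree(npinfo);                                  /* now no work item references npinfo */
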