arch/um/drivers/net_kern.c
/*
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include "linux/kernel.h"
#include "linux/netdevice.h"
#include "linux/rtnetlink.h"
#include "linux/skbuff.h"
#include "linux/socket.h"
#include "linux/spinlock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/etherdevice.h"
#include "linux/list.h"
#include "linux/inetdevice.h"
#include "linux/ctype.h"
#include "linux/bootmem.h"
#include "linux/ethtool.h"
#include "linux/platform_device.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "net_kern.h"
#include "net_user.h"
#include "mconsole_kern.h"
#include "init.h"
#include "irq_user.h"
#include "irq_kern.h"

static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
{
        memcpy(dev->dev_addr, addr, ETH_ALEN);
}

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

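/*
 * Read one packet from the host descriptor into a freshly allocated skb
 * and hand it to the network stack.  Returns the packet length on success,
 * 0 when there was nothing to read (or the skb allocation failed), and a
 * negative error code if the read itself failed.
 */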
static int uml_net_rx(struct net_device *dev)
{
        struct uml_net_private *lp = dev->priv;
        int pkt_len;
        struct sk_buff *skb;

        /* If we can't allocate memory, try again next round. */
        skb = dev_alloc_skb(dev->mtu);
        if (skb == NULL) {
                lp->stats.rx_dropped++;
                return 0;
        }

        skb->dev = dev;
        skb_put(skb, dev->mtu);
        skb->mac.raw = skb->data;
        pkt_len = (*lp->read)(lp->fd, &skb, lp);

        if (pkt_len > 0) {
                skb_trim(skb, pkt_len);
                skb->protocol = (*lp->protocol)(skb);
                netif_rx(skb);

                lp->stats.rx_bytes += skb->len;
                lp->stats.rx_packets++;
                return pkt_len;
        }

        kfree_skb(skb);
        return pkt_len;
}

static void uml_dev_close(void *dev)
{
        dev_close((struct net_device *) dev);
}

irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct uml_net_private *lp = dev->priv;
        int err;

        if(!netif_running(dev))
                return IRQ_NONE;

        spin_lock(&lp->lock);
        while((err = uml_net_rx(dev)) > 0) ;
        if(err < 0) {
                DECLARE_WORK(close_work, uml_dev_close, dev);
                printk(KERN_ERR
                       "Device '%s' read returned %d, shutting it down\n",
                       dev->name, err);
                /* dev_close can't be called in interrupt context, and it
                 * takes lp->lock again.  dev_close() can also be safely
                 * called multiple times on the same device, since it tests
                 * for (dev->flags & IFF_UP), so there's no harm in delaying
                 * the device shutdown. */
                schedule_work(&close_work);
                goto out;
        }
        reactivate_fd(lp->fd, UM_ETH_IRQ);

out:
        spin_unlock(&lp->lock);
        return IRQ_HANDLED;
}

static int uml_net_open(struct net_device *dev)
{
        struct uml_net_private *lp = dev->priv;
        int err;

        if(lp->fd >= 0){
                err = -ENXIO;
                goto out;
        }

        lp->fd = (*lp->open)(&lp->user);
        if(lp->fd < 0){
                err = lp->fd;
                goto out;
        }

        err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
                             IRQF_DISABLED | IRQF_SHARED, dev->name, dev);
        if(err != 0){
                printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
                err = -ENETUNREACH;
                goto out_close;
        }

        lp->tl.data = (unsigned long) &lp->user;
        netif_start_queue(dev);

        /* clear buffer - it can happen that the host side of the interface
         * is full when we get here.  In this case, new data is never queued,
         * SIGIOs never arrive, and the net never works.
         */
        while((err = uml_net_rx(dev)) > 0) ;

        spin_lock(&opened_lock);
        list_add(&lp->list, &opened);
        spin_unlock(&opened_lock);

        return 0;
out_close:
        if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
        lp->fd = -1;
out:
        return err;
}

static int uml_net_close(struct net_device *dev)
{
        struct uml_net_private *lp = dev->priv;

        netif_stop_queue(dev);

        free_irq(dev->irq, dev);
        if(lp->close != NULL)
                (*lp->close)(lp->fd, &lp->user);
        lp->fd = -1;

        spin_lock(&opened_lock);
        list_del(&lp->list);
        spin_unlock(&opened_lock);

        return 0;
}

static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct uml_net_private *lp = dev->priv;
        unsigned long flags;
        int len;

        netif_stop_queue(dev);

        spin_lock_irqsave(&lp->lock, flags);

        len = (*lp->write)(lp->fd, &skb, lp);

        if(len == skb->len) {
                lp->stats.tx_packets++;
                lp->stats.tx_bytes += skb->len;
                dev->trans_start = jiffies;
                netif_start_queue(dev);

                /* this is normally done in the interrupt when tx finishes */
                netif_wake_queue(dev);
        }
        else if(len == 0){
                netif_start_queue(dev);
                lp->stats.tx_dropped++;
        }
        else {
                netif_start_queue(dev);
                printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
        }

        spin_unlock_irqrestore(&lp->lock, flags);

        dev_kfree_skb(skb);

        return 0;
}

static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
{
        struct uml_net_private *lp = dev->priv;
        return &lp->stats;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
        if (dev->flags & IFF_PROMISC) return;
        else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
        else dev->flags &= ~IFF_ALLMULTI;
}

static void uml_net_tx_timeout(struct net_device *dev)
{
        dev->trans_start = jiffies;
        netif_wake_queue(dev);
}

static int uml_net_set_mac(struct net_device *dev, void *addr)
{
        struct uml_net_private *lp = dev->priv;
        struct sockaddr *hwaddr = addr;

        spin_lock_irq(&lp->lock);
        set_ether_mac(dev, hwaddr->sa_data);
        spin_unlock_irq(&lp->lock);

        return 0;
}

static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
        struct uml_net_private *lp = dev->priv;
        int err = 0;

        spin_lock_irq(&lp->lock);

        new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
        if(new_mtu < 0){
                err = new_mtu;
                goto out;
        }

        dev->mtu = new_mtu;

 out:
        spin_unlock_irq(&lp->lock);
        return err;
}

static void uml_net_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRIVER_NAME);
        strcpy(info->version, "42");
}

static struct ethtool_ops uml_net_ethtool_ops = {
        .get_drvinfo    = uml_net_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};

void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
        struct connection *conn = (struct connection *)_conn;

        dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
        do_connect(conn);
#endif
}

static void setup_etheraddr(char *str, unsigned char *addr)
{
        char *end;
        int i;

        if(str == NULL)
                goto random;

        for(i=0;i<6;i++){
                addr[i] = simple_strtoul(str, &end, 16);
                if((end == str) ||
                   ((*end != ':') && (*end != ',') && (*end != '\0'))){
                        printk(KERN_ERR
                               "setup_etheraddr: failed to parse '%s' "
                               "as an ethernet address\n", str);
                        goto random;
                }
                str = end + 1;
        }
        if(addr[0] & 1){
                printk(KERN_ERR
                       "Attempt to assign a broadcast ethernet address to a "
                       "device disallowed\n");
                goto random;
        }
        return;

random:
        random_ether_addr(addr);
}
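
/*
 * Example (illustrative): a MAC given on the command line as
 * "fe:fd:01:02:03:04" (six hex bytes separated by ':' or ',') is parsed
 * byte by byte above; anything unparsable, or an address with the
 * multicast/broadcast bit set (low bit of the first byte), falls back
 * to a random MAC.
 */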

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
        .driver = {
                .name  = DRIVER_NAME,
        },
};
static int driver_registered;

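/*
 * Set up one UML network device: allocate the uml_net bookkeeping structure
 * and the net_device, register a platform device for sysfs, wire up the
 * netdev operations, register the netdevice, and initialize the
 * uml_net_private structure that precedes the transport-specific data.
 * Returns 0 on success and 1 on any failure.
 */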
static int eth_configure(int n, void *init, char *mac,
                         struct transport *transport)
{
        struct uml_net *device;
        struct net_device *dev;
        struct uml_net_private *lp;
        int save, err, size;

        size = transport->private_size + sizeof(struct uml_net_private) +
                sizeof(((struct uml_net_private *) 0)->user);

        device = kmalloc(sizeof(*device), GFP_KERNEL);
        if (device == NULL) {
                printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
                return 1;
        }

        memset(device, 0, sizeof(*device));
        INIT_LIST_HEAD(&device->list);
        device->index = n;

        spin_lock(&devices_lock);
        list_add(&device->list, &devices);
        spin_unlock(&devices_lock);

        setup_etheraddr(mac, device->mac);

        printk(KERN_INFO "Netdevice %d ", n);
        printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
               device->mac[0], device->mac[1],
               device->mac[2], device->mac[3],
               device->mac[4], device->mac[5]);
        printk(": ");
        dev = alloc_etherdev(size);
        if (dev == NULL) {
                printk(KERN_ERR "eth_configure: failed to allocate device\n");
                return 1;
        }

        lp = dev->priv;
        /* This points to the transport private data. It's still clear, but we
         * must memset it to 0 *now*. Let's help the drivers. */
        memset(lp, 0, size);

        /* sysfs register */
        if (!driver_registered) {
                platform_driver_register(&uml_net_driver);
                driver_registered = 1;
        }
        device->pdev.id = n;
        device->pdev.name = DRIVER_NAME;
        platform_device_register(&device->pdev);
        SET_NETDEV_DEV(dev, &device->pdev.dev);

        /* If this name ends up conflicting with an existing registered
         * netdevice, that is OK, register_netdev{,ice}() will notice this
         * and fail.
         */
        snprintf(dev->name, sizeof(dev->name), "eth%d", n);
        device->dev = dev;

        (*transport->kern->init)(dev, init);

        dev->mtu = transport->user->max_packet;
        dev->open = uml_net_open;
        dev->hard_start_xmit = uml_net_start_xmit;
        dev->stop = uml_net_close;
        dev->get_stats = uml_net_get_stats;
        dev->set_multicast_list = uml_net_set_multicast_list;
        dev->tx_timeout = uml_net_tx_timeout;
        dev->set_mac_address = uml_net_set_mac;
        dev->change_mtu = uml_net_change_mtu;
        dev->ethtool_ops = &uml_net_ethtool_ops;
        dev->watchdog_timeo = (HZ >> 1);
        dev->irq = UM_ETH_IRQ;

        rtnl_lock();
        err = register_netdevice(dev);
        rtnl_unlock();
        if (err) {
                device->dev = NULL;
                /* XXX: should we call ->remove() here? */
                free_netdev(dev);
                return 1;
        }

        /* lp.user is the first four bytes of the transport data, which
         * has already been initialized.  This structure assignment will
         * overwrite that, so we make sure that .user gets overwritten with
         * what it already has.
         */
        save = lp->user[0];
        *lp = ((struct uml_net_private)
                { .list                 = LIST_HEAD_INIT(lp->list),
                  .dev                  = dev,
                  .fd                   = -1,
                  .mac                  = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
                  .protocol             = transport->kern->protocol,
                  .open                 = transport->user->open,
                  .close                = transport->user->close,
                  .remove               = transport->user->remove,
                  .read                 = transport->kern->read,
                  .write                = transport->kern->write,
                  .add_address          = transport->user->add_address,
                  .delete_address       = transport->user->delete_address,
                  .set_mtu              = transport->user->set_mtu,
                  .user                 = { save } });

        init_timer(&lp->tl);
        spin_lock_init(&lp->lock);
        lp->tl.function = uml_net_user_timer_expire;
        memcpy(lp->mac, device->mac, sizeof(lp->mac));

        if (transport->user->init)
                (*transport->user->init)(&lp->user, dev);

        set_ether_mac(dev, device->mac);

        return 0;
}

static struct uml_net *find_device(int n)
{
        struct uml_net *device;
        struct list_head *ele;

        spin_lock(&devices_lock);
        list_for_each(ele, &devices){
                device = list_entry(ele, struct uml_net, list);
                if(device->index == n)
                        goto out;
        }
        device = NULL;
 out:
        spin_unlock(&devices_lock);
        return device;
}

static int eth_parse(char *str, int *index_out, char **str_out)
{
        char *end;
        int n;

        n = simple_strtoul(str, &end, 0);
        if(end == str){
                printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
                return 1;
        }
        if(n < 0){
                printk(KERN_ERR "eth_setup: device %d is negative\n", n);
                return 1;
        }
        str = end;
        if(*str != '='){
                printk(KERN_ERR
                       "eth_setup: expected '=' after device number\n");
                return 1;
        }
        str++;
        if(find_device(n)){
                printk(KERN_ERR "eth_setup: Device %d already configured\n",
                       n);
                return 1;
        }
        if(index_out) *index_out = n;
        *str_out = str;
        return 0;
}
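
/*
 * Example (illustrative): for the boot parameter string "0=tuntap,,,10.0.0.1",
 * eth_parse() returns index 0 and leaves *str_out pointing at
 * "tuntap,,,10.0.0.1", which is later matched against a registered transport.
 */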

struct eth_init {
        struct list_head list;
        char *init;
        int index;
};

/* Filled in at boot time.  Will need locking if the transports become
 * modular.
 */
struct list_head transports = LIST_HEAD_INIT(transports);

/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);

static int check_transport(struct transport *transport, char *eth, int n,
                           void **init_out, char **mac_out)
{
        int len;

        len = strlen(transport->name);
        if(strncmp(eth, transport->name, len))
                return 0;

        eth += len;
        if(*eth == ',')
                eth++;
        else if(*eth != '\0')
                return 0;

        *init_out = kmalloc(transport->setup_size, GFP_KERNEL);
        if(*init_out == NULL)
                return 1;

        if(!transport->setup(eth, mac_out, *init_out)){
                kfree(*init_out);
                *init_out = NULL;
        }
        return 1;
}

void register_transport(struct transport *new)
{
        struct list_head *ele, *next;
        struct eth_init *eth;
        void *init;
        char *mac = NULL;
        int match;

        list_add(&new->list, &transports);

        list_for_each_safe(ele, next, &eth_cmd_line){
                eth = list_entry(ele, struct eth_init, list);
                match = check_transport(new, eth->init, eth->index, &init,
                                        &mac);
                if(!match)
                        continue;
                else if(init != NULL){
                        eth_configure(eth->index, init, mac, new);
                        kfree(init);
                }
                list_del(&eth->list);
        }
}

static int eth_setup_common(char *str, int index)
{
        struct list_head *ele;
        struct transport *transport;
        void *init;
        char *mac = NULL;

        list_for_each(ele, &transports){
                transport = list_entry(ele, struct transport, list);
                if(!check_transport(transport, str, index, &init, &mac))
                        continue;
                if(init != NULL){
                        eth_configure(index, init, mac, transport);
                        kfree(init);
                }
                return 1;
        }
        return 0;
}

static int eth_setup(char *str)
{
        struct eth_init *new;
        int n, err;

        err = eth_parse(str, &n, &str);
        if(err)
                return 1;

        new = alloc_bootmem(sizeof(*new));
        if (new == NULL){
                printk("eth_init : alloc_bootmem failed\n");
                return 1;
        }

        INIT_LIST_HEAD(&new->list);
        new->index = n;
        new->init = str;

        list_add_tail(&new->list, &eth_cmd_line);
        return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);
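
/*
 * Example (illustrative) kernel command lines:
 *
 *     eth0=tuntap,,,192.168.0.254
 *     eth1=mcast
 *
 * Each "eth<n>=" option is queued on eth_cmd_line at early boot and turned
 * into a device once the matching transport registers itself.
 */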

#if 0
static int eth_init(void)
{
        struct list_head *ele, *next;
        struct eth_init *eth;

        list_for_each_safe(ele, next, &eth_cmd_line){
                eth = list_entry(ele, struct eth_init, list);

                if(eth_setup_common(eth->init, eth->index))
                        list_del(&eth->list);
        }

        return 1;
}
__initcall(eth_init);
#endif

static int net_config(char *str)
{
        int n, err;

        err = eth_parse(str, &n, &str);
        if(err) return err;

        str = kstrdup(str, GFP_KERNEL);
        if(str == NULL){
                printk(KERN_ERR "net_config failed to strdup string\n");
                return -1;
        }
        err = !eth_setup_common(str, n);
        if(err)
                kfree(str);
        return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
        char *end;
        int n;

        n = simple_strtoul(*str, &end, 0);
        if((*end != '\0') || (end == *str))
                return -1;

        *start_out = n;
        *end_out = n;
        *str = end;
        return n;
}

static int net_remove(int n)
{
        struct uml_net *device;
        struct net_device *dev;
        struct uml_net_private *lp;

        device = find_device(n);
        if(device == NULL)
                return -ENODEV;

        dev = device->dev;
        lp = dev->priv;
        if(lp->fd > 0)
                return -EBUSY;
        if(lp->remove != NULL) (*lp->remove)(&lp->user);
        unregister_netdev(dev);
        platform_device_unregister(&device->pdev);

        list_del(&device->list);
        kfree(device);
        free_netdev(dev);
        return 0;
}

static struct mc_device net_mc = {
        .name           = "eth",
        .config         = net_config,
        .get_config     = NULL,
        .id             = net_id,
        .remove         = net_remove,
};
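
/*
 * These hooks back the mconsole "eth" device, so a running UML can be
 * reconfigured from the host, for example (illustrative):
 *
 *     uml_mconsole <umid> config eth1=mcast
 *     uml_mconsole <umid> remove eth1
 */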

static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
                              void *ptr)
{
        struct in_ifaddr *ifa = ptr;
        struct net_device *dev = ifa->ifa_dev->dev;
        struct uml_net_private *lp;
        void (*proc)(unsigned char *, unsigned char *, void *);
        unsigned char addr_buf[4], netmask_buf[4];

        if(dev->open != uml_net_open) return NOTIFY_DONE;

        lp = dev->priv;

        proc = NULL;
        switch (event){
        case NETDEV_UP:
                proc = lp->add_address;
                break;
        case NETDEV_DOWN:
                proc = lp->delete_address;
                break;
        }
        if(proc != NULL){
                memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
                memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
                (*proc)(addr_buf, netmask_buf, &lp->user);
        }
        return NOTIFY_DONE;
}

struct notifier_block uml_inetaddr_notifier = {
        .notifier_call          = uml_inetaddr_event,
};

static int uml_net_init(void)
{
        struct list_head *ele;
        struct uml_net_private *lp;
        struct in_device *ip;
        struct in_ifaddr *in;

        mconsole_register_dev(&net_mc);
        register_inetaddr_notifier(&uml_inetaddr_notifier);

        /* Devices may have been opened already, so the uml_inetaddr_notifier
         * didn't get a chance to run for them.  This fakes it so that
         * addresses which have already been set up get handled properly.
         */
        list_for_each(ele, &opened){
                lp = list_entry(ele, struct uml_net_private, list);
                ip = lp->dev->ip_ptr;
                if(ip == NULL) continue;
                in = ip->ifa_list;
                while(in != NULL){
                        uml_inetaddr_event(NULL, NETDEV_UP, in);
                        in = in->ifa_next;
                }
        }

        return 0;
}

__initcall(uml_net_init);

static void close_devices(void)
{
        struct list_head *ele;
        struct uml_net_private *lp;

        list_for_each(ele, &opened){
                lp = list_entry(ele, struct uml_net_private, list);
                free_irq(lp->dev->irq, lp->dev);
                if((lp->close != NULL) && (lp->fd >= 0))
                        (*lp->close)(lp->fd, &lp->user);
                if(lp->remove != NULL) (*lp->remove)(&lp->user);
        }
}

__uml_exitcall(close_devices);

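/*
 * Make sure an skb has at least 'extra' bytes of tailroom, copying and
 * expanding it if necessary (the original skb is freed in that case), and
 * then extend the data area by 'extra' bytes.  Returns NULL if the copy
 * fails.
 */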
struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
        if((skb != NULL) && (skb_tailroom(skb) < extra)){
                struct sk_buff *skb2;

                skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
                dev_kfree_skb(skb);
                skb = skb2;
        }
        if(skb != NULL) skb_put(skb, extra);
        return skb;
}

void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
                                        void *),
                    void *arg)
{
        struct net_device *dev = d;
        struct in_device *ip = dev->ip_ptr;
        struct in_ifaddr *in;
        unsigned char address[4], netmask[4];

        if(ip == NULL) return;
        in = ip->ifa_list;
        while(in != NULL){
                memcpy(address, &in->ifa_address, sizeof(address));
                memcpy(netmask, &in->ifa_mask, sizeof(netmask));
                (*cb)(address, netmask, arg);
                in = in->ifa_next;
        }
}

int dev_netmask(void *d, void *m)
{
        struct net_device *dev = d;
        struct in_device *ip = dev->ip_ptr;
        struct in_ifaddr *in;
        __be32 *mask_out = m;

        if(ip == NULL)
                return 1;

        in = ip->ifa_list;
        if(in == NULL)
                return 1;

        *mask_out = in->ifa_mask;
        return 0;
}

void *get_output_buffer(int *len_out)
{
        void *ret;

        ret = (void *) __get_free_pages(GFP_KERNEL, 0);
        if(ret) *len_out = PAGE_SIZE;
        else *len_out = 0;
        return ret;
}

void free_output_buffer(void *buffer)
{
        free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
                     char **gate_addr)
{
        char *remain;

        remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
        if(remain != NULL){
                printk("tap_setup_common - Extra garbage on specification : "
                       "'%s'\n", remain);
                return 1;
        }

        return 0;
}
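
/*
 * Example (illustrative): given "eth0=tuntap,tap0,fe:fd:00:00:00:01,192.168.0.254",
 * the tuntap transport, for instance, hands "tap0,fe:fd:00:00:00:01,192.168.0.254"
 * to tap_setup_common(), which splits it into the host device name, the MAC
 * for the UML side, and the host-side (gateway) IP address.
 */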

unsigned short eth_protocol(struct sk_buff *skb)
{
        return eth_type_trans(skb, skb->dev);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */