/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while (0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected by the rwlock tbl->lock.

   - All scans and updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by the reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock is used to protect other
   entry fields:
    - the timer
    - the resolution queue

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow is dev->hard_header.  dev->hard_header
   is assumed to be simple and to make no callbacks into the neighbour
   tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock, protecting
   the list of neighbour tables.  This list is used only in process
   context.
 */

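/*
 * A minimal sketch of the locking pattern described above (illustrative
 * only, not code from elsewhere in this file): take a reference under
 * the table lock, drop the lock, then do the non-trivial work.
 *
 *      read_lock_bh(&tbl->lock);
 *      n = <find entry in tbl->hash_buckets[...]>;
 *      if (n)
 *              neigh_hold(n);          // pin the entry
 *      read_unlock_bh(&tbl->lock);     // nothing clever was done under it
 *      if (n) {
 *              <send packets, call into the driver, etc.>
 *              neigh_release(n);       // drop the pin when done
 *      }
 */
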
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
        if (neigh->parms->neigh_cleanup)
                neigh->parms->neigh_cleanup(neigh);

        __neigh_notify(neigh, RTM_DELNEIGH, 0);
        neigh_release(neigh);
}

/*
 * Returns a value uniformly distributed in the interval
 * [base/2, 3*base/2).  This corresponds to the default IPv6 settings
 * and is not overridable, because it is a genuinely reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return (base ? (net_random() % base) + (base >> 1) : 0);
}
EXPORT_SYMBOL(neigh_rand_reach_time);


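/*
 * Worked example (illustrative): with HZ == 100 and base == 30 * HZ ==
 * 3000 jiffies (a 30 s base_reachable_time), net_random() % 3000 yields
 * 0..2999, so the result lies in 1500..4499 jiffies, i.e. 15..45 s.
 */
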
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                *np     = n->next;
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        neigh_hold(n);
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}

static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation:
                                   we must destroy the neighbour entry,
                                   but someone still uses it.

                                   Destruction will be delayed until
                                   the last user releases it, but
                                   we must kill timers etc. and move
                                   it to a safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
        if (!n)
                goto out_entries;

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->parms          = neigh_parms_clone(&tbl->parms);
        setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead           = 1;
out:
        return n;

out_entries:
        atomic_dec(&tbl->entries);
        goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);
        struct neighbour **ret;

        if (size <= PAGE_SIZE) {
                ret = kzalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
                      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
        }
        return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);

        if (size <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(!is_power_of_2(new_entries));
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        hash_val = tbl->hash(pkey, dev);
        for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
                if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}
EXPORT_SYMBOL(neigh_lookup);

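/*
 * A sketch of typical neigh_lookup() usage (an illustrative call site,
 * not code from this file): the entry comes back with its reference
 * count held, so the caller must balance with neigh_release().
 *
 *      struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *      if (n) {
 *              ... read n->ha under read_lock_bh(&n->lock) ...
 *              neigh_release(n);
 *      }
 */
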
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
                                     const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        hash_val = tbl->hash(pkey, NULL);
        for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len) &&
                    net_eq(dev_net(n->dev), net)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);

        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
EXPORT_SYMBOL(neigh_create);

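/*
 * The usual lookup-or-create pattern built on the two functions above
 * (a sketch; the __neigh_lookup() helper in <net/neighbour.h> wraps
 * essentially this sequence):
 *
 *      n = neigh_lookup(tbl, pkey, dev);
 *      if (!n)
 *              n = neigh_create(tbl, pkey, dev);  // may return ERR_PTR()
 *      if (!IS_ERR(n)) {
 *              ...
 *              neigh_release(n);
 *      }
 */
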
static u32 pneigh_hash(const void *pkey, int key_len)
{
        u32 hash_val = *(u32 *)(pkey + key_len - 4);
        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;
        return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
                                              struct net *net,
                                              const void *pkey,
                                              int key_len,
                                              struct net_device *dev)
{
        while (n) {
                if (!memcmp(n->key, pkey, key_len) &&
                    net_eq(pneigh_net(n), net) &&
                    (n->dev == dev || !n->dev))
                        return n;
                n = n->next;
        }
        return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
                struct net *net, const void *pkey, struct net_device *dev)
{
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                                 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
                                   struct net *net, const void *pkey,
                                   struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        read_lock_bh(&tbl->lock);
        n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                              net, pkey, key_len, dev);
        read_unlock_bh(&tbl->lock);

        if (n || !creat)
                goto out;

        ASSERT_RTNL();

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        write_pnet(&n->net, hold_net(net));
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
                release_net(net);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    net_eq(pneigh_net(n), net)) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        release_net(pneigh_net(n));
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
                                release_net(pneigh_net(n));
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
        if (atomic_dec_and_test(&parms->refcnt))
                neigh_parms_destroy(parms);
}

/*
 *      The neighbour must already be unlinked from its table.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;

                write_seqlock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_sequnlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write-locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

        neigh->output = neigh->ops->output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write-locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

        neigh->output = neigh->ops->connected_output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
        struct neighbour *n, **np;
        unsigned int i;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock_bh(&tbl->lock);

        /*
         *      Periodically recompute ReachableTime from the random function.
         */

        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = jiffies;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        for (i = 0; i <= tbl->hash_mask; i++) {
                np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        unsigned int state;

                        write_lock(&n->lock);

                        state = n->nud_state;
                        if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                                write_unlock(&n->lock);
                                goto next_elt;
                        }

                        if (time_before(n->used, n->confirmed))
                                n->used = n->confirmed;

                        if (atomic_read(&n->refcnt) == 1 &&
                            (state == NUD_FAILED ||
                             time_after(jiffies, n->used + n->parms->gc_staletime))) {
                                *np = n->next;
                                n->dead = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);

next_elt:
                        np = &n->next;
                }
                /*
                 * It's fine to release the lock here, even if the hash
                 * table grows while we are preempted.
                 */
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
        }
        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        schedule_delayed_work(&tbl->gc_work,
                              tbl->parms.base_reachable_time >> 1);
        write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return (n->nud_state & NUD_PROBE ?
                p->ucast_probes :
                p->ucast_probes + p->app_probes + p->mcast_probes);
}

static void neigh_invalidate(struct neighbour *neigh)
        __releases(neigh->lock)
        __acquires(neigh->lock)
{
        struct sk_buff *skb;

        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
        neigh->updated = jiffies;

        /* This is a delicate spot: error_report can be a complicated
           routine and, in particular, it can hit this same neighbour
           entry!  So we try to be careful and avoid a dead loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                write_unlock(&neigh->lock);
                neigh->ops->error_report(neigh, skb);
                write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
}

/* Called when a timer expires for a neighbour entry. */

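/*
 * A summary of the NUD state transitions driven by this timer (derived
 * from the code below, noted here for readability):
 *
 *   REACHABLE --(reachable_time over, recently used)---> DELAY
 *   REACHABLE --(reachable_time over, idle)------------> STALE
 *   DELAY     --(confirmation seen in time)------------> REACHABLE
 *   DELAY     --(delay_probe_time over)----------------> PROBE
 *   INCOMPLETE/PROBE --(neigh_max_probes() exceeded)---> FAILED
 */
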
static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                neigh->nud_state = NUD_FAILED;
                notify = 1;
                neigh_invalidate(neigh);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);
                /* keep skb alive even if arp_queue overflows */
                if (skb)
                        skb = skb_copy(skb, GFP_ATOMIC);
                write_unlock(&neigh->lock);
                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
                kfree_skb(skb);
        } else {
out:
                write_unlock(&neigh->lock);
        }

        if (notify)
                neigh_update_notify(neigh);

        neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        unsigned long now;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        now = jiffies;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = jiffies;
                        neigh_add_timer(neigh, now + 1);
                } else {
                        neigh->nud_state = NUD_FAILED;
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh,
                                jiffies + neigh->parms->delay_probe_time);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        if (skb_queue_len(&neigh->arp_queue) >=
                            neigh->parms->queue_len) {
                                struct sk_buff *buff;
                                buff = __skb_dequeue(&neigh->arp_queue);
                                kfree_skb(buff);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
                }
                rc = 1;
        }
out_unlock_bh:
        write_unlock_bh(&neigh->lock);
        return rc;
}
EXPORT_SYMBOL(__neigh_event_send);

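/*
 * Callers normally go through the neigh_event_send() inline from
 * <net/neighbour.h>, which checks for an already-usable state before
 * calling __neigh_event_send().  A sketch of the common transmit-side
 * pattern (see neigh_resolve_output() below):
 *
 *      if (!neigh_event_send(neigh, skb)) {
 *              // entry is usable: build the hard header and transmit
 *      }
 *      // otherwise the skb was queued (or dropped) pending resolution
 */
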
static void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
                = NULL;

        if (neigh->dev->header_ops)
                update = neigh->dev->header_ops->cache_update;

        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
                        write_seqlock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_sequnlock_bh(&hh->hh_lock);
                }
        }
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if one is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it is different.
                                It also allows the current state to be
                                retained if lladdr is unchanged.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.

        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
                                to be a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev;
        int update_isrouter = 0;

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
                if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
                    (new & NUD_FAILED)) {
                        neigh_invalidate(neigh);
                        notify = 1;
                }
                goto out;
        }

        /* Compare the new lladdr with the cached one. */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check the override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If the entry was valid and the address has not changed,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                             (old & NUD_CONNECTED))
                            )
                                new = old;
                }
        }

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                neigh->nud_state = new;
        }

        if (lladdr != neigh->ha) {
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (neigh->parms->base_reachable_time << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid a dead loop if something went wrong. */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct neighbour *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (skb_dst(skb) && skb_dst(skb)->neighbour)
                                n1 = skb_dst(skb)->neighbour;
                        n1->output(skb);
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }
out:
        if (update_isrouter) {
                neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                        (neigh->flags | NTF_ROUTER) :
                        (neigh->flags & ~NTF_ROUTER);
        }
        write_unlock_bh(&neigh->lock);

        if (notify)
                neigh_update_notify(neigh);

        return err;
}
EXPORT_SYMBOL(neigh_update);

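/*
 * Example administrative update (a sketch; this mirrors what the
 * netlink handlers below do when userspace replaces an entry):
 *
 *      err = neigh_update(neigh, lladdr, NUD_PERMANENT,
 *                         NEIGH_UPDATE_F_OVERRIDE |
 *                         NEIGH_UPDATE_F_ADMIN);
 */
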
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE);
        return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          __be16 protocol)
{
        struct hh_cache *hh;
        struct net_device *dev = dst->dev;

        for (hh = n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                seqlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;

                if (dev->header_ops->cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh       = hh;
                        if (n->nud_state & NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        __skb_pull(skb, skb_network_offset(skb));

        if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                            skb->len) < 0 &&
            dev->header_ops->rebuild(skb))
                return 0;

        return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct neighbour *neigh;
        int rc = 0;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb_network_offset(skb));

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->header_ops->cache && !dst->hh) {
                        write_lock_bh(&neigh->lock);
                        if (!dst->hh)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        read_lock_bh(&neigh->lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        rc = neigh->ops->queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without the hh cache. */

int neigh_connected_output(struct sk_buff *skb)
{
        int err;
        struct dst_entry *dst = skb_dst(skb);
        struct neighbour *neigh = dst->neighbour;
        struct net_device *dev = neigh->dev;

        __skb_pull(skb, skb_network_offset(skb));

        read_lock_bh(&neigh->lock);
        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                              neigh->ha, NULL, skb->len);
        read_unlock_bh(&neigh->lock);
        if (err >= 0)
                err = neigh->ops->queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
EXPORT_SYMBOL(neigh_connected_output);

static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb, *n;

        spin_lock(&tbl->proxy_queue.lock);

        skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
                long tdif = NEIGH_CB(skb)->sched_next - now;

                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;
                        __skb_unlink(skb, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(skb);
                        else
                                kfree_skb(skb);

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        unsigned long sched_next = now + (net_random() % p->proxy_delay);

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        skb_dst_drop(skb);
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
                                                     struct net *net, int ifindex)
{
        struct neigh_parms *p;

        for (p = &tbl->parms; p; p = p->next) {
                if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
                    (!p->dev && !ifindex))
                        return p;
        }

        return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p, *ref;
        struct net *net = dev_net(dev);
        const struct net_device_ops *ops = dev->netdev_ops;

        ref = lookup_neigh_parms(tbl, net, 0);
        if (!ref)
                return NULL;

        p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl            = tbl;
                atomic_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);

                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                        kfree(p);
                        return NULL;
                }

                dev_hold(dev);
                p->dev = dev;
                write_pnet(&p->net, hold_net(net));
                p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

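/*
 * A sketch of the expected lifetime pairing, as used by the protocol
 * front ends (hypothetical call site, arp_tbl shown for concreteness):
 *
 *      p = neigh_parms_alloc(dev, &arp_tbl);
 *      if (!p)
 *              return -ENOBUFS;
 *      ...
 *      neigh_parms_release(&arp_tbl, p);   // unlinks; freed via RCU below
 */
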
static void neigh_rcu_free_parms(struct rcu_head *head)
{
        struct neigh_parms *parms =
                container_of(head, struct neigh_parms, rcu_head);

        neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        parms->dead = 1;
                        write_unlock_bh(&tbl->lock);
                        if (parms->dev)
                                dev_put(parms->dev);
                        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
        release_net(neigh_parms_net(parms));
        kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        write_pnet(&tbl->parms.net, &init_net);
        atomic_set(&tbl->parms.refcnt, 1);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(tbl->parms.base_reachable_time);

        if (!tbl->kmem_cachep)
                tbl->kmem_cachep =
                        kmem_cache_create(tbl->id, tbl->entry_size, 0,
                                          SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                          NULL);
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
                              &neigh_stat_seq_fops, tbl))
                panic("cannot create neighbour proc dir entry");
#endif

        tbl->hash_mask = 1;
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        rwlock_init(&tbl->lock);
        INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
        schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);

void neigh_table_init(struct neigh_table *tbl)
{
        struct neigh_table *tmp;

        neigh_table_init_no_netlink(tbl);
        write_lock(&neigh_tbl_lock);
        for (tmp = neigh_tables; tmp; tmp = tmp->next) {
                if (tmp->family == tbl->family)
                        break;
        }
        tbl->next       = neigh_tables;
        neigh_tables    = tbl;
        write_unlock(&neigh_tbl_lock);

        if (unlikely(tmp)) {
                printk(KERN_ERR "NEIGH: Registering multiple tables for "
                       "family %d\n", tbl->family);
                dump_stack();
        }
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* This is not clean... fix it so the IPv6 module can be
           unloaded safely. */
        cancel_delayed_work(&tbl->gc_work);
        flush_scheduled_work();
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                printk(KERN_CRIT "neighbour leakage\n");
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);

        neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
        tbl->hash_buckets = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

        remove_proc_entry(tbl->id, init_net.proc_net_stat);

        free_percpu(tbl->stats);
        tbl->stats = NULL;

        kmem_cache_destroy(tbl->kmem_cachep);
        tbl->kmem_cachep = NULL;

        return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

1523 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1524 {
1525         struct net *net = sock_net(skb->sk);
1526         struct ndmsg *ndm;
1527         struct nlattr *dst_attr;
1528         struct neigh_table *tbl;
1529         struct net_device *dev = NULL;
1530         int err = -EINVAL;
1531
1532         if (nlmsg_len(nlh) < sizeof(*ndm))
1533                 goto out;
1534
1535         dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1536         if (dst_attr == NULL)
1537                 goto out;
1538
1539         ndm = nlmsg_data(nlh);
1540         if (ndm->ndm_ifindex) {
1541                 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1542                 if (dev == NULL) {
1543                         err = -ENODEV;
1544                         goto out;
1545                 }
1546         }
1547
1548         read_lock(&neigh_tbl_lock);
1549         for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1550                 struct neighbour *neigh;
1551
1552                 if (tbl->family != ndm->ndm_family)
1553                         continue;
1554                 read_unlock(&neigh_tbl_lock);
1555
1556                 if (nla_len(dst_attr) < tbl->key_len)
1557                         goto out_dev_put;
1558
1559                 if (ndm->ndm_flags & NTF_PROXY) {
1560                         err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1561                         goto out_dev_put;
1562                 }
1563
1564                 if (dev == NULL)
1565                         goto out_dev_put;
1566
1567                 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1568                 if (neigh == NULL) {
1569                         err = -ENOENT;
1570                         goto out_dev_put;
1571                 }
1572
1573                 err = neigh_update(neigh, NULL, NUD_FAILED,
1574                                    NEIGH_UPDATE_F_OVERRIDE |
1575                                    NEIGH_UPDATE_F_ADMIN);
1576                 neigh_release(neigh);
1577                 goto out_dev_put;
1578         }
1579         read_unlock(&neigh_tbl_lock);
1580         err = -EAFNOSUPPORT;
1581
1582 out_dev_put:
1583         if (dev)
1584                 dev_put(dev);
1585 out:
1586         return err;
1587 }
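/*
 * neigh_delete() above services RTM_DELNEIGH; this is the path a
 * command such as "ip neigh del 192.0.2.1 dev eth0" takes (address
 * and device picked purely for illustration).  Note that deletion is
 * really a forced transition to NUD_FAILED via neigh_update(),
 * leaving the final destruction to the reference count.
 */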
1588
1589 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1590 {
1591         struct net *net = sock_net(skb->sk);
1592         struct ndmsg *ndm;
1593         struct nlattr *tb[NDA_MAX+1];
1594         struct neigh_table *tbl;
1595         struct net_device *dev = NULL;
1596         int err;
1597
1598         err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1599         if (err < 0)
1600                 goto out;
1601
1602         err = -EINVAL;
1603         if (tb[NDA_DST] == NULL)
1604                 goto out;
1605
1606         ndm = nlmsg_data(nlh);
1607         if (ndm->ndm_ifindex) {
1608                 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1609                 if (dev == NULL) {
1610                         err = -ENODEV;
1611                         goto out;
1612                 }
1613
1614                 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1615                         goto out_dev_put;
1616         }
1617
1618         read_lock(&neigh_tbl_lock);
1619         for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1620                 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1621                 struct neighbour *neigh;
1622                 void *dst, *lladdr;
1623
1624                 if (tbl->family != ndm->ndm_family)
1625                         continue;
1626                 read_unlock(&neigh_tbl_lock);
1627
1628                 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1629                         goto out_dev_put;
1630                 dst = nla_data(tb[NDA_DST]);
1631                 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1632
1633                 if (ndm->ndm_flags & NTF_PROXY) {
1634                         struct pneigh_entry *pn;
1635
1636                         err = -ENOBUFS;
1637                         pn = pneigh_lookup(tbl, net, dst, dev, 1);
1638                         if (pn) {
1639                                 pn->flags = ndm->ndm_flags;
1640                                 err = 0;
1641                         }
1642                         goto out_dev_put;
1643                 }
1644
1645                 if (dev == NULL)
1646                         goto out_dev_put;
1647
1648                 neigh = neigh_lookup(tbl, dst, dev);
1649                 if (neigh == NULL) {
1650                         if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1651                                 err = -ENOENT;
1652                                 goto out_dev_put;
1653                         }
1654
1655                         neigh = __neigh_lookup_errno(tbl, dst, dev);
1656                         if (IS_ERR(neigh)) {
1657                                 err = PTR_ERR(neigh);
1658                                 goto out_dev_put;
1659                         }
1660                 } else {
1661                         if (nlh->nlmsg_flags & NLM_F_EXCL) {
1662                                 err = -EEXIST;
1663                                 neigh_release(neigh);
1664                                 goto out_dev_put;
1665                         }
1666
1667                         if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1668                                 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1669                 }
1670
1671                 if (ndm->ndm_flags & NTF_USE) {
1672                         neigh_event_send(neigh, NULL);
1673                         err = 0;
1674                 } else
1675                         err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1676                 neigh_release(neigh);
1677                 goto out_dev_put;
1678         }
1679
1680         read_unlock(&neigh_tbl_lock);
1681         err = -EAFNOSUPPORT;
1682
1683 out_dev_put:
1684         if (dev)
1685                 dev_put(dev);
1686 out:
1687         return err;
1688 }
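/*
 * neigh_add() above services RTM_NEWNEIGH.  The netlink header flags
 * map onto the familiar iproute2 verbs: "ip neigh add" sends
 * NLM_F_CREATE|NLM_F_EXCL and "ip neigh replace" sends
 * NLM_F_CREATE|NLM_F_REPLACE, which is exactly what the
 * NLM_F_CREATE/NLM_F_EXCL/NLM_F_REPLACE tests above implement.
 */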
1689
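/*
 * Note on the fill helpers below: the NLA_PUT*() macros expand to
 * "goto nla_put_failure" when the message runs out of tail room,
 * which is why each fill function carries that label even though no
 * explicit goto appears at the call sites.
 */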
1690 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1691 {
1692         struct nlattr *nest;
1693
1694         nest = nla_nest_start(skb, NDTA_PARMS);
1695         if (nest == NULL)
1696                 return -ENOBUFS;
1697
1698         if (parms->dev)
1699                 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1700
1701         NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1702         NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1703         NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1704         NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1705         NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1706         NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1707         NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1708         NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1709                       parms->base_reachable_time);
1710         NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1711         NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1712         NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1713         NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1714         NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1715         NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1716
1717         return nla_nest_end(skb, nest);
1718
1719 nla_put_failure:
1720         nla_nest_cancel(skb, nest);
1721         return -EMSGSIZE;
1722 }
1723
1724 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1725                               u32 pid, u32 seq, int type, int flags)
1726 {
1727         struct nlmsghdr *nlh;
1728         struct ndtmsg *ndtmsg;
1729
1730         nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1731         if (nlh == NULL)
1732                 return -EMSGSIZE;
1733
1734         ndtmsg = nlmsg_data(nlh);
1735
1736         read_lock_bh(&tbl->lock);
1737         ndtmsg->ndtm_family = tbl->family;
1738         ndtmsg->ndtm_pad1   = 0;
1739         ndtmsg->ndtm_pad2   = 0;
1740
1741         NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1742         NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1743         NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1744         NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1745         NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1746
1747         {
1748                 unsigned long now = jiffies;
1749                 unsigned int flush_delta = now - tbl->last_flush;
1750                 unsigned int rand_delta = now - tbl->last_rand;
1751
1752                 struct ndt_config ndc = {
1753                         .ndtc_key_len           = tbl->key_len,
1754                         .ndtc_entry_size        = tbl->entry_size,
1755                         .ndtc_entries           = atomic_read(&tbl->entries),
1756                         .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
1757                         .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
1758                         .ndtc_hash_rnd          = tbl->hash_rnd,
1759                         .ndtc_hash_mask         = tbl->hash_mask,
1760                         .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
1761                 };
1762
1763                 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1764         }
1765
1766         {
1767                 int cpu;
1768                 struct ndt_stats ndst;
1769
1770                 memset(&ndst, 0, sizeof(ndst));
1771
1772                 for_each_possible_cpu(cpu) {
1773                         struct neigh_statistics *st;
1774
1775                         st = per_cpu_ptr(tbl->stats, cpu);
1776                         ndst.ndts_allocs                += st->allocs;
1777                         ndst.ndts_destroys              += st->destroys;
1778                         ndst.ndts_hash_grows            += st->hash_grows;
1779                         ndst.ndts_res_failed            += st->res_failed;
1780                         ndst.ndts_lookups               += st->lookups;
1781                         ndst.ndts_hits                  += st->hits;
1782                         ndst.ndts_rcv_probes_mcast      += st->rcv_probes_mcast;
1783                         ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
1784                         ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
1785                         ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
1786                 }
1787
1788                 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1789         }
1790
1791         BUG_ON(tbl->parms.dev);
1792         if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1793                 goto nla_put_failure;
1794
1795         read_unlock_bh(&tbl->lock);
1796         return nlmsg_end(skb, nlh);
1797
1798 nla_put_failure:
1799         read_unlock_bh(&tbl->lock);
1800         nlmsg_cancel(skb, nlh);
1801         return -EMSGSIZE;
1802 }
1803
1804 static int neightbl_fill_param_info(struct sk_buff *skb,
1805                                     struct neigh_table *tbl,
1806                                     struct neigh_parms *parms,
1807                                     u32 pid, u32 seq, int type,
1808                                     unsigned int flags)
1809 {
1810         struct ndtmsg *ndtmsg;
1811         struct nlmsghdr *nlh;
1812
1813         nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1814         if (nlh == NULL)
1815                 return -EMSGSIZE;
1816
1817         ndtmsg = nlmsg_data(nlh);
1818
1819         read_lock_bh(&tbl->lock);
1820         ndtmsg->ndtm_family = tbl->family;
1821         ndtmsg->ndtm_pad1   = 0;
1822         ndtmsg->ndtm_pad2   = 0;
1823
1824         if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1825             neightbl_fill_parms(skb, parms) < 0)
1826                 goto errout;
1827
1828         read_unlock_bh(&tbl->lock);
1829         return nlmsg_end(skb, nlh);
1830 errout:
1831         read_unlock_bh(&tbl->lock);
1832         nlmsg_cancel(skb, nlh);
1833         return -EMSGSIZE;
1834 }
1835
1836 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1837         [NDTA_NAME]             = { .type = NLA_STRING },
1838         [NDTA_THRESH1]          = { .type = NLA_U32 },
1839         [NDTA_THRESH2]          = { .type = NLA_U32 },
1840         [NDTA_THRESH3]          = { .type = NLA_U32 },
1841         [NDTA_GC_INTERVAL]      = { .type = NLA_U64 },
1842         [NDTA_PARMS]            = { .type = NLA_NESTED },
1843 };
1844
1845 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1846         [NDTPA_IFINDEX]                 = { .type = NLA_U32 },
1847         [NDTPA_QUEUE_LEN]               = { .type = NLA_U32 },
1848         [NDTPA_PROXY_QLEN]              = { .type = NLA_U32 },
1849         [NDTPA_APP_PROBES]              = { .type = NLA_U32 },
1850         [NDTPA_UCAST_PROBES]            = { .type = NLA_U32 },
1851         [NDTPA_MCAST_PROBES]            = { .type = NLA_U32 },
1852         [NDTPA_BASE_REACHABLE_TIME]     = { .type = NLA_U64 },
1853         [NDTPA_GC_STALETIME]            = { .type = NLA_U64 },
1854         [NDTPA_DELAY_PROBE_TIME]        = { .type = NLA_U64 },
1855         [NDTPA_RETRANS_TIME]            = { .type = NLA_U64 },
1856         [NDTPA_ANYCAST_DELAY]           = { .type = NLA_U64 },
1857         [NDTPA_PROXY_DELAY]             = { .type = NLA_U64 },
1858         [NDTPA_LOCKTIME]                = { .type = NLA_U64 },
1859 };
1860
1861 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1862 {
1863         struct net *net = sock_net(skb->sk);
1864         struct neigh_table *tbl;
1865         struct ndtmsg *ndtmsg;
1866         struct nlattr *tb[NDTA_MAX+1];
1867         int err;
1868
1869         err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1870                           nl_neightbl_policy);
1871         if (err < 0)
1872                 goto errout;
1873
1874         if (tb[NDTA_NAME] == NULL) {
1875                 err = -EINVAL;
1876                 goto errout;
1877         }
1878
1879         ndtmsg = nlmsg_data(nlh);
1880         read_lock(&neigh_tbl_lock);
1881         for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1882                 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1883                         continue;
1884
1885                 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1886                         break;
1887         }
1888
1889         if (tbl == NULL) {
1890                 err = -ENOENT;
1891                 goto errout_locked;
1892         }
1893
1894         /*
1895          * We acquire tbl->lock to be nice to the periodic timers and to
1896          * make sure they always see a consistent set of values.
1897          */
1898         write_lock_bh(&tbl->lock);
1899
1900         if (tb[NDTA_PARMS]) {
1901                 struct nlattr *tbp[NDTPA_MAX+1];
1902                 struct neigh_parms *p;
1903                 int i, ifindex = 0;
1904
1905                 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1906                                        nl_ntbl_parm_policy);
1907                 if (err < 0)
1908                         goto errout_tbl_lock;
1909
1910                 if (tbp[NDTPA_IFINDEX])
1911                         ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1912
1913                 p = lookup_neigh_parms(tbl, net, ifindex);
1914                 if (p == NULL) {
1915                         err = -ENOENT;
1916                         goto errout_tbl_lock;
1917                 }
1918
1919                 for (i = 1; i <= NDTPA_MAX; i++) {
1920                         if (tbp[i] == NULL)
1921                                 continue;
1922
1923                         switch (i) {
1924                         case NDTPA_QUEUE_LEN:
1925                                 p->queue_len = nla_get_u32(tbp[i]);
1926                                 break;
1927                         case NDTPA_PROXY_QLEN:
1928                                 p->proxy_qlen = nla_get_u32(tbp[i]);
1929                                 break;
1930                         case NDTPA_APP_PROBES:
1931                                 p->app_probes = nla_get_u32(tbp[i]);
1932                                 break;
1933                         case NDTPA_UCAST_PROBES:
1934                                 p->ucast_probes = nla_get_u32(tbp[i]);
1935                                 break;
1936                         case NDTPA_MCAST_PROBES:
1937                                 p->mcast_probes = nla_get_u32(tbp[i]);
1938                                 break;
1939                         case NDTPA_BASE_REACHABLE_TIME:
1940                                 p->base_reachable_time = nla_get_msecs(tbp[i]);
1941                                 break;
1942                         case NDTPA_GC_STALETIME:
1943                                 p->gc_staletime = nla_get_msecs(tbp[i]);
1944                                 break;
1945                         case NDTPA_DELAY_PROBE_TIME:
1946                                 p->delay_probe_time = nla_get_msecs(tbp[i]);
1947                                 break;
1948                         case NDTPA_RETRANS_TIME:
1949                                 p->retrans_time = nla_get_msecs(tbp[i]);
1950                                 break;
1951                         case NDTPA_ANYCAST_DELAY:
1952                                 p->anycast_delay = nla_get_msecs(tbp[i]);
1953                                 break;
1954                         case NDTPA_PROXY_DELAY:
1955                                 p->proxy_delay = nla_get_msecs(tbp[i]);
1956                                 break;
1957                         case NDTPA_LOCKTIME:
1958                                 p->locktime = nla_get_msecs(tbp[i]);
1959                                 break;
1960                         }
1961                 }
1962         }
1963
1964         if (tb[NDTA_THRESH1])
1965                 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1966
1967         if (tb[NDTA_THRESH2])
1968                 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1969
1970         if (tb[NDTA_THRESH3])
1971                 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1972
1973         if (tb[NDTA_GC_INTERVAL])
1974                 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1975
1976         err = 0;
1977
1978 errout_tbl_lock:
1979         write_unlock_bh(&tbl->lock);
1980 errout_locked:
1981         read_unlock(&neigh_tbl_lock);
1982 errout:
1983         return err;
1984 }
1985
1986 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1987 {
1988         struct net *net = sock_net(skb->sk);
1989         int family, tidx, nidx = 0;
1990         int tbl_skip = cb->args[0];
1991         int neigh_skip = cb->args[1];
1992         struct neigh_table *tbl;
1993
1994         family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1995
1996         read_lock(&neigh_tbl_lock);
1997         for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1998                 struct neigh_parms *p;
1999
2000                 if (tidx < tbl_skip || (family && tbl->family != family))
2001                         continue;
2002
2003                 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2004                                        cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2005                                        NLM_F_MULTI) <= 0)
2006                         break;
2007
2008                 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2009                         if (!net_eq(neigh_parms_net(p), net))
2010                                 continue;
2011
2012                         if (nidx < neigh_skip)
2013                                 goto next;
2014
2015                         if (neightbl_fill_param_info(skb, tbl, p,
2016                                                      NETLINK_CB(cb->skb).pid,
2017                                                      cb->nlh->nlmsg_seq,
2018                                                      RTM_NEWNEIGHTBL,
2019                                                      NLM_F_MULTI) <= 0)
2020                                 goto out;
2021                 next:
2022                         nidx++;
2023                 }
2024
2025                 neigh_skip = 0;
2026         }
2027 out:
2028         read_unlock(&neigh_tbl_lock);
2029         cb->args[0] = tidx;
2030         cb->args[1] = nidx;
2031
2032         return skb->len;
2033 }
2034
2035 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2036                            u32 pid, u32 seq, int type, unsigned int flags)
2037 {
2038         unsigned long now = jiffies;
2039         struct nda_cacheinfo ci;
2040         struct nlmsghdr *nlh;
2041         struct ndmsg *ndm;
2042
2043         nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2044         if (nlh == NULL)
2045                 return -EMSGSIZE;
2046
2047         ndm = nlmsg_data(nlh);
2048         ndm->ndm_family  = neigh->ops->family;
2049         ndm->ndm_pad1    = 0;
2050         ndm->ndm_pad2    = 0;
2051         ndm->ndm_flags   = neigh->flags;
2052         ndm->ndm_type    = neigh->type;
2053         ndm->ndm_ifindex = neigh->dev->ifindex;
2054
2055         NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2056
2057         read_lock_bh(&neigh->lock);
2058         ndm->ndm_state   = neigh->nud_state;
2059         if ((neigh->nud_state & NUD_VALID) &&
2060             nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2061                 read_unlock_bh(&neigh->lock);
2062                 goto nla_put_failure;
2063         }
2064
2065         ci.ndm_used      = jiffies_to_clock_t(now - neigh->used);
2066         ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2067         ci.ndm_updated   = jiffies_to_clock_t(now - neigh->updated);
2068         ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
2069         read_unlock_bh(&neigh->lock);
2070
2071         NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2072         NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2073
2074         return nlmsg_end(skb, nlh);
2075
2076 nla_put_failure:
2077         nlmsg_cancel(skb, nlh);
2078         return -EMSGSIZE;
2079 }
2080
2081 static void neigh_update_notify(struct neighbour *neigh)
2082 {
2083         call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2084         __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2085 }
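/*
 * Both channels matter here: call_netevent_notifiers() reaches
 * in-kernel subscribers of NETEVENT_NEIGH_UPDATE, while
 * __neigh_notify() multicasts the change to RTNLGRP_NEIGH listeners
 * in user space.
 */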
2086
2087 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2088                             struct netlink_callback *cb)
2089 {
2090         struct net *net = sock_net(skb->sk);
2091         struct neighbour *n;
2092         int rc, h, s_h = cb->args[1];
2093         int idx, s_idx = idx = cb->args[2];
2094
2095         read_lock_bh(&tbl->lock);
2096         for (h = 0; h <= tbl->hash_mask; h++) {
2097                 if (h < s_h)
2098                         continue;
2099                 if (h > s_h)
2100                         s_idx = 0;
2101                 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2102                         if (!net_eq(dev_net(n->dev), net))
2103                                 continue;
2104                         if (idx < s_idx)
2105                                 goto next;
2106                         if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2107                                             cb->nlh->nlmsg_seq,
2108                                             RTM_NEWNEIGH,
2109                                             NLM_F_MULTI) <= 0) {
2110                                 read_unlock_bh(&tbl->lock);
2111                                 rc = -1;
2112                                 goto out;
2113                         }
2114                 next:
2115                         idx++;
2116                 }
2117         }
2118         read_unlock_bh(&tbl->lock);
2119         rc = skb->len;
2120 out:
2121         cb->args[1] = h;
2122         cb->args[2] = idx;
2123         return rc;
2124 }
2125
2126 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2127 {
2128         struct neigh_table *tbl;
2129         int t, family, s_t;
2130
2131         read_lock(&neigh_tbl_lock);
2132         family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2133         s_t = cb->args[0];
2134
2135         for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2136                 if (t < s_t || (family && tbl->family != family))
2137                         continue;
2138                 if (t > s_t)
2139                         memset(&cb->args[1], 0, sizeof(cb->args) -
2140                                                 sizeof(cb->args[0]));
2141                 if (neigh_dump_table(tbl, skb, cb) < 0)
2142                         break;
2143         }
2144         read_unlock(&neigh_tbl_lock);
2145
2146         cb->args[0] = t;
2147         return skb->len;
2148 }
2149
2150 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2151 {
2152         int chain;
2153
2154         read_lock_bh(&tbl->lock);
2155         for (chain = 0; chain <= tbl->hash_mask; chain++) {
2156                 struct neighbour *n;
2157
2158                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2159                         cb(n, cookie);
2160         }
2161         read_unlock_bh(&tbl->lock);
2162 }
2163 EXPORT_SYMBOL(neigh_for_each);
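/*
 * Illustrative sketch, not part of this file: neigh_for_each() runs
 * the callback under tbl->lock (read side, BH disabled), so the
 * callback must not sleep or re-take table locks.  A hypothetical
 * count of the entries on one device:
 */
#if 0	/* example only */
struct count_ctx {
	struct net_device *dev;
	int count;
};

static void count_on_dev(struct neighbour *n, void *cookie)
{
	struct count_ctx *ctx = cookie;

	if (n->dev == ctx->dev)
		ctx->count++;
}

	/* at the call site, with my_tbl as in the earlier sketch: */
	struct count_ctx ctx = { .dev = dev, .count = 0 };
	neigh_for_each(&my_tbl, count_on_dev, &ctx);
#endif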
2164
2165 /* The tbl->lock must be held as a writer and BH disabled. */
2166 void __neigh_for_each_release(struct neigh_table *tbl,
2167                               int (*cb)(struct neighbour *))
2168 {
2169         int chain;
2170
2171         for (chain = 0; chain <= tbl->hash_mask; chain++) {
2172                 struct neighbour *n, **np;
2173
2174                 np = &tbl->hash_buckets[chain];
2175                 while ((n = *np) != NULL) {
2176                         int release;
2177
2178                         write_lock(&n->lock);
2179                         release = cb(n);
2180                         if (release) {
2181                                 *np = n->next;
2182                                 n->dead = 1;
2183                         } else
2184                                 np = &n->next;
2185                         write_unlock(&n->lock);
2186                         if (release)
2187                                 neigh_cleanup_and_release(n);
2188                 }
2189         }
2190 }
2191 EXPORT_SYMBOL(__neigh_for_each_release);
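/*
 * Illustrative sketch, not part of this file: unlike neigh_for_each(),
 * the caller of __neigh_for_each_release() must already hold tbl->lock
 * as a writer with BH disabled, and a nonzero return from the callback
 * unlinks the entry.  A hypothetical purge of failed entries:
 */
#if 0	/* example only */
static int release_if_failed(struct neighbour *n)
{
	return n->nud_state & NUD_FAILED;
}

	write_lock_bh(&tbl->lock);
	__neigh_for_each_release(tbl, release_if_failed);
	write_unlock_bh(&tbl->lock);
#endif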
2192
2193 #ifdef CONFIG_PROC_FS
2194
2195 static struct neighbour *neigh_get_first(struct seq_file *seq)
2196 {
2197         struct neigh_seq_state *state = seq->private;
2198         struct net *net = seq_file_net(seq);
2199         struct neigh_table *tbl = state->tbl;
2200         struct neighbour *n = NULL;
2201         int bucket = state->bucket;
2202
2203         state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2204         for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2205                 n = tbl->hash_buckets[bucket];
2206
2207                 while (n) {
2208                         if (!net_eq(dev_net(n->dev), net))
2209                                 goto next;
2210                         if (state->neigh_sub_iter) {
2211                                 loff_t fakep = 0;
2212                                 void *v;
2213
2214                                 v = state->neigh_sub_iter(state, n, &fakep);
2215                                 if (!v)
2216                                         goto next;
2217                         }
2218                         if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2219                                 break;
2220                         if (n->nud_state & ~NUD_NOARP)
2221                                 break;
2222                 next:
2223                         n = n->next;
2224                 }
2225
2226                 if (n)
2227                         break;
2228         }
2229         state->bucket = bucket;
2230
2231         return n;
2232 }
2233
2234 static struct neighbour *neigh_get_next(struct seq_file *seq,
2235                                         struct neighbour *n,
2236                                         loff_t *pos)
2237 {
2238         struct neigh_seq_state *state = seq->private;
2239         struct net *net = seq_file_net(seq);
2240         struct neigh_table *tbl = state->tbl;
2241
2242         if (state->neigh_sub_iter) {
2243                 void *v = state->neigh_sub_iter(state, n, pos);
2244                 if (v)
2245                         return n;
2246         }
2247         n = n->next;
2248
2249         while (1) {
2250                 while (n) {
2251                         if (!net_eq(dev_net(n->dev), net))
2252                                 goto next;
2253                         if (state->neigh_sub_iter) {
2254                                 void *v = state->neigh_sub_iter(state, n, pos);
2255                                 if (v)
2256                                         return n;
2257                                 goto next;
2258                         }
2259                         if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2260                                 break;
2261
2262                         if (n->nud_state & ~NUD_NOARP)
2263                                 break;
2264                 next:
2265                         n = n->next;
2266                 }
2267
2268                 if (n)
2269                         break;
2270
2271                 if (++state->bucket > tbl->hash_mask)
2272                         break;
2273
2274                 n = tbl->hash_buckets[state->bucket];
2275         }
2276
2277         if (n && pos)
2278                 --(*pos);
2279         return n;
2280 }
2281
2282 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2283 {
2284         struct neighbour *n = neigh_get_first(seq);
2285
2286         if (n) {
2287                 --(*pos);
2288                 while (*pos) {
2289                         n = neigh_get_next(seq, n, pos);
2290                         if (!n)
2291                                 break;
2292                 }
2293         }
2294         return *pos ? NULL : n;
2295 }
2296
2297 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2298 {
2299         struct neigh_seq_state *state = seq->private;
2300         struct net *net = seq_file_net(seq);
2301         struct neigh_table *tbl = state->tbl;
2302         struct pneigh_entry *pn = NULL;
2303         int bucket = state->bucket;
2304
2305         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2306         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2307                 pn = tbl->phash_buckets[bucket];
2308                 while (pn && !net_eq(pneigh_net(pn), net))
2309                         pn = pn->next;
2310                 if (pn)
2311                         break;
2312         }
2313         state->bucket = bucket;
2314
2315         return pn;
2316 }
2317
2318 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2319                                             struct pneigh_entry *pn,
2320                                             loff_t *pos)
2321 {
2322         struct neigh_seq_state *state = seq->private;
2323         struct net *net = seq_file_net(seq);
2324         struct neigh_table *tbl = state->tbl;
2325
2326         pn = pn->next;
2327         while (!pn) {
2328                 if (++state->bucket > PNEIGH_HASHMASK)
2329                         break;
2330                 pn = tbl->phash_buckets[state->bucket];
2331                 while (pn && !net_eq(pneigh_net(pn), net))
2332                         pn = pn->next;
2333                 if (pn)
2334                         break;
2335         }
2336
2337         if (pn && pos)
2338                 --(*pos);
2339
2340         return pn;
2341 }
2342
2343 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2344 {
2345         struct pneigh_entry *pn = pneigh_get_first(seq);
2346
2347         if (pn) {
2348                 --(*pos);
2349                 while (*pos) {
2350                         pn = pneigh_get_next(seq, pn, pos);
2351                         if (!pn)
2352                                 break;
2353                 }
2354         }
2355         return *pos ? NULL : pn;
2356 }
2357
2358 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2359 {
2360         struct neigh_seq_state *state = seq->private;
2361         void *rc;
2362         loff_t idxpos = *pos;
2363
2364         rc = neigh_get_idx(seq, &idxpos);
2365         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2366                 rc = pneigh_get_idx(seq, &idxpos);
2367
2368         return rc;
2369 }
2370
2371 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2372         __acquires(tbl->lock)
2373 {
2374         struct neigh_seq_state *state = seq->private;
2375
2376         state->tbl = tbl;
2377         state->bucket = 0;
2378         state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2379
2380         read_lock_bh(&tbl->lock);
2381
2382         return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2383 }
2384 EXPORT_SYMBOL(neigh_seq_start);
2385
2386 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2387 {
2388         struct neigh_seq_state *state;
2389         void *rc;
2390
2391         if (v == SEQ_START_TOKEN) {
2392                 rc = neigh_get_first(seq);
2393                 goto out;
2394         }
2395
2396         state = seq->private;
2397         if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2398                 rc = neigh_get_next(seq, v, NULL);
2399                 if (rc)
2400                         goto out;
2401                 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2402                         rc = pneigh_get_first(seq);
2403         } else {
2404                 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2405                 rc = pneigh_get_next(seq, v, NULL);
2406         }
2407 out:
2408         ++(*pos);
2409         return rc;
2410 }
2411 EXPORT_SYMBOL(neigh_seq_next);
2412
2413 void neigh_seq_stop(struct seq_file *seq, void *v)
2414         __releases(tbl->lock)
2415 {
2416         struct neigh_seq_state *state = seq->private;
2417         struct neigh_table *tbl = state->tbl;
2418
2419         read_unlock_bh(&tbl->lock);
2420 }
2421 EXPORT_SYMBOL(neigh_seq_stop);
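/*
 * Illustrative sketch, not part of this file: a protocol's /proc
 * iterator is a thin wrapper around the three helpers above (this
 * mirrors the way the /proc/net/arp iterator is built; my_tbl and
 * my_seq_show are hypothetical):
 */
#if 0	/* example only */
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &my_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= my_seq_show,
};
#endif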
2422
2423 /* statistics via seq_file */
2424
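/*
 * Position encoding for the two iterators below: *pos == 0 yields
 * SEQ_START_TOKEN (the header line), and *pos == n + 1 selects
 * possible CPU n, with holes in the possible-CPU map skipped over.
 */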
2425 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2426 {
2427         struct neigh_table *tbl = seq->private;
2428         int cpu;
2429
2430         if (*pos == 0)
2431                 return SEQ_START_TOKEN;
2432
2433         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2434                 if (!cpu_possible(cpu))
2435                         continue;
2436                 *pos = cpu+1;
2437                 return per_cpu_ptr(tbl->stats, cpu);
2438         }
2439         return NULL;
2440 }
2441
2442 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2443 {
2444         struct neigh_table *tbl = seq->private;
2445         int cpu;
2446
2447         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2448                 if (!cpu_possible(cpu))
2449                         continue;
2450                 *pos = cpu+1;
2451                 return per_cpu_ptr(tbl->stats, cpu);
2452         }
2453         return NULL;
2454 }
2455
2456 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2457 {
2458
2459 }
2460
2461 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2462 {
2463         struct neigh_table *tbl = seq->private;
2464         struct neigh_statistics *st = v;
2465
2466         if (v == SEQ_START_TOKEN) {
2467                 seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2468                 return 0;
2469         }
2470
2471         seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2472                         "%08lx %08lx  %08lx %08lx %08lx\n",
2473                    atomic_read(&tbl->entries),
2474
2475                    st->allocs,
2476                    st->destroys,
2477                    st->hash_grows,
2478
2479                    st->lookups,
2480                    st->hits,
2481
2482                    st->res_failed,
2483
2484                    st->rcv_probes_mcast,
2485                    st->rcv_probes_ucast,
2486
2487                    st->periodic_gc_runs,
2488                    st->forced_gc_runs,
2489                    st->unres_discards
2490                    );
2491
2492         return 0;
2493 }
2494
2495 static const struct seq_operations neigh_stat_seq_ops = {
2496         .start  = neigh_stat_seq_start,
2497         .next   = neigh_stat_seq_next,
2498         .stop   = neigh_stat_seq_stop,
2499         .show   = neigh_stat_seq_show,
2500 };
2501
2502 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2503 {
2504         int ret = seq_open(file, &neigh_stat_seq_ops);
2505
2506         if (!ret) {
2507                 struct seq_file *sf = file->private_data;
2508                 sf->private = PDE(inode)->data;
2509         }
2510         return ret;
2511 }
2512
2513 static const struct file_operations neigh_stat_seq_fops = {
2514         .owner   = THIS_MODULE,
2515         .open    = neigh_stat_seq_open,
2516         .read    = seq_read,
2517         .llseek  = seq_lseek,
2518         .release = seq_release,
2519 };
2520
2521 #endif /* CONFIG_PROC_FS */
2522
2523 static inline size_t neigh_nlmsg_size(void)
2524 {
2525         return NLMSG_ALIGN(sizeof(struct ndmsg))
2526                + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2527                + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2528                + nla_total_size(sizeof(struct nda_cacheinfo))
2529                + nla_total_size(4); /* NDA_PROBES */
2530 }
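/*
 * Because the size above is computed for the worst case (MAX_ADDR_LEN
 * for both addresses), neigh_fill_info() can never legitimately
 * return -EMSGSIZE on such an skb; __neigh_notify() below therefore
 * treats that return value as a bug.
 */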
2531
2532 static void __neigh_notify(struct neighbour *n, int type, int flags)
2533 {
2534         struct net *net = dev_net(n->dev);
2535         struct sk_buff *skb;
2536         int err = -ENOBUFS;
2537
2538         skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2539         if (skb == NULL)
2540                 goto errout;
2541
2542         err = neigh_fill_info(skb, n, 0, 0, type, flags);
2543         if (err < 0) {
2544                 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2545                 WARN_ON(err == -EMSGSIZE);
2546                 kfree_skb(skb);
2547                 goto errout;
2548         }
2549         rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2550         return;
2551 errout:
2552         if (err < 0)
2553                 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2554 }
2555
2556 #ifdef CONFIG_ARPD
2557 void neigh_app_ns(struct neighbour *n)
2558 {
2559         __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2560 }
2561 EXPORT_SYMBOL(neigh_app_ns);
2562 #endif /* CONFIG_ARPD */
2563
2564 #ifdef CONFIG_SYSCTL
2565
2566 #define NEIGH_VARS_MAX 19
2567
2568 static struct neigh_sysctl_table {
2569         struct ctl_table_header *sysctl_header;
2570         struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2571         char *dev_name;
2572 } neigh_sysctl_template __read_mostly = {
2573         .neigh_vars = {
2574                 {
2575                         .procname       = "mcast_solicit",
2576                         .maxlen         = sizeof(int),
2577                         .mode           = 0644,
2578                         .proc_handler   = proc_dointvec,
2579                 },
2580                 {
2581                         .procname       = "ucast_solicit",
2582                         .maxlen         = sizeof(int),
2583                         .mode           = 0644,
2584                         .proc_handler   = proc_dointvec,
2585                 },
2586                 {
2587                         .procname       = "app_solicit",
2588                         .maxlen         = sizeof(int),
2589                         .mode           = 0644,
2590                         .proc_handler   = proc_dointvec,
2591                 },
2592                 {
2593                         .procname       = "retrans_time",
2594                         .maxlen         = sizeof(int),
2595                         .mode           = 0644,
2596                         .proc_handler   = proc_dointvec_userhz_jiffies,
2597                 },
2598                 {
2599                         .procname       = "base_reachable_time",
2600                         .maxlen         = sizeof(int),
2601                         .mode           = 0644,
2602                         .proc_handler   = proc_dointvec_jiffies,
2603                 },
2604                 {
2605                         .procname       = "delay_first_probe_time",
2606                         .maxlen         = sizeof(int),
2607                         .mode           = 0644,
2608                         .proc_handler   = proc_dointvec_jiffies,
2609                 },
2610                 {
2611                         .procname       = "gc_stale_time",
2612                         .maxlen         = sizeof(int),
2613                         .mode           = 0644,
2614                         .proc_handler   = proc_dointvec_jiffies,
2615                 },
2616                 {
2617                         .procname       = "unres_qlen",
2618                         .maxlen         = sizeof(int),
2619                         .mode           = 0644,
2620                         .proc_handler   = proc_dointvec,
2621                 },
2622                 {
2623                         .procname       = "proxy_qlen",
2624                         .maxlen         = sizeof(int),
2625                         .mode           = 0644,
2626                         .proc_handler   = proc_dointvec,
2627                 },
2628                 {
2629                         .procname       = "anycast_delay",
2630                         .maxlen         = sizeof(int),
2631                         .mode           = 0644,
2632                         .proc_handler   = proc_dointvec_userhz_jiffies,
2633                 },
2634                 {
2635                         .procname       = "proxy_delay",
2636                         .maxlen         = sizeof(int),
2637                         .mode           = 0644,
2638                         .proc_handler   = proc_dointvec_userhz_jiffies,
2639                 },
2640                 {
2641                         .procname       = "locktime",
2642                         .maxlen         = sizeof(int),
2643                         .mode           = 0644,
2644                         .proc_handler   = proc_dointvec_userhz_jiffies,
2645                 },
2646                 {
2647                         .procname       = "retrans_time_ms",
2648                         .maxlen         = sizeof(int),
2649                         .mode           = 0644,
2650                         .proc_handler   = proc_dointvec_ms_jiffies,
2651                 },
2652                 {
2653                         .procname       = "base_reachable_time_ms",
2654                         .maxlen         = sizeof(int),
2655                         .mode           = 0644,
2656                         .proc_handler   = proc_dointvec_ms_jiffies,
2657                 },
2658                 {
2659                         .procname       = "gc_interval",
2660                         .maxlen         = sizeof(int),
2661                         .mode           = 0644,
2662                         .proc_handler   = proc_dointvec_jiffies,
2663                 },
2664                 {
2665                         .procname       = "gc_thresh1",
2666                         .maxlen         = sizeof(int),
2667                         .mode           = 0644,
2668                         .proc_handler   = proc_dointvec,
2669                 },
2670                 {
2671                         .procname       = "gc_thresh2",
2672                         .maxlen         = sizeof(int),
2673                         .mode           = 0644,
2674                         .proc_handler   = proc_dointvec,
2675                 },
2676                 {
2677                         .procname       = "gc_thresh3",
2678                         .maxlen         = sizeof(int),
2679                         .mode           = 0644,
2680                         .proc_handler   = proc_dointvec,
2681                 },
2682                 {},
2683         },
2684 };
2685
2686 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2687                           char *p_name, proc_handler *handler)
2688 {
2689         struct neigh_sysctl_table *t;
2690         const char *dev_name_source = NULL;
2691
2692 #define NEIGH_CTL_PATH_ROOT     0
2693 #define NEIGH_CTL_PATH_PROTO    1
2694 #define NEIGH_CTL_PATH_NEIGH    2
2695 #define NEIGH_CTL_PATH_DEV      3
2696
2697         struct ctl_path neigh_path[] = {
2698                 { .procname = "net",     },
2699                 { .procname = "proto",   },
2700                 { .procname = "neigh",   },
2701                 { .procname = "default", },
2702                 { },
2703         };
2704
2705         t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2706         if (!t)
2707                 goto err;
2708
2709         t->neigh_vars[0].data  = &p->mcast_probes;
2710         t->neigh_vars[1].data  = &p->ucast_probes;
2711         t->neigh_vars[2].data  = &p->app_probes;
2712         t->neigh_vars[3].data  = &p->retrans_time;
2713         t->neigh_vars[4].data  = &p->base_reachable_time;
2714         t->neigh_vars[5].data  = &p->delay_probe_time;
2715         t->neigh_vars[6].data  = &p->gc_staletime;
2716         t->neigh_vars[7].data  = &p->queue_len;
2717         t->neigh_vars[8].data  = &p->proxy_qlen;
2718         t->neigh_vars[9].data  = &p->anycast_delay;
2719         t->neigh_vars[10].data = &p->proxy_delay;
2720         t->neigh_vars[11].data = &p->locktime;
2721         t->neigh_vars[12].data = &p->retrans_time;
2722         t->neigh_vars[13].data = &p->base_reachable_time;
2723
2724         if (dev) {
2725                 dev_name_source = dev->name;
2726                 /* Terminate the table early */
2727                 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2728         } else {
2729                 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2730                 t->neigh_vars[14].data = (int *)(p + 1);
2731                 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2732                 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2733                 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2734         }
2735
2736
2737         if (handler) {
2738                 /* RetransTime */
2739                 t->neigh_vars[3].proc_handler = handler;
2740                 t->neigh_vars[3].extra1 = dev;
2741                 /* ReachableTime */
2742                 t->neigh_vars[4].proc_handler = handler;
2743                 t->neigh_vars[4].extra1 = dev;
2744                 /* RetransTime (in milliseconds)*/
2745                 t->neigh_vars[12].proc_handler = handler;
2746                 t->neigh_vars[12].extra1 = dev;
2747                 /* ReachableTime (in milliseconds) */
2748                 t->neigh_vars[13].proc_handler = handler;
2749                 t->neigh_vars[13].extra1 = dev;
2750         }
2751
2752         t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2753         if (!t->dev_name)
2754                 goto free;
2755
2756         neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2757         neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2758
2759         t->sysctl_header =
2760                 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2761         if (!t->sysctl_header)
2762                 goto free_procname;
2763
2764         p->sysctl_table = t;
2765         return 0;
2766
2767 free_procname:
2768         kfree(t->dev_name);
2769 free:
2770         kfree(t);
2771 err:
2772         return -ENOBUFS;
2773 }
2774 EXPORT_SYMBOL(neigh_sysctl_register);
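/*
 * Illustrative sketch, not part of this file: protocols register the
 * knobs once per device plus once for the defaults, roughly as IPv4
 * does for ARP (simplified; see the real call sites under net/ipv4):
 */
#if 0	/* example only */
	neigh_sysctl_register(dev, in_dev->arp_parms, "ipv4", NULL);
#endif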
2775
2776 void neigh_sysctl_unregister(struct neigh_parms *p)
2777 {
2778         if (p->sysctl_table) {
2779                 struct neigh_sysctl_table *t = p->sysctl_table;
2780                 p->sysctl_table = NULL;
2781                 unregister_sysctl_table(t->sysctl_header);
2782                 kfree(t->dev_name);
2783                 kfree(t);
2784         }
2785 }
2786 EXPORT_SYMBOL(neigh_sysctl_unregister);
2787
2788 #endif  /* CONFIG_SYSCTL */
2789
2790 static int __init neigh_init(void)
2791 {
2792         rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2793         rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2794         rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2795
2796         rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2797         rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2798
2799         return 0;
2800 }
2801
2802 subsys_initcall(neigh_init);
2803