/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
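
/*
 * Overview: drivers report carrier and dormancy changes (usually via
 * the netif_carrier_*() helpers), which ends up in
 * linkwatch_fire_event().  Affected devices are queued on a single
 * global event list and processed from a delayed work item under the
 * RTNL lock, which activates or deactivates the device's qdisc,
 * applies the RFC 2863 operational-state policy and notifies
 * userspace of the change.  Non-urgent events are rate-limited to one
 * run per second.
 */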

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>


enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

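/*
 * Map the device's current state onto an RFC 2863 operational state:
 * no carrier maps to IF_OPER_DOWN (or IF_OPER_LOWERLAYERDOWN when the
 * device is stacked on another one, i.e. ifindex != iflink), a dormant
 * device maps to IF_OPER_DORMANT, and everything else is IF_OPER_UP.
 */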
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev->iflink ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}


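/*
 * Apply the operstate policy for the configured link mode.  In
 * IF_LINK_MODE_DORMANT the reported state is capped at
 * IF_OPER_DORMANT even when the lower layer is up; raising it further
 * is left to userspace (RFC 2863 describes this for interfaces that
 * are still waiting for e.g. authentication).  dev->operstate is
 * updated under dev_base_lock.
 */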
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock_bh(&dev_base_lock);

	switch (dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;

	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock_bh(&dev_base_lock);
}


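/*
 * An event is urgent when delaying it could stall traffic: the device
 * is running with carrier, but the active qdisc no longer matches the
 * carrier state (qdisc_tx_changing()), so the transition should be
 * handled without the usual one-second rate limit.
 */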
static bool linkwatch_urgent_event(struct net_device *dev)
{
	return netif_running(dev) && netif_carrier_ok(dev) &&
		qdisc_tx_changing(dev);
}


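/*
 * Queue the device on the global event list, taking a reference that
 * linkwatch_do_dev() drops once the event has been handled.  The
 * list_empty() check makes this idempotent if the device is somehow
 * already queued.
 */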
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}


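/*
 * Schedule the delayed work that drains the event list.  Non-urgent
 * requests honour the rate limit encoded in linkwatch_nextevent;
 * urgent ones force an immediate run, cancelling and requeueing the
 * work if it is already pending with a delay.
 */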
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * schedule_delayed_work() returns true only if the work was not
	 * already pending.  So this test is true if we've just scheduled
	 * an immediate run, or if we don't need an immediate run and one
	 * is already pending.  Either way there is nothing left to do.
	 */
	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
		return;

	/* Don't bother if there is nothing urgent. */
	if (!test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* It's already running which is good enough. */
	if (!cancel_delayed_work(&linkwatch_work))
		return;

	/* Otherwise we reschedule it again for immediate execution. */
	schedule_delayed_work(&linkwatch_work, 0);
}


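/*
 * Handle one queued device: clear the pending bit so new events can be
 * queued again, re-evaluate the RFC 2863 policy, and if the interface
 * is up, (de)activate its qdisc and notify userspace of the change.
 * Drops the reference taken by linkwatch_add_event().
 */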
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the caller's removal of dev from the event list is
	 * complete, since the list entry can be rewritten as soon as we
	 * clear the bit below.
	 */
	smp_mb__before_clear_bit();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}
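/*
 * Drain the event list.  With urgent_only set, only urgent events are
 * handled and the rest are requeued; otherwise the next full run is
 * pushed at least one second into the future before processing starts.
 */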
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {
		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
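/*
 * Remove a device from the event list and, if an event was pending,
 * handle it synchronously, so that no queued event or reference
 * outlives the caller (e.g. while the device is being unregistered).
 */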
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}


/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}


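/*
 * Delayed-work handler.  If the rate-limit window has not yet expired
 * (linkwatch_nextevent is still in the future), this run was requested
 * by an urgent event, so only urgent events are processed.
 */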
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}


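/*
 * Entry point for link state changes: queue the device unless an
 * event is already pending, and schedule the work, immediately if the
 * event is urgent.
 */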
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state))
		linkwatch_add_event(dev);
	else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}

EXPORT_SYMBOL(linkwatch_fire_event);
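
/*
 * Usage sketch (hypothetical driver, for illustration only): a driver
 * normally never calls linkwatch_fire_event() directly.  It reports
 * carrier changes through the netif_carrier_*() helpers, which fire
 * the event only on an actual state change:
 *
 *	static void example_link_irq(struct example_priv *priv)
 *	{
 *		if (example_phy_link_up(priv))	// driver-specific check
 *			netif_carrier_on(priv->netdev);
 *		else
 *			netif_carrier_off(priv->netdev);
 *	}
 *
 * struct example_priv and example_phy_link_up() are made-up names;
 * a real driver has its own equivalents.
 */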