/*
 *      Forwarding decision
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb,
                         void (*__packet_hook)(const struct net_bridge_port *p,
                                               struct sk_buff *skb));

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
                p->state == BR_STATE_FORWARDING;
}

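/* Final transmit step: restore the Ethernet header that was pulled on
 * receive and queue the skb on the outgoing device, dropping frames the
 * device cannot forward (e.g. larger than its MTU).
 */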
int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
{
        if (!is_skb_forwardable(skb->dev, skb)) {
                kfree_skb(skb);
        } else {
                skb_push(skb, ETH_HLEN);
                br_drop_fake_rtable(skb);
                skb_sender_cpu_clear(skb);
                dev_queue_xmit(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

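/* Run the NF_BR_POST_ROUTING hook; if the packet is accepted,
 * br_dev_queue_push_xmit() transmits it on skb->dev.
 */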
int br_forward_finish(struct sock *sk, struct sk_buff *skb)
{
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
                       NULL, skb->dev,
                       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

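/* Deliver a packet originating from the bridge device itself to one port:
 * apply egress VLAN handling, then either hand it to netpoll when that is
 * active or send it through the NF_BR_LOCAL_OUT hook.
 */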
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
        if (!skb)
                return;

        skb->dev = to->dev;

        if (unlikely(netpoll_tx_running(to->br->dev))) {
                if (!is_skb_forwardable(skb->dev, skb))
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
                        br_netpoll_send_skb(to, skb);
                }
                return;
        }

        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
                NULL, skb->dev,
                br_forward_finish);
}

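/* Forward a packet received on one port out of another: refuse LRO
 * aggregates, apply egress VLAN handling, fix up the checksum state and
 * pass the packet through the NF_BR_FORWARD hook.
 */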
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
        struct net_device *indev;

        if (skb_warn_if_lro(skb)) {
                kfree_skb(skb);
                return;
        }

        skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
        if (!skb)
                return;

        indev = skb->dev;
        skb->dev = to->dev;
        skb_forward_csum(skb);

        NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
                indev, skb->dev,
                br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        if (to && should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }

        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);

/* called with rcu_read_lock */
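/* If skb0 is non-NULL the caller still needs skb (e.g. for local delivery),
 * so only a clone is forwarded and skb is never freed here.
 */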
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
        if (should_deliver(to, skb)) {
                if (skb0)
                        deliver_clone(to, skb, __br_forward);
                else
                        __br_forward(to, skb);
                return;
        }

        if (!skb0)
                kfree_skb(skb);
}

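/* Clone the skb and hand the clone to __packet_hook; the caller's skb is
 * left untouched.  A failed clone is accounted as a tx drop on the bridge
 * device.
 */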
static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb,
                         void (*__packet_hook)(const struct net_bridge_port *p,
                                               struct sk_buff *skb))
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return -ENOMEM;
        }

        __packet_hook(prev, skb);
        return 0;
}

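/* Deliver a clone to the previously matched port (if any) and return the
 * current port as the new candidate; delivery of the original skb to the
 * last matching port is left to the caller, saving one clone.
 */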
static struct net_bridge_port *maybe_deliver(
        struct net_bridge_port *prev, struct net_bridge_port *p,
        struct sk_buff *skb,
        void (*__packet_hook)(const struct net_bridge_port *p,
                              struct sk_buff *skb))
{
        int err;

        if (!should_deliver(p, skb))
                return prev;

        if (!prev)
                goto out;

        err = deliver_clone(prev, skb, __packet_hook);
        if (err)
                return ERR_PTR(err);

out:
        return p;
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                     struct sk_buff *skb0,
                     void (*__packet_hook)(const struct net_bridge_port *p,
                                           struct sk_buff *skb),
                     bool unicast)
{
        struct net_bridge_port *p;
        struct net_bridge_port *prev;

        prev = NULL;

        list_for_each_entry_rcu(p, &br->port_list, list) {
                /* Do not flood unicast traffic to ports that turn it off */
                if (unicast && !(p->flags & BR_FLOOD))
                        continue;

                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
                if ((p->flags & BR_PROXYARP_WIFI) &&
                    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
                        continue;

                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;
        }

        if (!prev)
                goto out;

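        /* Deliver to the last matched port: use the original skb unless the
         * caller (skb0 != NULL) still needs it, in which case send a clone.
         */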
        if (skb0)
                deliver_clone(prev, skb, __packet_hook);
        else
                __packet_hook(prev, skb);
        return;

out:
        if (!skb0)
                kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
        br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
                      struct sk_buff *skb2, bool unicast)
{
        br_flood(br, skb, skb2, __br_forward, unicast);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                               struct sk_buff *skb, struct sk_buff *skb0,
                               void (*__packet_hook)(
                                        const struct net_bridge_port *p,
                                        struct sk_buff *skb))
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
        struct hlist_node *rp;

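        /* Walk the group's port list and the router port list in step; both
         * are kept ordered by port pointer value, so a port that appears on
         * both lists is only delivered to once.
         */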
        rp = rcu_dereference(hlist_first_rcu(&br->router_list));
        p = mdst ? rcu_dereference(mdst->ports) : NULL;
        while (p || rp) {
                struct net_bridge_port *port, *lport, *rport;

                lport = p ? p->port : NULL;
                rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
                             NULL;

                port = (unsigned long)lport > (unsigned long)rport ?
                       lport : rport;

                prev = maybe_deliver(prev, port, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;

                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
                        rp = rcu_dereference(hlist_next_rcu(rp));
        }

        if (!prev)
                goto out;

        if (skb0)
                deliver_clone(prev, skb, __packet_hook);
        else
                __packet_hook(prev, skb);
        return;

out:
        if (!skb0)
                kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb)
{
        br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb, struct sk_buff *skb2)
{
        br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif