/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

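/*
 * All peer records are kept on a single global list, guarded by an rwlock.
 * The wait queue is woken when the list drains so that module unload can
 * wait for all outstanding records to go away (see
 * rxrpc_destroy_all_peers() below).
 */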
static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
	struct rtable *rt;
	struct flowi fl;
	int ret;

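	/* assume a standard Ethernet MTU until the route lookup tells us
	 * otherwise */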
	peer->if_mtu = 1500;

	memset(&fl, 0, sizeof(fl));

	switch (peer->srx.transport.family) {
	case AF_INET:
		fl.oif = 0;
		fl.proto = IPPROTO_UDP;
		fl.nl_u.ip4_u.saddr = 0;
		fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr;
		fl.nl_u.ip4_u.tos = 0;
		/* assume AFS.CM talking to AFS.FS */
		fl.uli_u.ports.sport = htons(7001);
		fl.uli_u.ports.dport = htons(7000);
		break;
	default:
		BUG();
	}

	ret = ip_route_output_key(&init_net, &rt, &fl);
	if (ret < 0) {
		_leave(" [route err %d]", ret);
		return;
	}

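	/* use the path MTU recorded against the route's destination entry */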
	peer->if_mtu = dst_mtu(&rt->u.dst);
	dst_release(&rt->u.dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		rxrpc_assess_MTU_size(peer);
		peer->mtu = peer->if_mtu;

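		/* work out the per-packet header overhead so that the
		 * maximum payload per packet can be derived from the MTU */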
		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

		peer->hdrsize += sizeof(struct rxrpc_header);
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * obtain a remote transport endpoint for the specified address, reusing an
 * extant record if one matches
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* search the peer list first */
	read_lock_bh(&rxrpc_peer_lock);
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search under the write lock in case another CPU added a
	 * matching peer whilst the lock was dropped */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_peer_lock);

	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;

	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%pI4+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     &peer->srx.transport.sin.sin_addr,
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

new_UDP_peer:
	/* the loop above ran off the end of the list, so peer doesn't point
	 * at a valid record here and must not be dereferenced */
	_net("Rx UDP DGRAM from NEW peer");
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}

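	/* the final ref has gone - defer destruction to the rxrpc
	 * workqueue */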
	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);

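	/* if that was the last record, let rxrpc_destroy_all_peers() know
	 * so that module unload can proceed */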
	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);
	_leave("");
}

/*
 * wait for all the currently extant peer records to be destroyed before
 * module unload completes
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);

		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}