/* RxRPC point-to-point transport session management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_transport_reaper(struct work_struct *work);

static LIST_HEAD(rxrpc_transports);
static DEFINE_RWLOCK(rxrpc_transport_lock);
static unsigned long rxrpc_transport_timeout = 3600 * 24;
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
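
/* Note on lifetime handling: a transport whose usage count drops to zero is
 * not freed immediately; it lingers on the list for rxrpc_transport_timeout
 * seconds (one day by default), during which a lookup may resurrect it, and
 * the reaper work item discards it only after that interval has passed.
 */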

/*
 * allocate a new transport session manager
 */
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
						     struct rxrpc_peer *peer,
						     gfp_t gfp)
{
	struct rxrpc_transport *trans;

	_enter("");

	trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
	if (trans) {
		trans->local = local;
		trans->peer = peer;
		INIT_LIST_HEAD(&trans->link);
		trans->bundles = RB_ROOT;
		trans->client_conns = RB_ROOT;
		trans->server_conns = RB_ROOT;
		skb_queue_head_init(&trans->error_queue);
		spin_lock_init(&trans->client_lock);
		rwlock_init(&trans->conn_lock);
		atomic_set(&trans->usage, 1);
		trans->debug_id = atomic_inc_return(&rxrpc_debug_id);

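		/* only UDP over IPv4 is handled at present; any other
		 * address family or socket type indicates a bug in the
		 * caller */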
		if (peer->srx.transport.family == AF_INET) {
			switch (peer->srx.transport_type) {
			case SOCK_DGRAM:
				INIT_WORK(&trans->error_handler,
					  rxrpc_UDP_error_handler);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}
	}

	_leave(" = %p", trans);
	return trans;
}

/*
 * obtain a transport session for the nominated endpoints
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
	       NIPQUAD(local->srx.transport.sin.sin_addr),
	       ntohs(local->srx.transport.sin.sin_port),
	       NIPQUAD(peer->srx.transport.sin.sin_addr),
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

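	/* now that the write lock is held, recheck the list in case another
	 * thread inserted a matching transport whilst the lock was dropped */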
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;

	rxrpc_get_local(trans->local);
	atomic_inc(&trans->peer->usage);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new,
	     trans->debug_id,
	     trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, atomic_read(&trans->usage));
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the transport connecting two endpoints
 */
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
					     struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans;

	_enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
	       NIPQUAD(local->srx.transport.sin.sin_addr),
	       ntohs(local->srx.transport.sin.sin_port),
	       NIPQUAD(peer->srx.transport.sin.sin_addr),
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list */
	read_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}

	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = NULL");
	return NULL;

found_extant_transport:
	atomic_inc(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = %p", trans);
	return trans;
}

/*
 * release a transport session
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

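	/* note the time of this put for the reaper; the stamp is set before
	 * the decrement so that it is valid by the time the usage count can
	 * be seen to reach zero */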
	trans->put_time = xtime.tv_sec;
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}

/*
 * clean up a transport session
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	rxrpc_purge_queue(&trans->error_queue);

	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}

/*
 * reap dead transports that have passed their expiry date
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = xtime.tv_sec;
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

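		/* skip transports that are still in use or that have been
		 * resurrected by rxrpc_get_transport() since their last
		 * put */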
		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_timeout;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - (long) now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}

/*
 * preemptively destroy all the transport session records rather than waiting
 * for them to time out
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

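	/* reducing the timeout to zero makes every unused transport
	 * immediately expired, so requeueing the reaper sweeps them all
	 * away */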
	rxrpc_transport_timeout = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}