/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/sctp_bsd_addr.c 283708 2015-05-29 12:03:02Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;

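/*
 * Wake the iterator kernel thread so that it re-checks its work queue.
 */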
void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

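/*
 * Body of the iterator thread: sleep on iterator_running and run
 * sctp_iterator_worker() each time sctp_wakeup_iterator() posts work.
 */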
static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/* In FreeBSD this thread never terminates. */
	for (;;) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		sctp_iterator_worker();
	}
}

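/*
 * Create the single iterator thread along with the locks and work queue
 * it uses.  If the thread already exists, do nothing.
 */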
void
sctp_startup_iterator(void)
{
	if (sctp_it_ctl.thread_proc) {
		/* You only get one */
		return;
	}
	/* Initialize global locks here, thus only once. */
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

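/*
 * Copy the IPv6 address flags from the kernel in6_ifaddr into the
 * sctp_ifa and mark deprecated, detached, anycast or not-ready
 * addresses as unusable.
 */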
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags & (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */

static uint32_t
sctp_is_desired_interface_type(struct ifnet *ifn)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifn->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_STF:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_PROPVIRTUAL:	/* NetGraph Virtual too */
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}

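/*
 * Walk the kernel interface list and register every usable IPv4/IPv6
 * address with the given SCTP VRF.
 */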
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply any locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we walk the list of
	 * IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

#ifdef INET6
	struct in6_ifaddr *ifa6;

#endif

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (sctp_is_desired_interface_type(ifn) == 0) {
			/* not a desired interface type */
			continue;
		}
		IF_ADDR_RLOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_RUNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

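/*
 * Allocate the VRF and populate it with the addresses currently
 * configured on the system.
 */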
void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about the return value here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifn's for this vrf and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

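/*
 * Called with RTM_ADD or RTM_DELETE when an interface address changes;
 * register or remove the address in the default VRF.
 */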
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF. If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
		/* not a desired interface type */
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* not inet/inet6, skip it */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);

		/*
		 * We don't bump the refcount here, so when it completes the
		 * final delete will happen.
		 */
	}
}

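/*
 * Apply sctp_addr_change() to every address of every interface accepted
 * by the predicate, either adding or deleting them.
 */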
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

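/*
 * Allocate an mbuf (chain) holding at least space_needed bytes.  If
 * allonebuf is set the caller requires the data to fit into a single
 * buffer, so fail rather than return a chain.
 */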
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
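/*
 * Append a copy of the packet in m to the circular packet log buffer.
 * Writers normally reserve space with atomic_cmpset; the pktlog lock is
 * only taken when too many writers are active at once.
 */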
void
sctp_packet_log(struct mbuf *m)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int length;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of one log entry:
	 *  - size of this entry        (total_len)
	 *  - previous end              (value)
	 *  - ticks when logged         (tick_tock)
	 *  - the ip packet, as logged
	 *  - where this entry started  (thisbegin)  <-- end points here
	 */
	length = SCTP_HEADER_LEN(m);
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet, the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

int
sctp_copy_out_packet_log(uint8_t * target, int length)
{
	/*
	 * We wind through the packet log, copying up to length bytes out.
	 * We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif