sctp_bsd_addr.c revision 296052
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/sctp_bsd_addr.c 296052 2016-02-25 18:46:06Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;

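/*
 * Wake the iterator thread so it re-checks its work queue; the thread
 * sleeps on sctp_it_ctl.iterator_running in sctp_iterator_thread() below.
 */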
void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/* In FreeBSD this thread never terminates. */
	for (;;) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		sctp_iterator_worker();
	}
}

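/*
 * Start the iterator kernel process. A second call is a no-op, so the
 * locks and the work queue are initialized exactly once.
 */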
void
sctp_startup_iterator(void)
{
	if (sctp_it_ctl.thread_proc) {
		/* You only get one */
		return;
	}
	/* Initialize global locks here, thus only once. */
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

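/*
 * Mirror the kernel's IPv6 address flags into the sctp_ifa and mark
 * deprecated (unless ip6_use_deprecated is set), detached, anycast and
 * not-ready addresses as unusable for SCTP.
 */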
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags &
		    IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */

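/* Return 1 if SCTP is willing to use addresses on this kind of interface. */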
static uint32_t
sctp_is_desired_interface_type(struct ifnet *ifn)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifn->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_STF:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_PROPVIRTUAL:	/* NetGraph Virtual too */
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}

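/*
 * Walk every interface of a desired type and register each usable
 * IPv4/IPv6 address with the given VRF via sctp_add_addr_to_vrf().
 */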
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

#ifdef INET6
	struct in6_ifaddr *ifa6;

#endif

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (sctp_is_desired_interface_type(ifn) == 0) {
			/* non desired type */
			continue;
		}
		IF_ADDR_RLOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_RUNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

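/*
 * Allocate the VRF structure for this id and populate it with the
 * addresses currently configured on the system.
 */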
void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about return here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifns for this vrf and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

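/*
 * Interface address change handler: RTM_ADD and RTM_DELETE events are
 * pushed into the SCTP address tables, everything else is ignored.
 */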
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		return;
	}
	/*
	 * BSD only has one VRF; if this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
		/* non desired type */
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* non inet/inet6 skip */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {

		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);

		/*
		 * We don't bump the refcount here, so the final delete will
		 * happen once it completes.
		 */
	}
}

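/*
 * For every interface accepted by the predicate, run each of its
 * addresses through sctp_addr_change() as an add or a delete.
 */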
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

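/*
 * Allocate an mbuf for a message of space_needed bytes via m_getm2().
 * Only the first mbuf of the returned chain is kept; if allonebuf is set
 * and that single buffer cannot hold space_needed bytes, the allocation
 * is freed and NULL is returned. A typical call (for illustration only)
 * would be sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA).
 */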
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}


#ifdef SCTP_PACKET_LOGGING
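/*
 * Append a copy of the packet in m to the global packet log, a fixed
 * size ring buffer of SCTP_PACKET_LOG_SIZE bytes. Writers normally use
 * only atomics; the lock is taken once the number of concurrent writers
 * exceeds SCTP_PKTLOG_WRITERS_NEED_LOCK.
 */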
void
sctp_packet_log(struct mbuf *m)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int length;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout:
	 *  - sizeof this entry   (total_len)
	 *  - previous end        (value)
	 *  - ticks of log        (ticks)
	 *  o ip packet
	 *  o as logged
	 *  - where this started  (thisbegin)
	 *  x <-- end points here
	 */
	length = SCTP_HEADER_LEN(m);
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet, the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;

	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

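/*
 * The first int written to target is the current end offset of the ring
 * buffer, followed by up to length - sizeof(int) bytes of raw log data;
 * the total number of bytes copied is returned.
 */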
int
sctp_copy_out_packet_log(uint8_t * target, int length)
{
	/*
	 * We wind through the packet log from the start, copying up to
	 * length bytes out. We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif