sctputil.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 *    this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in
16 *    the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 *    contributors may be used to endorse or promote products derived
20 *    from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: stable/11/sys/netinet/sctputil.c 330897 2018-03-14 03:19:51Z eadler $");
37
38#include <netinet/sctp_os.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctputil.h>
41#include <netinet/sctp_var.h>
42#include <netinet/sctp_sysctl.h>
43#ifdef INET6
44#include <netinet6/sctp6_var.h>
45#endif
46#include <netinet/sctp_header.h>
47#include <netinet/sctp_output.h>
48#include <netinet/sctp_uio.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
51#include <netinet/sctp_auth.h>
52#include <netinet/sctp_asconf.h>
53#include <netinet/sctp_bsd_addr.h>
54#if defined(INET6) || defined(INET)
55#include <netinet/tcp_var.h>
56#endif
57#include <netinet/udp.h>
58#include <netinet/udp_var.h>
59#include <sys/proc.h>
60#ifdef INET6
61#include <netinet/icmp6.h>
62#endif
63
64
65#ifndef KTR_SCTP
66#define KTR_SCTP KTR_SUBSYS
67#endif
68
69extern const struct sctp_cc_functions sctp_cc_functions[];
70extern const struct sctp_ss_functions sctp_ss_functions[];
71
72void
73sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
74{
75	struct sctp_cwnd_log sctp_clog;
76
77	sctp_clog.x.sb.stcb = stcb;
78	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
79	if (stcb)
80		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
81	else
82		sctp_clog.x.sb.stcb_sbcc = 0;
83	sctp_clog.x.sb.incr = incr;
84	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
85	    SCTP_LOG_EVENT_SB,
86	    from,
87	    sctp_clog.x.misc.log1,
88	    sctp_clog.x.misc.log2,
89	    sctp_clog.x.misc.log3,
90	    sctp_clog.x.misc.log4);
91}
92
93void
94sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
95{
96	struct sctp_cwnd_log sctp_clog;
97
98	sctp_clog.x.close.inp = (void *)inp;
99	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
100	if (stcb) {
101		sctp_clog.x.close.stcb = (void *)stcb;
102		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
103	} else {
104		sctp_clog.x.close.stcb = 0;
105		sctp_clog.x.close.state = 0;
106	}
107	sctp_clog.x.close.loc = loc;
108	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
109	    SCTP_LOG_EVENT_CLOSE,
110	    0,
111	    sctp_clog.x.misc.log1,
112	    sctp_clog.x.misc.log2,
113	    sctp_clog.x.misc.log3,
114	    sctp_clog.x.misc.log4);
115}
116
117void
118rto_logging(struct sctp_nets *net, int from)
119{
120	struct sctp_cwnd_log sctp_clog;
121
122	memset(&sctp_clog, 0, sizeof(sctp_clog));
123	sctp_clog.x.rto.net = (void *)net;
124	sctp_clog.x.rto.rtt = net->rtt / 1000;
125	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
126	    SCTP_LOG_EVENT_RTT,
127	    from,
128	    sctp_clog.x.misc.log1,
129	    sctp_clog.x.misc.log2,
130	    sctp_clog.x.misc.log3,
131	    sctp_clog.x.misc.log4);
132}
133
134void
135sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
136{
137	struct sctp_cwnd_log sctp_clog;
138
139	sctp_clog.x.strlog.stcb = stcb;
140	sctp_clog.x.strlog.n_tsn = tsn;
141	sctp_clog.x.strlog.n_sseq = sseq;
142	sctp_clog.x.strlog.e_tsn = 0;
143	sctp_clog.x.strlog.e_sseq = 0;
144	sctp_clog.x.strlog.strm = stream;
145	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
146	    SCTP_LOG_EVENT_STRM,
147	    from,
148	    sctp_clog.x.misc.log1,
149	    sctp_clog.x.misc.log2,
150	    sctp_clog.x.misc.log3,
151	    sctp_clog.x.misc.log4);
152}
153
154void
155sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
156{
157	struct sctp_cwnd_log sctp_clog;
158
159	sctp_clog.x.nagle.stcb = (void *)stcb;
160	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
161	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
162	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
163	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
164	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
165	    SCTP_LOG_EVENT_NAGLE,
166	    action,
167	    sctp_clog.x.misc.log1,
168	    sctp_clog.x.misc.log2,
169	    sctp_clog.x.misc.log3,
170	    sctp_clog.x.misc.log4);
171}
172
173void
174sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
175{
176	struct sctp_cwnd_log sctp_clog;
177
178	sctp_clog.x.sack.cumack = cumack;
179	sctp_clog.x.sack.oldcumack = old_cumack;
180	sctp_clog.x.sack.tsn = tsn;
181	sctp_clog.x.sack.numGaps = gaps;
182	sctp_clog.x.sack.numDups = dups;
183	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
184	    SCTP_LOG_EVENT_SACK,
185	    from,
186	    sctp_clog.x.misc.log1,
187	    sctp_clog.x.misc.log2,
188	    sctp_clog.x.misc.log3,
189	    sctp_clog.x.misc.log4);
190}
191
192void
193sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
194{
195	struct sctp_cwnd_log sctp_clog;
196
197	memset(&sctp_clog, 0, sizeof(sctp_clog));
198	sctp_clog.x.map.base = map;
199	sctp_clog.x.map.cum = cum;
200	sctp_clog.x.map.high = high;
201	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
202	    SCTP_LOG_EVENT_MAP,
203	    from,
204	    sctp_clog.x.misc.log1,
205	    sctp_clog.x.misc.log2,
206	    sctp_clog.x.misc.log3,
207	    sctp_clog.x.misc.log4);
208}
209
210void
211sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
212{
213	struct sctp_cwnd_log sctp_clog;
214
215	memset(&sctp_clog, 0, sizeof(sctp_clog));
216	sctp_clog.x.fr.largest_tsn = biggest_tsn;
217	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
218	sctp_clog.x.fr.tsn = tsn;
219	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
220	    SCTP_LOG_EVENT_FR,
221	    from,
222	    sctp_clog.x.misc.log1,
223	    sctp_clog.x.misc.log2,
224	    sctp_clog.x.misc.log3,
225	    sctp_clog.x.misc.log4);
226}
227
228#ifdef SCTP_MBUF_LOGGING
229void
230sctp_log_mb(struct mbuf *m, int from)
231{
232	struct sctp_cwnd_log sctp_clog;
233
234	sctp_clog.x.mb.mp = m;
235	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
236	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
237	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238	if (SCTP_BUF_IS_EXTENDED(m)) {
239		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
241	} else {
242		sctp_clog.x.mb.ext = 0;
243		sctp_clog.x.mb.refcnt = 0;
244	}
245	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246	    SCTP_LOG_EVENT_MBUF,
247	    from,
248	    sctp_clog.x.misc.log1,
249	    sctp_clog.x.misc.log2,
250	    sctp_clog.x.misc.log3,
251	    sctp_clog.x.misc.log4);
252}
253
254void
255sctp_log_mbc(struct mbuf *m, int from)
256{
257	struct mbuf *mat;
258
259	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
260		sctp_log_mb(mat, from);
261	}
262}
263#endif
264
265void
266sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
267{
268	struct sctp_cwnd_log sctp_clog;
269
270	if (control == NULL) {
271		SCTP_PRINTF("Gak log of NULL?\n");
272		return;
273	}
274	sctp_clog.x.strlog.stcb = control->stcb;
275	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
276	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
277	sctp_clog.x.strlog.strm = control->sinfo_stream;
278	if (poschk != NULL) {
279		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
280		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
281	} else {
282		sctp_clog.x.strlog.e_tsn = 0;
283		sctp_clog.x.strlog.e_sseq = 0;
284	}
285	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
286	    SCTP_LOG_EVENT_STRM,
287	    from,
288	    sctp_clog.x.misc.log1,
289	    sctp_clog.x.misc.log2,
290	    sctp_clog.x.misc.log3,
291	    sctp_clog.x.misc.log4);
292}
293
294void
295sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
296{
297	struct sctp_cwnd_log sctp_clog;
298
299	sctp_clog.x.cwnd.net = net;
300	if (stcb->asoc.send_queue_cnt > 255)
301		sctp_clog.x.cwnd.cnt_in_send = 255;
302	else
303		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
304	if (stcb->asoc.stream_queue_cnt > 255)
305		sctp_clog.x.cwnd.cnt_in_str = 255;
306	else
307		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
308
309	if (net) {
310		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
311		sctp_clog.x.cwnd.inflight = net->flight_size;
312		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
313		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
314		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
315	}
316	if (SCTP_CWNDLOG_PRESEND == from) {
317		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
318	}
319	sctp_clog.x.cwnd.cwnd_augment = augment;
320	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
321	    SCTP_LOG_EVENT_CWND,
322	    from,
323	    sctp_clog.x.misc.log1,
324	    sctp_clog.x.misc.log2,
325	    sctp_clog.x.misc.log3,
326	    sctp_clog.x.misc.log4);
327}
328
329void
330sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
331{
332	struct sctp_cwnd_log sctp_clog;
333
334	memset(&sctp_clog, 0, sizeof(sctp_clog));
335	if (inp) {
336		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
337
338	} else {
339		sctp_clog.x.lock.sock = (void *)NULL;
340	}
341	sctp_clog.x.lock.inp = (void *)inp;
342	if (stcb) {
343		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
344	} else {
345		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
346	}
347	if (inp) {
348		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
349		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
350	} else {
351		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
352		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
353	}
354	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
355	if (inp && (inp->sctp_socket)) {
356		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
357		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
358		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
359	} else {
360		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
361		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
362		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
363	}
364	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
365	    SCTP_LOG_LOCK_EVENT,
366	    from,
367	    sctp_clog.x.misc.log1,
368	    sctp_clog.x.misc.log2,
369	    sctp_clog.x.misc.log3,
370	    sctp_clog.x.misc.log4);
371}
372
373void
374sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
375{
376	struct sctp_cwnd_log sctp_clog;
377
378	memset(&sctp_clog, 0, sizeof(sctp_clog));
379	sctp_clog.x.cwnd.net = net;
380	sctp_clog.x.cwnd.cwnd_new_value = error;
381	sctp_clog.x.cwnd.inflight = net->flight_size;
382	sctp_clog.x.cwnd.cwnd_augment = burst;
383	if (stcb->asoc.send_queue_cnt > 255)
384		sctp_clog.x.cwnd.cnt_in_send = 255;
385	else
386		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
387	if (stcb->asoc.stream_queue_cnt > 255)
388		sctp_clog.x.cwnd.cnt_in_str = 255;
389	else
390		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
391	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392	    SCTP_LOG_EVENT_MAXBURST,
393	    from,
394	    sctp_clog.x.misc.log1,
395	    sctp_clog.x.misc.log2,
396	    sctp_clog.x.misc.log3,
397	    sctp_clog.x.misc.log4);
398}
399
400void
401sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
402{
403	struct sctp_cwnd_log sctp_clog;
404
405	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406	sctp_clog.x.rwnd.send_size = snd_size;
407	sctp_clog.x.rwnd.overhead = overhead;
408	sctp_clog.x.rwnd.new_rwnd = 0;
409	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410	    SCTP_LOG_EVENT_RWND,
411	    from,
412	    sctp_clog.x.misc.log1,
413	    sctp_clog.x.misc.log2,
414	    sctp_clog.x.misc.log3,
415	    sctp_clog.x.misc.log4);
416}
417
418void
419sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
420{
421	struct sctp_cwnd_log sctp_clog;
422
423	sctp_clog.x.rwnd.rwnd = peers_rwnd;
424	sctp_clog.x.rwnd.send_size = flight_size;
425	sctp_clog.x.rwnd.overhead = overhead;
426	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
427	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428	    SCTP_LOG_EVENT_RWND,
429	    from,
430	    sctp_clog.x.misc.log1,
431	    sctp_clog.x.misc.log2,
432	    sctp_clog.x.misc.log3,
433	    sctp_clog.x.misc.log4);
434}
435
436#ifdef SCTP_MBCNT_LOGGING
437static void
438sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
439{
440	struct sctp_cwnd_log sctp_clog;
441
442	sctp_clog.x.mbcnt.total_queue_size = total_oq;
443	sctp_clog.x.mbcnt.size_change = book;
444	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
445	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
446	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
447	    SCTP_LOG_EVENT_MBCNT,
448	    from,
449	    sctp_clog.x.misc.log1,
450	    sctp_clog.x.misc.log2,
451	    sctp_clog.x.misc.log3,
452	    sctp_clog.x.misc.log4);
453}
454#endif
455
/*
 * Generic trace helper: log four caller-supplied 32-bit values under
 * SCTP_LOG_MISC_EVENT; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
464
465void
466sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
467{
468	struct sctp_cwnd_log sctp_clog;
469
470	sctp_clog.x.wake.stcb = (void *)stcb;
471	sctp_clog.x.wake.wake_cnt = wake_cnt;
472	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
473	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
474	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
475
476	if (stcb->asoc.stream_queue_cnt < 0xff)
477		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
478	else
479		sctp_clog.x.wake.stream_qcnt = 0xff;
480
481	if (stcb->asoc.chunks_on_out_queue < 0xff)
482		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
483	else
484		sctp_clog.x.wake.chunks_on_oque = 0xff;
485
486	sctp_clog.x.wake.sctpflags = 0;
487	/* set in the defered mode stuff */
488	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
489		sctp_clog.x.wake.sctpflags |= 1;
490	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
491		sctp_clog.x.wake.sctpflags |= 2;
492	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
493		sctp_clog.x.wake.sctpflags |= 4;
494	/* what about the sb */
495	if (stcb->sctp_socket) {
496		struct socket *so = stcb->sctp_socket;
497
498		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
499	} else {
500		sctp_clog.x.wake.sbflags = 0xff;
501	}
502	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
503	    SCTP_LOG_EVENT_WAKE,
504	    from,
505	    sctp_clog.x.misc.log1,
506	    sctp_clog.x.misc.log2,
507	    sctp_clog.x.misc.log3,
508	    sctp_clog.x.misc.log4);
509}
510
511void
512sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
513{
514	struct sctp_cwnd_log sctp_clog;
515
516	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
517	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
518	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
519	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
520	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
521	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
522	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
523	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
524	    SCTP_LOG_EVENT_BLOCK,
525	    from,
526	    sctp_clog.x.misc.log1,
527	    sctp_clog.x.misc.log2,
528	    sctp_clog.x.misc.log3,
529	    sctp_clog.x.misc.log4);
530}
531
/*
 * Placeholder for exporting the cwnd log via a socket option; per the
 * comment below the log is expected to be retrieved with ktrdump
 * instead.  Always returns 0.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
538
539#ifdef SCTP_AUDITING_ENABLED
/*
 * Audit trail: ring buffer of two-byte (event, detail) records written
 * by sctp_audit_log()/sctp_auditing() and dumped by
 * sctp_print_audit_report().
 */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;	/* next slot to write; wraps to 0 at SCTP_AUDIT_SIZE */
542
543static
544void
545sctp_print_audit_report(void)
546{
547	int i;
548	int cnt;
549
550	cnt = 0;
551	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
552		if ((sctp_audit_data[i][0] == 0xe0) &&
553		    (sctp_audit_data[i][1] == 0x01)) {
554			cnt = 0;
555			SCTP_PRINTF("\n");
556		} else if (sctp_audit_data[i][0] == 0xf0) {
557			cnt = 0;
558			SCTP_PRINTF("\n");
559		} else if ((sctp_audit_data[i][0] == 0xc0) &&
560		    (sctp_audit_data[i][1] == 0x01)) {
561			SCTP_PRINTF("\n");
562			cnt = 0;
563		}
564		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
565		    (uint32_t)sctp_audit_data[i][1]);
566		cnt++;
567		if ((cnt % 14) == 0)
568			SCTP_PRINTF("\n");
569	}
570	for (i = 0; i < sctp_audit_indx; i++) {
571		if ((sctp_audit_data[i][0] == 0xe0) &&
572		    (sctp_audit_data[i][1] == 0x01)) {
573			cnt = 0;
574			SCTP_PRINTF("\n");
575		} else if (sctp_audit_data[i][0] == 0xf0) {
576			cnt = 0;
577			SCTP_PRINTF("\n");
578		} else if ((sctp_audit_data[i][0] == 0xc0) &&
579		    (sctp_audit_data[i][1] == 0x01)) {
580			SCTP_PRINTF("\n");
581			cnt = 0;
582		}
583		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
584		    (uint32_t)sctp_audit_data[i][1]);
585		cnt++;
586		if ((cnt % 14) == 0)
587			SCTP_PRINTF("\n");
588	}
589	SCTP_PRINTF("\n");
590}
591
/*
 * Consistency audit (compiled only with SCTP_AUDITING_ENABLED): record
 * the call in the audit ring, then cross-check the association's
 * retransmission count, total flight size and flight chunk count
 * against the actual contents of the sent queue, correcting each
 * counter and flagging a report on any mismatch.  The 'net' parameter
 * is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry record: 0xAA marker plus the caller's 'from' code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: low byte of the current retransmit count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recompute retransmit count and flight totals from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; correct and log 0xA2. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check the per-destination flight sizes against the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum disagrees; rebuild each net's figure. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
721
722void
723sctp_audit_log(uint8_t ev, uint8_t fd)
724{
725
726	sctp_audit_data[sctp_audit_indx][0] = ev;
727	sctp_audit_data[sctp_audit_indx][1] = fd;
728	sctp_audit_indx++;
729	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
730		sctp_audit_indx = 0;
731	}
732}
733
734#endif
735
736/*
737 * sctp_stop_timers_for_shutdown() should be called
738 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
739 * state to make sure that all timers are stopped.
740 */
741void
742sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
743{
744	struct sctp_association *asoc;
745	struct sctp_nets *net;
746
747	asoc = &stcb->asoc;
748
749	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
750	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
751	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
752	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
753	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
754	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
755		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
756		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
757	}
758}
759
/*
 * Table of common link MTUs in ascending order; consulted only when the
 * next hop does not report a usable MTU.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU below 'val' (the entry just before the
 * first one that is >= val).  Values at or below the smallest entry are
 * returned unchanged.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	const size_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	size_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < count) && (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest table MTU strictly larger than 'val', or 'val'
 * itself when it already exceeds every entry.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	const size_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	size_t idx;

	for (idx = 0; idx < count; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
822
/*
 * Refill the endpoint's pool of random bytes: HMAC the stored random
 * numbers with the running counter into random_store, reset the
 * read offset (store_at) to 0 and bump the counter for next time.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
841
/*
 * Return a 32-bit value from the endpoint's random store, claiming 4
 * bytes lock-free with a compare-and-swap on the store offset.  When
 * initial_sequence_debug is non-zero the endpoint instead hands out
 * sequential values (a deterministic mode, presumably for debugging/
 * testing — confirm where initial_sequence_debug is set).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Claim [store_at, store_at + 4) by advancing the shared offset. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Another CPU raced us to this slot; try again. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
879
880uint32_t
881sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
882{
883	uint32_t x;
884	struct timeval now;
885
886	if (check) {
887		(void)SCTP_GETTIME_TIMEVAL(&now);
888	}
889	for (;;) {
890		x = sctp_select_initial_TSN(&inp->sctp_ep);
891		if (x == 0) {
892			/* we never use 0 */
893			continue;
894		}
895		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
896			break;
897		}
898	}
899	return (x);
900}
901
902int32_t
903sctp_map_assoc_state(int kernel_state)
904{
905	int32_t user_state;
906
907	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
908		user_state = SCTP_CLOSED;
909	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
910		user_state = SCTP_SHUTDOWN_PENDING;
911	} else {
912		switch (kernel_state & SCTP_STATE_MASK) {
913		case SCTP_STATE_EMPTY:
914			user_state = SCTP_CLOSED;
915			break;
916		case SCTP_STATE_INUSE:
917			user_state = SCTP_CLOSED;
918			break;
919		case SCTP_STATE_COOKIE_WAIT:
920			user_state = SCTP_COOKIE_WAIT;
921			break;
922		case SCTP_STATE_COOKIE_ECHOED:
923			user_state = SCTP_COOKIE_ECHOED;
924			break;
925		case SCTP_STATE_OPEN:
926			user_state = SCTP_ESTABLISHED;
927			break;
928		case SCTP_STATE_SHUTDOWN_SENT:
929			user_state = SCTP_SHUTDOWN_SENT;
930			break;
931		case SCTP_STATE_SHUTDOWN_RECEIVED:
932			user_state = SCTP_SHUTDOWN_RECEIVED;
933			break;
934		case SCTP_STATE_SHUTDOWN_ACK_SENT:
935			user_state = SCTP_SHUTDOWN_ACK_SENT;
936			break;
937		default:
938			user_state = SCTP_CLOSED;
939			break;
940		}
941	}
942	return (user_state);
943}
944
945int
946sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
947    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
948{
949	struct sctp_association *asoc;
950
951	/*
952	 * Anything set to zero is taken care of by the allocation routine's
953	 * bzero
954	 */
955
956	/*
957	 * Up front select what scoping to apply on addresses I tell my peer
958	 * Not sure what to do with these right now, we will need to come up
959	 * with a way to set them. We may need to pass them through from the
960	 * caller in the sctp_aloc_assoc() function.
961	 */
962	int i;
963#if defined(SCTP_DETAILED_STR_STATS)
964	int j;
965#endif
966
967	asoc = &stcb->asoc;
968	/* init all variables to a known value. */
969	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
970	asoc->max_burst = inp->sctp_ep.max_burst;
971	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
972	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
973	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
974	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
975	asoc->ecn_supported = inp->ecn_supported;
976	asoc->prsctp_supported = inp->prsctp_supported;
977	asoc->idata_supported = inp->idata_supported;
978	asoc->auth_supported = inp->auth_supported;
979	asoc->asconf_supported = inp->asconf_supported;
980	asoc->reconfig_supported = inp->reconfig_supported;
981	asoc->nrsack_supported = inp->nrsack_supported;
982	asoc->pktdrop_supported = inp->pktdrop_supported;
983	asoc->idata_supported = inp->idata_supported;
984	asoc->sctp_cmt_pf = (uint8_t)0;
985	asoc->sctp_frag_point = inp->sctp_frag_point;
986	asoc->sctp_features = inp->sctp_features;
987	asoc->default_dscp = inp->sctp_ep.default_dscp;
988	asoc->max_cwnd = inp->max_cwnd;
989#ifdef INET6
990	if (inp->sctp_ep.default_flowlabel) {
991		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
992	} else {
993		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
994			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
995			asoc->default_flowlabel &= 0x000fffff;
996			asoc->default_flowlabel |= 0x80000000;
997		} else {
998			asoc->default_flowlabel = 0;
999		}
1000	}
1001#endif
1002	asoc->sb_send_resv = 0;
1003	if (override_tag) {
1004		asoc->my_vtag = override_tag;
1005	} else {
1006		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1007	}
1008	/* Get the nonce tags */
1009	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1011	asoc->vrf_id = vrf_id;
1012
1013#ifdef SCTP_ASOCLOG_OF_TSNS
1014	asoc->tsn_in_at = 0;
1015	asoc->tsn_out_at = 0;
1016	asoc->tsn_in_wrapped = 0;
1017	asoc->tsn_out_wrapped = 0;
1018	asoc->cumack_log_at = 0;
1019	asoc->cumack_log_atsnt = 0;
1020#endif
1021#ifdef SCTP_FS_SPEC_LOG
1022	asoc->fs_index = 0;
1023#endif
1024	asoc->refcnt = 0;
1025	asoc->assoc_up_sent = 0;
1026	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1027	    sctp_select_initial_TSN(&inp->sctp_ep);
1028	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1029	/* we are optimisitic here */
1030	asoc->peer_supports_nat = 0;
1031	asoc->sent_queue_retran_cnt = 0;
1032
1033	/* for CMT */
1034	asoc->last_net_cmt_send_started = NULL;
1035
1036	/* This will need to be adjusted */
1037	asoc->last_acked_seq = asoc->init_seq_number - 1;
1038	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1039	asoc->asconf_seq_in = asoc->last_acked_seq;
1040
1041	/* here we are different, we hold the next one we expect */
1042	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1043
1044	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1045	asoc->initial_rto = inp->sctp_ep.initial_rto;
1046
1047	asoc->max_init_times = inp->sctp_ep.max_init_times;
1048	asoc->max_send_times = inp->sctp_ep.max_send_times;
1049	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1050	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1051	asoc->free_chunk_cnt = 0;
1052
1053	asoc->iam_blocking = 0;
1054	asoc->context = inp->sctp_context;
1055	asoc->local_strreset_support = inp->local_strreset_support;
1056	asoc->def_send = inp->def_send;
1057	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1058	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1059	asoc->pr_sctp_cnt = 0;
1060	asoc->total_output_queue_size = 0;
1061
1062	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1063		asoc->scope.ipv6_addr_legal = 1;
1064		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1065			asoc->scope.ipv4_addr_legal = 1;
1066		} else {
1067			asoc->scope.ipv4_addr_legal = 0;
1068		}
1069	} else {
1070		asoc->scope.ipv6_addr_legal = 0;
1071		asoc->scope.ipv4_addr_legal = 1;
1072	}
1073
1074	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1075	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1076
1077	asoc->smallest_mtu = inp->sctp_frag_point;
1078	asoc->minrto = inp->sctp_ep.sctp_minrto;
1079	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1080
1081	asoc->stream_locked_on = 0;
1082	asoc->ecn_echo_cnt_onq = 0;
1083	asoc->stream_locked = 0;
1084
1085	asoc->send_sack = 1;
1086
1087	LIST_INIT(&asoc->sctp_restricted_addrs);
1088
1089	TAILQ_INIT(&asoc->nets);
1090	TAILQ_INIT(&asoc->pending_reply_queue);
1091	TAILQ_INIT(&asoc->asconf_ack_sent);
1092	/* Setup to fill the hb random cache at first HB */
1093	asoc->hb_random_idx = 4;
1094
1095	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1096
1097	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1098	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1099
1100	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1101	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1102
1103	/*
1104	 * Now the stream parameters, here we allocate space for all streams
1105	 * that we request by default.
1106	 */
1107	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1108	    o_strms;
1109	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1110	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1111	    SCTP_M_STRMO);
1112	if (asoc->strmout == NULL) {
1113		/* big trouble no memory */
1114		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1115		return (ENOMEM);
1116	}
1117	for (i = 0; i < asoc->streamoutcnt; i++) {
1118		/*
1119		 * inbound side must be set to 0xffff, also NOTE when we get
1120		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1121		 * count (streamoutcnt) but first check if we sent to any of
1122		 * the upper streams that were dropped (if some were). Those
1123		 * that were dropped must be notified to the upper layer as
1124		 * failed to send.
1125		 */
1126		asoc->strmout[i].next_mid_ordered = 0;
1127		asoc->strmout[i].next_mid_unordered = 0;
1128		TAILQ_INIT(&asoc->strmout[i].outqueue);
1129		asoc->strmout[i].chunks_on_queues = 0;
1130#if defined(SCTP_DETAILED_STR_STATS)
1131		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1132			asoc->strmout[i].abandoned_sent[j] = 0;
1133			asoc->strmout[i].abandoned_unsent[j] = 0;
1134		}
1135#else
1136		asoc->strmout[i].abandoned_sent[0] = 0;
1137		asoc->strmout[i].abandoned_unsent[0] = 0;
1138#endif
1139		asoc->strmout[i].sid = i;
1140		asoc->strmout[i].last_msg_incomplete = 0;
1141		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1142		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1143	}
1144	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1145
1146	/* Now the mapping array */
1147	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1148	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1149	    SCTP_M_MAP);
1150	if (asoc->mapping_array == NULL) {
1151		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1152		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1153		return (ENOMEM);
1154	}
1155	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1156	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1157	    SCTP_M_MAP);
1158	if (asoc->nr_mapping_array == NULL) {
1159		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1160		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1161		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1162		return (ENOMEM);
1163	}
1164	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1165
1166	/* Now the init of the other outqueues */
1167	TAILQ_INIT(&asoc->free_chunks);
1168	TAILQ_INIT(&asoc->control_send_queue);
1169	TAILQ_INIT(&asoc->asconf_send_queue);
1170	TAILQ_INIT(&asoc->send_queue);
1171	TAILQ_INIT(&asoc->sent_queue);
1172	TAILQ_INIT(&asoc->resetHead);
1173	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1174	TAILQ_INIT(&asoc->asconf_queue);
1175	/* authentication fields */
1176	asoc->authinfo.random = NULL;
1177	asoc->authinfo.active_keyid = 0;
1178	asoc->authinfo.assoc_key = NULL;
1179	asoc->authinfo.assoc_keyid = 0;
1180	asoc->authinfo.recv_key = NULL;
1181	asoc->authinfo.recv_keyid = 0;
1182	LIST_INIT(&asoc->shared_keys);
1183	asoc->marked_retrans = 0;
1184	asoc->port = inp->sctp_ep.port;
1185	asoc->timoinit = 0;
1186	asoc->timodata = 0;
1187	asoc->timosack = 0;
1188	asoc->timoshutdown = 0;
1189	asoc->timoheartbeat = 0;
1190	asoc->timocookie = 0;
1191	asoc->timoshutdownack = 0;
1192	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1193	asoc->discontinuity_time = asoc->start_time;
1194	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1195		asoc->abandoned_unsent[i] = 0;
1196		asoc->abandoned_sent[i] = 0;
1197	}
1198	/*
1199	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200	 * freed later when the association is freed.
1201	 */
1202	return (0);
1203}
1204
1205void
1206sctp_print_mapping_array(struct sctp_association *asoc)
1207{
1208	unsigned int i, limit;
1209
1210	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1211	    asoc->mapping_array_size,
1212	    asoc->mapping_array_base_tsn,
1213	    asoc->cumulative_tsn,
1214	    asoc->highest_tsn_inside_map,
1215	    asoc->highest_tsn_inside_nr_map);
1216	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1217		if (asoc->mapping_array[limit - 1] != 0) {
1218			break;
1219		}
1220	}
1221	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1222	for (i = 0; i < limit; i++) {
1223		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1224	}
1225	if (limit % 16)
1226		SCTP_PRINTF("\n");
1227	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1228		if (asoc->nr_mapping_array[limit - 1]) {
1229			break;
1230		}
1231	}
1232	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1233	for (i = 0; i < limit; i++) {
1234		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1235	}
1236	if (limit % 16)
1237		SCTP_PRINTF("\n");
1238}
1239
1240int
1241sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1242{
1243	/* mapping array needs to grow */
1244	uint8_t *new_array1, *new_array2;
1245	uint32_t new_size;
1246
1247	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1248	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1249	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1250	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1251		/* can't get more, forget it */
1252		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1253		if (new_array1) {
1254			SCTP_FREE(new_array1, SCTP_M_MAP);
1255		}
1256		if (new_array2) {
1257			SCTP_FREE(new_array2, SCTP_M_MAP);
1258		}
1259		return (-1);
1260	}
1261	memset(new_array1, 0, new_size);
1262	memset(new_array2, 0, new_size);
1263	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1264	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1265	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1266	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1267	asoc->mapping_array = new_array1;
1268	asoc->nr_mapping_array = new_array2;
1269	asoc->mapping_array_size = new_size;
1270	return (0);
1271}
1272
1273
/*
 * Core of the association iterator: walk endpoints (inps) and their
 * associations (stcbs), invoking the caller-supplied callbacks
 * (function_inp, function_assoc, function_inp_end, function_atend) on
 * each one that matches the requested pcb flags/features and asoc state.
 *
 * Locking: runs with the INP_INFO read lock and the ITERATOR lock held;
 * takes the INP read lock per endpoint and the TCB lock per association.
 * Every SCTP_ITERATOR_MAX_AT_ONCE associations it deliberately drops and
 * reacquires the global locks to let other threads make progress, using
 * inp/stcb refcounts to keep the current position alive across the gap.
 * The iterator structure itself is freed here when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: run the at-end callback and free the iterator. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* first endpoint is already read-locked above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* hand-over-hand: lock next before unlocking current */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* per-endpoint callback runs at most once per endpoint */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* nothing (more) to do on this endpoint */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* refcounts keep inp/stcb alive while unlocked */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* someone asked us to stop while unlocked */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* reacquire position and drop the temporary refs */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1423
1424void
1425sctp_iterator_worker(void)
1426{
1427	struct sctp_iterator *it, *nit;
1428
1429	/* This function is called with the WQ lock in place */
1430
1431	sctp_it_ctl.iterator_running = 1;
1432	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1433		/* now lets work on this one */
1434		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1435		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1436		CURVNET_SET(it->vn);
1437		sctp_iterator_work(it);
1438		CURVNET_RESTORE();
1439		SCTP_IPI_ITERATOR_WQ_LOCK();
1440		/* sa_ignore FREED_MEMORY */
1441	}
1442	sctp_it_ctl.iterator_running = 0;
1443	return;
1444}
1445
1446
/*
 * Process the global address work queue filled in by rtsock address
 * change notifications: move all pending sctp_laddr entries onto a
 * private list inside a freshly allocated asconf iterator and kick off
 * an iterator over all bound-all endpoints so each association can emit
 * the appropriate ASCONF updates.  On allocation failure the work is
 * simply retried later via the ADDR_WQ timer; on iterator start failure
 * the entries are either torn down (if the stack is shutting down) or
 * pushed back onto the work queue.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Transfer every queued entry onto our private list. */
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();

	if (asc->cnt == 0) {
		/* Nothing was queued after all; discard the iterator. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				/* stack is going away: free the entries */
				sctp_asconf_iterator_end(asc, 0);
			} else {
				/* requeue the entries for a later retry */
				SCTP_WQ_ADDR_LOCK();
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_WQ_ADDR_UNLOCK();
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}
1507
/*
 * Common callout handler for every SCTP timer type.  The opaque argument
 * is the struct sctp_timer embedded in the inp/tcb/net it belongs to.
 *
 * The long prologue validates the timer (self pointer, type, callout
 * still pending) and pins inp/stcb with refcounts before taking the TCB
 * lock; each bail-out path must undo exactly what was taken so far,
 * which is what the staggered "stopped_from" markers and the
 * get_out/out_decr/out_no_decr labels manage.  The switch then dispatches
 * to the per-type timer routine; handlers that destroy the tcb/inp jump
 * past the normal unlock/decrement paths.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/reused timer struct */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* only the ADDR_WQ timer may run without an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * With no socket left, only teardown-related timers are
		 * still meaningful; everything else is dropped here.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the tcb; state == 0 means it is already being freed */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped in the meantime; unwind the refs */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must still run on an about-to-be-freed asoc */
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* restart the HB timer unless HBs are disabled */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* T1-cookie retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* guard expired: abort the association outright */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* destroys the association; stcb is invalid afterwards */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* destroys the endpoint; inp is invalid afterwards */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1951
1952void
1953sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1954    struct sctp_nets *net)
1955{
1956	uint32_t to_ticks;
1957	struct sctp_timer *tmr;
1958
1959	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1960		return;
1961
1962	tmr = NULL;
1963	if (stcb) {
1964		SCTP_TCB_LOCK_ASSERT(stcb);
1965	}
1966	switch (t_type) {
1967	case SCTP_TIMER_TYPE_ZERO_COPY:
1968		tmr = &inp->sctp_ep.zero_copy_timer;
1969		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1970		break;
1971	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1972		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1973		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1974		break;
1975	case SCTP_TIMER_TYPE_ADDR_WQ:
1976		/* Only 1 tick away :-) */
1977		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1978		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1979		break;
1980	case SCTP_TIMER_TYPE_SEND:
1981		/* Here we use the RTO timer */
1982		{
1983			int rto_val;
1984
1985			if ((stcb == NULL) || (net == NULL)) {
1986				return;
1987			}
1988			tmr = &net->rxt_timer;
1989			if (net->RTO == 0) {
1990				rto_val = stcb->asoc.initial_rto;
1991			} else {
1992				rto_val = net->RTO;
1993			}
1994			to_ticks = MSEC_TO_TICKS(rto_val);
1995		}
1996		break;
1997	case SCTP_TIMER_TYPE_INIT:
1998		/*
1999		 * Here we use the INIT timer default usually about 1
2000		 * minute.
2001		 */
2002		if ((stcb == NULL) || (net == NULL)) {
2003			return;
2004		}
2005		tmr = &net->rxt_timer;
2006		if (net->RTO == 0) {
2007			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2008		} else {
2009			to_ticks = MSEC_TO_TICKS(net->RTO);
2010		}
2011		break;
2012	case SCTP_TIMER_TYPE_RECV:
2013		/*
2014		 * Here we use the Delayed-Ack timer value from the inp
2015		 * ususually about 200ms.
2016		 */
2017		if (stcb == NULL) {
2018			return;
2019		}
2020		tmr = &stcb->asoc.dack_timer;
2021		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2022		break;
2023	case SCTP_TIMER_TYPE_SHUTDOWN:
2024		/* Here we use the RTO of the destination. */
2025		if ((stcb == NULL) || (net == NULL)) {
2026			return;
2027		}
2028		if (net->RTO == 0) {
2029			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2030		} else {
2031			to_ticks = MSEC_TO_TICKS(net->RTO);
2032		}
2033		tmr = &net->rxt_timer;
2034		break;
2035	case SCTP_TIMER_TYPE_HEARTBEAT:
2036		/*
2037		 * the net is used here so that we can add in the RTO. Even
2038		 * though we use a different timer. We also add the HB timer
2039		 * PLUS a random jitter.
2040		 */
2041		if ((stcb == NULL) || (net == NULL)) {
2042			return;
2043		} else {
2044			uint32_t rndval;
2045			uint32_t jitter;
2046
2047			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2048			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2049				return;
2050			}
2051			if (net->RTO == 0) {
2052				to_ticks = stcb->asoc.initial_rto;
2053			} else {
2054				to_ticks = net->RTO;
2055			}
2056			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2057			jitter = rndval % to_ticks;
2058			if (jitter >= (to_ticks >> 1)) {
2059				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2060			} else {
2061				to_ticks = to_ticks - jitter;
2062			}
2063			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2064			    !(net->dest_state & SCTP_ADDR_PF)) {
2065				to_ticks += net->heart_beat_delay;
2066			}
2067			/*
2068			 * Now we must convert the to_ticks that are now in
2069			 * ms to ticks.
2070			 */
2071			to_ticks = MSEC_TO_TICKS(to_ticks);
2072			tmr = &net->hb_timer;
2073		}
2074		break;
2075	case SCTP_TIMER_TYPE_COOKIE:
2076		/*
2077		 * Here we can use the RTO timer from the network since one
2078		 * RTT was compelete. If a retran happened then we will be
2079		 * using the RTO initial value.
2080		 */
2081		if ((stcb == NULL) || (net == NULL)) {
2082			return;
2083		}
2084		if (net->RTO == 0) {
2085			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2086		} else {
2087			to_ticks = MSEC_TO_TICKS(net->RTO);
2088		}
2089		tmr = &net->rxt_timer;
2090		break;
2091	case SCTP_TIMER_TYPE_NEWCOOKIE:
2092		/*
2093		 * nothing needed but the endpoint here ususually about 60
2094		 * minutes.
2095		 */
2096		tmr = &inp->sctp_ep.signature_change;
2097		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2098		break;
2099	case SCTP_TIMER_TYPE_ASOCKILL:
2100		if (stcb == NULL) {
2101			return;
2102		}
2103		tmr = &stcb->asoc.strreset_timer;
2104		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2105		break;
2106	case SCTP_TIMER_TYPE_INPKILL:
2107		/*
2108		 * The inp is setup to die. We re-use the signature_chage
2109		 * timer since that has stopped and we are in the GONE
2110		 * state.
2111		 */
2112		tmr = &inp->sctp_ep.signature_change;
2113		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2114		break;
2115	case SCTP_TIMER_TYPE_PATHMTURAISE:
2116		/*
2117		 * Here we use the value found in the EP for PMTU ususually
2118		 * about 10 minutes.
2119		 */
2120		if ((stcb == NULL) || (net == NULL)) {
2121			return;
2122		}
2123		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2124			return;
2125		}
2126		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2127		tmr = &net->pmtu_timer;
2128		break;
2129	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2130		/* Here we use the RTO of the destination */
2131		if ((stcb == NULL) || (net == NULL)) {
2132			return;
2133		}
2134		if (net->RTO == 0) {
2135			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2136		} else {
2137			to_ticks = MSEC_TO_TICKS(net->RTO);
2138		}
2139		tmr = &net->rxt_timer;
2140		break;
2141	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2142		/*
2143		 * Here we use the endpoints shutdown guard timer usually
2144		 * about 3 minutes.
2145		 */
2146		if (stcb == NULL) {
2147			return;
2148		}
2149		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2150			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2151		} else {
2152			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2153		}
2154		tmr = &stcb->asoc.shut_guard_timer;
2155		break;
2156	case SCTP_TIMER_TYPE_STRRESET:
2157		/*
2158		 * Here the timer comes from the stcb but its value is from
2159		 * the net's RTO.
2160		 */
2161		if ((stcb == NULL) || (net == NULL)) {
2162			return;
2163		}
2164		if (net->RTO == 0) {
2165			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2166		} else {
2167			to_ticks = MSEC_TO_TICKS(net->RTO);
2168		}
2169		tmr = &stcb->asoc.strreset_timer;
2170		break;
2171	case SCTP_TIMER_TYPE_ASCONF:
2172		/*
2173		 * Here the timer comes from the stcb but its value is from
2174		 * the net's RTO.
2175		 */
2176		if ((stcb == NULL) || (net == NULL)) {
2177			return;
2178		}
2179		if (net->RTO == 0) {
2180			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2181		} else {
2182			to_ticks = MSEC_TO_TICKS(net->RTO);
2183		}
2184		tmr = &stcb->asoc.asconf_timer;
2185		break;
2186	case SCTP_TIMER_TYPE_PRIM_DELETED:
2187		if ((stcb == NULL) || (net != NULL)) {
2188			return;
2189		}
2190		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2191		tmr = &stcb->asoc.delete_prim_timer;
2192		break;
2193	case SCTP_TIMER_TYPE_AUTOCLOSE:
2194		if (stcb == NULL) {
2195			return;
2196		}
2197		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2198			/*
2199			 * Really an error since stcb is NOT set to
2200			 * autoclose
2201			 */
2202			return;
2203		}
2204		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2205		tmr = &stcb->asoc.autoclose_timer;
2206		break;
2207	default:
2208		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2209		    __func__, t_type);
2210		return;
2211		break;
2212	}
2213	if ((to_ticks <= 0) || (tmr == NULL)) {
2214		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2215		    __func__, t_type, to_ticks, (void *)tmr);
2216		return;
2217	}
2218	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2219		/*
2220		 * we do NOT allow you to have it already running. if it is
2221		 * we leave the current one up unchanged
2222		 */
2223		return;
2224	}
2225	/* At this point we can proceed */
2226	if (t_type == SCTP_TIMER_TYPE_SEND) {
2227		stcb->asoc.num_send_timers_up++;
2228	}
2229	tmr->stopped_from = 0;
2230	tmr->type = t_type;
2231	tmr->ep = (void *)inp;
2232	tmr->tcb = (void *)stcb;
2233	tmr->net = (void *)net;
2234	tmr->self = (void *)tmr;
2235	tmr->vnet = (void *)curvnet;
2236	tmr->ticks = sctp_get_tick_count();
2237	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2238	return;
2239}
2240
/*
 * Stop the timer of type t_type that runs on the given endpoint (inp),
 * association (stcb) and/or destination (net).  Which of the three
 * objects is required depends on the timer type; if a required one is
 * missing the call silently returns.  'from' identifies the caller's
 * location and is recorded in tmr->stopped_from for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer runs without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer instance it uses. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the pending SEND-timer count in sync (never below zero). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2397
2398uint32_t
2399sctp_calculate_len(struct mbuf *m)
2400{
2401	uint32_t tlen = 0;
2402	struct mbuf *at;
2403
2404	at = m;
2405	while (at) {
2406		tlen += SCTP_BUF_LEN(at);
2407		at = SCTP_BUF_NEXT(at);
2408	}
2409	return (tlen);
2410}
2411
2412void
2413sctp_mtu_size_reset(struct sctp_inpcb *inp,
2414    struct sctp_association *asoc, uint32_t mtu)
2415{
2416	/*
2417	 * Reset the P-MTU size on this association, this involves changing
2418	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2419	 * allow the DF flag to be cleared.
2420	 */
2421	struct sctp_tmit_chunk *chk;
2422	unsigned int eff_mtu, ovh;
2423
2424	asoc->smallest_mtu = mtu;
2425	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2426		ovh = SCTP_MIN_OVERHEAD;
2427	} else {
2428		ovh = SCTP_MIN_V4_OVERHEAD;
2429	}
2430	eff_mtu = mtu - ovh;
2431	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2432		if (chk->send_size > eff_mtu) {
2433			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2434		}
2435	}
2436	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2437		if (chk->send_size > eff_mtu) {
2438			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2439		}
2440	}
2441}
2442
2443
2444/*
2445 * given an association and starting time of the current RTT period return
2446 * RTO in number of msecs net should point to the current network
2447 */
2448
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: caller passed a bad 'safe' selector */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - *old, i.e. the elapsed RTT period */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
	        (uint64_t)now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* subsequent measurement: fold rtt into the scaled SRTT/RTTVAR */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed SRTT with rtt, RTTVAR with rtt/2 */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never let the variance collapse to zero */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = SRTT + RTTVAR (both in ms) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2564
2565/*
2566 * return a pointer to a contiguous piece of data from the given mbuf chain
2567 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2568 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2569 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2570 */
2571caddr_t
2572sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2573{
2574	uint32_t count;
2575	uint8_t *ptr;
2576
2577	ptr = in_ptr;
2578	if ((off < 0) || (len <= 0))
2579		return (NULL);
2580
2581	/* find the desired start location */
2582	while ((m != NULL) && (off > 0)) {
2583		if (off < SCTP_BUF_LEN(m))
2584			break;
2585		off -= SCTP_BUF_LEN(m);
2586		m = SCTP_BUF_NEXT(m);
2587	}
2588	if (m == NULL)
2589		return (NULL);
2590
2591	/* is the current mbuf large enough (eg. contiguous)? */
2592	if ((SCTP_BUF_LEN(m) - off) >= len) {
2593		return (mtod(m, caddr_t)+off);
2594	} else {
2595		/* else, it spans more than one mbuf, so save a temp copy... */
2596		while ((m != NULL) && (len > 0)) {
2597			count = min(SCTP_BUF_LEN(m) - off, len);
2598			bcopy(mtod(m, caddr_t)+off, ptr, count);
2599			len -= count;
2600			ptr += count;
2601			off = 0;
2602			m = SCTP_BUF_NEXT(m);
2603		}
2604		if ((m == NULL) && (len > 0))
2605			return (NULL);
2606		else
2607			return ((caddr_t)in_ptr);
2608	}
2609}
2610
2611
2612
2613struct sctp_paramhdr *
2614sctp_get_next_param(struct mbuf *m,
2615    int offset,
2616    struct sctp_paramhdr *pull,
2617    int pull_limit)
2618{
2619	/* This just provides a typed signature to Peter's Pull routine */
2620	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2621	    (uint8_t *)pull));
2622}
2623
2624
2625struct mbuf *
2626sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2627{
2628	struct mbuf *m_last;
2629	caddr_t dp;
2630
2631	if (padlen > 3) {
2632		return (NULL);
2633	}
2634	if (padlen <= M_TRAILINGSPACE(m)) {
2635		/*
2636		 * The easy way. We hope the majority of the time we hit
2637		 * here :)
2638		 */
2639		m_last = m;
2640	} else {
2641		/* Hard way we must grow the mbuf chain */
2642		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2643		if (m_last == NULL) {
2644			return (NULL);
2645		}
2646		SCTP_BUF_LEN(m_last) = 0;
2647		SCTP_BUF_NEXT(m_last) = NULL;
2648		SCTP_BUF_NEXT(m) = m_last;
2649	}
2650	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2651	SCTP_BUF_LEN(m_last) += padlen;
2652	memset(dp, 0, padlen);
2653	return (m_last);
2654}
2655
2656struct mbuf *
2657sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2658{
2659	/* find the last mbuf in chain and pad it */
2660	struct mbuf *m_at;
2661
2662	if (last_mbuf != NULL) {
2663		return (sctp_add_pad_tombuf(last_mbuf, padval));
2664	} else {
2665		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2666			if (SCTP_BUF_NEXT(m_at) == NULL) {
2667				return (sctp_add_pad_tombuf(m_at, padval));
2668			}
2669		}
2670	}
2671	return (NULL);
2672}
2673
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (COMM_UP, COMM_LOST,
 * RESTART, CANT_STR_ASSOC, ...) to the socket's receive queue if the
 * application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  For 1-to-1 style
 * sockets a matching so_error is also set on the abort-type states,
 * and any sleepers on the socket are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the trailing sac_info: supported features or abort chunk. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Fill sac_info only if the large allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Hand the peer's ABORT chunk up to the application. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Take a ref so the stcb survives dropping its lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2826
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address sa (new
 * state and error code) to the socket's receive queue, if the
 * application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Deliver as IPv4-mapped IPv6 if the socket asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2918
2919
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification
 * for the chunk 'chk' that could not be delivered ('sent' selects the
 * SCTP_DATA_SENT vs SCTP_DATA_UNSENT flag).  The chunk's data mbuf is
 * stolen (chk->data set to NULL), its chunk header and padding are
 * trimmed off, and the remaining payload is appended behind the
 * notification header on the socket's receive queue.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The extended event uses a different (larger) header layout. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Derive exact padding/payload split from the header. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3050
3051
3052static void
3053sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3054    struct sctp_stream_queue_pending *sp, int so_locked
3055#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3056    SCTP_UNUSED
3057#endif
3058)
3059{
3060	struct mbuf *m_notify;
3061	struct sctp_send_failed *ssf;
3062	struct sctp_send_failed_event *ssfe;
3063	struct sctp_queued_to_read *control;
3064	int notifhdr_len;
3065
3066	if ((stcb == NULL) ||
3067	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3068	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3069		/* event not enabled */
3070		return;
3071	}
3072	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3073		notifhdr_len = sizeof(struct sctp_send_failed_event);
3074	} else {
3075		notifhdr_len = sizeof(struct sctp_send_failed);
3076	}
3077	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3078	if (m_notify == NULL) {
3079		/* no space left */
3080		return;
3081	}
3082	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3083	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3084		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3085		memset(ssfe, 0, notifhdr_len);
3086		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3087		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3088		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3089		ssfe->ssfe_error = error;
3090		/* not exactly what the user sent in, but should be close :) */
3091		ssfe->ssfe_info.snd_sid = sp->sid;
3092		if (sp->some_taken) {
3093			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3094		} else {
3095			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3096		}
3097		ssfe->ssfe_info.snd_ppid = sp->ppid;
3098		ssfe->ssfe_info.snd_context = sp->context;
3099		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3100		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3101	} else {
3102		ssf = mtod(m_notify, struct sctp_send_failed *);
3103		memset(ssf, 0, notifhdr_len);
3104		ssf->ssf_type = SCTP_SEND_FAILED;
3105		ssf->ssf_flags = SCTP_DATA_UNSENT;
3106		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3107		ssf->ssf_error = error;
3108		/* not exactly what the user sent in, but should be close :) */
3109		ssf->ssf_info.sinfo_stream = sp->sid;
3110		ssf->ssf_info.sinfo_ssn = 0;
3111		if (sp->some_taken) {
3112			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3113		} else {
3114			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3115		}
3116		ssf->ssf_info.sinfo_ppid = sp->ppid;
3117		ssf->ssf_info.sinfo_context = sp->context;
3118		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3119		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3120	}
3121	SCTP_BUF_NEXT(m_notify) = sp->data;
3122
3123	/* Steal off the mbuf */
3124	sp->data = NULL;
3125	/*
3126	 * For this case, we check the actual socket buffer, since the assoc
3127	 * is going away we don't want to overfill the socket buffer for a
3128	 * non-reader
3129	 */
3130	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3131		sctp_m_freem(m_notify);
3132		return;
3133	}
3134	/* append to socket */
3135	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3136	    0, 0, stcb->asoc.context, 0, 0, 0,
3137	    m_notify);
3138	if (control == NULL) {
3139		/* no memory */
3140		sctp_m_freem(m_notify);
3141		return;
3142	}
3143	control->spec_flags = M_NOTIFICATION;
3144	sctp_add_to_readq(stcb->sctp_ep, stcb,
3145	    control,
3146	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3147}
3148
3149
3150
3151static void
3152sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3153{
3154	struct mbuf *m_notify;
3155	struct sctp_adaptation_event *sai;
3156	struct sctp_queued_to_read *control;
3157
3158	if ((stcb == NULL) ||
3159	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3160		/* event not enabled */
3161		return;
3162	}
3163	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3164	if (m_notify == NULL)
3165		/* no space left */
3166		return;
3167	SCTP_BUF_LEN(m_notify) = 0;
3168	sai = mtod(m_notify, struct sctp_adaptation_event *);
3169	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3170	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3171	sai->sai_flags = 0;
3172	sai->sai_length = sizeof(struct sctp_adaptation_event);
3173	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3174	sai->sai_assoc_id = sctp_get_associd(stcb);
3175
3176	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3177	SCTP_BUF_NEXT(m_notify) = NULL;
3178
3179	/* append to socket */
3180	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3181	    0, 0, stcb->asoc.context, 0, 0, 0,
3182	    m_notify);
3183	if (control == NULL) {
3184		/* no memory */
3185		sctp_m_freem(m_notify);
3186		return;
3187	}
3188	control->length = SCTP_BUF_LEN(m_notify);
3189	control->spec_flags = M_NOTIFICATION;
3190	/* not that we need this */
3191	control->tail_mbuf = m_notify;
3192	sctp_add_to_readq(stcb->sctp_ep, stcb,
3193	    control,
3194	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3195}
3196
3197/* This always must be called with the read-queue LOCKED in the INP */
3198static void
3199sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3200    uint32_t val, int so_locked
3201#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3202    SCTP_UNUSED
3203#endif
3204)
3205{
3206	struct mbuf *m_notify;
3207	struct sctp_pdapi_event *pdapi;
3208	struct sctp_queued_to_read *control;
3209	struct sockbuf *sb;
3210
3211	if ((stcb == NULL) ||
3212	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3213		/* event not enabled */
3214		return;
3215	}
3216	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3217		return;
3218	}
3219	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3220	if (m_notify == NULL)
3221		/* no space left */
3222		return;
3223	SCTP_BUF_LEN(m_notify) = 0;
3224	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3225	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3226	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3227	pdapi->pdapi_flags = 0;
3228	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3229	pdapi->pdapi_indication = error;
3230	pdapi->pdapi_stream = (val >> 16);
3231	pdapi->pdapi_seq = (val & 0x0000ffff);
3232	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3233
3234	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3235	SCTP_BUF_NEXT(m_notify) = NULL;
3236	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3237	    0, 0, stcb->asoc.context, 0, 0, 0,
3238	    m_notify);
3239	if (control == NULL) {
3240		/* no memory */
3241		sctp_m_freem(m_notify);
3242		return;
3243	}
3244	control->spec_flags = M_NOTIFICATION;
3245	control->length = SCTP_BUF_LEN(m_notify);
3246	/* not that we need this */
3247	control->tail_mbuf = m_notify;
3248	control->held_length = 0;
3249	control->length = 0;
3250	sb = &stcb->sctp_socket->so_rcv;
3251	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3252		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3253	}
3254	sctp_sballoc(stcb, sb, m_notify);
3255	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3256		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3257	}
3258	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3259	control->end_added = 1;
3260	if (stcb->asoc.control_pdapi)
3261		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3262	else {
3263		/* we really should not see this case */
3264		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3265	}
3266	if (stcb->sctp_ep && stcb->sctp_socket) {
3267		/* This should always be the case */
3268#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3269		struct socket *so;
3270
3271		so = SCTP_INP_SO(stcb->sctp_ep);
3272		if (!so_locked) {
3273			atomic_add_int(&stcb->asoc.refcnt, 1);
3274			SCTP_TCB_UNLOCK(stcb);
3275			SCTP_SOCKET_LOCK(so, 1);
3276			SCTP_TCB_LOCK(stcb);
3277			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3278			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3279				SCTP_SOCKET_UNLOCK(so, 1);
3280				return;
3281			}
3282		}
3283#endif
3284		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3285#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3286		if (!so_locked) {
3287			SCTP_SOCKET_UNLOCK(so, 1);
3288		}
3289#endif
3290	}
3291}
3292
/*
 * Deliver an SCTP_SHUTDOWN_EVENT for this association.  For one-to-one
 * style sockets (and 1-to-many assocs in the TCP pool) the socket is
 * additionally marked unable to send, waking any blocked writer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock order requires the socket lock to be taken with the
		 * TCB lock dropped; the refcount keeps the assoc alive
		 * while we are unlocked.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we slept; bail out */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3361
3362static void
3363sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3364    int so_locked
3365#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3366    SCTP_UNUSED
3367#endif
3368)
3369{
3370	struct mbuf *m_notify;
3371	struct sctp_sender_dry_event *event;
3372	struct sctp_queued_to_read *control;
3373
3374	if ((stcb == NULL) ||
3375	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3376		/* event not enabled */
3377		return;
3378	}
3379	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3380	if (m_notify == NULL) {
3381		/* no space left */
3382		return;
3383	}
3384	SCTP_BUF_LEN(m_notify) = 0;
3385	event = mtod(m_notify, struct sctp_sender_dry_event *);
3386	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3387	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3388	event->sender_dry_flags = 0;
3389	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3390	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3391
3392	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3393	SCTP_BUF_NEXT(m_notify) = NULL;
3394
3395	/* append to socket */
3396	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3397	    0, 0, stcb->asoc.context, 0, 0, 0,
3398	    m_notify);
3399	if (control == NULL) {
3400		/* no memory */
3401		sctp_m_freem(m_notify);
3402		return;
3403	}
3404	control->length = SCTP_BUF_LEN(m_notify);
3405	control->spec_flags = M_NOTIFICATION;
3406	/* not that we need this */
3407	control->tail_mbuf = m_notify;
3408	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3409	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3410}
3411
3412
3413void
3414sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3415{
3416	struct mbuf *m_notify;
3417	struct sctp_queued_to_read *control;
3418	struct sctp_stream_change_event *stradd;
3419
3420	if ((stcb == NULL) ||
3421	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3422		/* event not enabled */
3423		return;
3424	}
3425	if ((stcb->asoc.peer_req_out) && flag) {
3426		/* Peer made the request, don't tell the local user */
3427		stcb->asoc.peer_req_out = 0;
3428		return;
3429	}
3430	stcb->asoc.peer_req_out = 0;
3431	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3432	if (m_notify == NULL)
3433		/* no space left */
3434		return;
3435	SCTP_BUF_LEN(m_notify) = 0;
3436	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3437	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3438	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3439	stradd->strchange_flags = flag;
3440	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3441	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3442	stradd->strchange_instrms = numberin;
3443	stradd->strchange_outstrms = numberout;
3444	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3445	SCTP_BUF_NEXT(m_notify) = NULL;
3446	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3447		/* no space */
3448		sctp_m_freem(m_notify);
3449		return;
3450	}
3451	/* append to socket */
3452	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3453	    0, 0, stcb->asoc.context, 0, 0, 0,
3454	    m_notify);
3455	if (control == NULL) {
3456		/* no memory */
3457		sctp_m_freem(m_notify);
3458		return;
3459	}
3460	control->spec_flags = M_NOTIFICATION;
3461	control->length = SCTP_BUF_LEN(m_notify);
3462	/* not that we need this */
3463	control->tail_mbuf = m_notify;
3464	sctp_add_to_readq(stcb->sctp_ep, stcb,
3465	    control,
3466	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3467}
3468
3469void
3470sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3471{
3472	struct mbuf *m_notify;
3473	struct sctp_queued_to_read *control;
3474	struct sctp_assoc_reset_event *strasoc;
3475
3476	if ((stcb == NULL) ||
3477	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3478		/* event not enabled */
3479		return;
3480	}
3481	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3482	if (m_notify == NULL)
3483		/* no space left */
3484		return;
3485	SCTP_BUF_LEN(m_notify) = 0;
3486	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3487	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3488	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3489	strasoc->assocreset_flags = flag;
3490	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3491	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3492	strasoc->assocreset_local_tsn = sending_tsn;
3493	strasoc->assocreset_remote_tsn = recv_tsn;
3494	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3495	SCTP_BUF_NEXT(m_notify) = NULL;
3496	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3497		/* no space */
3498		sctp_m_freem(m_notify);
3499		return;
3500	}
3501	/* append to socket */
3502	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3503	    0, 0, stcb->asoc.context, 0, 0, 0,
3504	    m_notify);
3505	if (control == NULL) {
3506		/* no memory */
3507		sctp_m_freem(m_notify);
3508		return;
3509	}
3510	control->spec_flags = M_NOTIFICATION;
3511	control->length = SCTP_BUF_LEN(m_notify);
3512	/* not that we need this */
3513	control->tail_mbuf = m_notify;
3514	sctp_add_to_readq(stcb->sctp_ep, stcb,
3515	    control,
3516	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3517}
3518
3519
3520
3521static void
3522sctp_notify_stream_reset(struct sctp_tcb *stcb,
3523    int number_entries, uint16_t *list, int flag)
3524{
3525	struct mbuf *m_notify;
3526	struct sctp_queued_to_read *control;
3527	struct sctp_stream_reset_event *strreset;
3528	int len;
3529
3530	if ((stcb == NULL) ||
3531	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3532		/* event not enabled */
3533		return;
3534	}
3535	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3536	if (m_notify == NULL)
3537		/* no space left */
3538		return;
3539	SCTP_BUF_LEN(m_notify) = 0;
3540	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3541	if (len > M_TRAILINGSPACE(m_notify)) {
3542		/* never enough room */
3543		sctp_m_freem(m_notify);
3544		return;
3545	}
3546	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3547	memset(strreset, 0, len);
3548	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3549	strreset->strreset_flags = flag;
3550	strreset->strreset_length = len;
3551	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3552	if (number_entries) {
3553		int i;
3554
3555		for (i = 0; i < number_entries; i++) {
3556			strreset->strreset_stream_list[i] = ntohs(list[i]);
3557		}
3558	}
3559	SCTP_BUF_LEN(m_notify) = len;
3560	SCTP_BUF_NEXT(m_notify) = NULL;
3561	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3562		/* no space */
3563		sctp_m_freem(m_notify);
3564		return;
3565	}
3566	/* append to socket */
3567	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3568	    0, 0, stcb->asoc.context, 0, 0, 0,
3569	    m_notify);
3570	if (control == NULL) {
3571		/* no memory */
3572		sctp_m_freem(m_notify);
3573		return;
3574	}
3575	control->spec_flags = M_NOTIFICATION;
3576	control->length = SCTP_BUF_LEN(m_notify);
3577	/* not that we need this */
3578	control->tail_mbuf = m_notify;
3579	sctp_add_to_readq(stcb->sctp_ep, stcb,
3580	    control,
3581	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3582}
3583
3584
3585static void
3586sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3587{
3588	struct mbuf *m_notify;
3589	struct sctp_remote_error *sre;
3590	struct sctp_queued_to_read *control;
3591	unsigned int notif_len;
3592	uint16_t chunk_len;
3593
3594	if ((stcb == NULL) ||
3595	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3596		return;
3597	}
3598	if (chunk != NULL) {
3599		chunk_len = ntohs(chunk->ch.chunk_length);
3600	} else {
3601		chunk_len = 0;
3602	}
3603	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3604	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3605	if (m_notify == NULL) {
3606		/* Retry with smaller value. */
3607		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3608		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3609		if (m_notify == NULL) {
3610			return;
3611		}
3612	}
3613	SCTP_BUF_NEXT(m_notify) = NULL;
3614	sre = mtod(m_notify, struct sctp_remote_error *);
3615	memset(sre, 0, notif_len);
3616	sre->sre_type = SCTP_REMOTE_ERROR;
3617	sre->sre_flags = 0;
3618	sre->sre_length = sizeof(struct sctp_remote_error);
3619	sre->sre_error = error;
3620	sre->sre_assoc_id = sctp_get_associd(stcb);
3621	if (notif_len > sizeof(struct sctp_remote_error)) {
3622		memcpy(sre->sre_data, chunk, chunk_len);
3623		sre->sre_length += chunk_len;
3624	}
3625	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3626	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3627	    0, 0, stcb->asoc.context, 0, 0, 0,
3628	    m_notify);
3629	if (control != NULL) {
3630		control->length = SCTP_BUF_LEN(m_notify);
3631		/* not that we need this */
3632		control->tail_mbuf = m_notify;
3633		control->spec_flags = M_NOTIFICATION;
3634		sctp_add_to_readq(stcb->sctp_ep, stcb,
3635		    control,
3636		    &stcb->sctp_socket->so_rcv, 1,
3637		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3638	} else {
3639		sctp_m_freem(m_notify);
3640	}
3641}
3642
3643
/*
 * Central ULP notification dispatcher: translate an SCTP_NOTIFY_* code
 * into the matching socket-level event and hand it off to the specific
 * sctp_notify_*() helper.  'data' is an untyped payload whose meaning
 * depends on the notification (a net, a chunk, a pending send, an
 * address, a key id, ...) as shown by the casts in each case below.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	/* A reader-side-shutdown socket cannot take notifications either. */
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report the peer's lack of AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* data is the affected struct sctp_nets */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* data is a stream-queue-pending entry that never got sent */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		/* NOTE: the constant name is historically misspelled in the API */
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* an assoc aborted before reaching OPEN reports CANT_STR_ASSOC */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* For the stream-reset cases, data is the uint16_t stream list. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	/* For the ASCONF cases, data is the affected sockaddr. */
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For the AUTH cases, data carries the key id in its low 16 bits. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		/* data is the peer's struct sctp_error_chunk (may be NULL) */
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3823
/*
 * Drain every outbound queue of the association (sent queue, send queue
 * and each stream's pending queue), delivering a send-failed
 * notification with 'error' for every chunk or pending message that
 * still carried user data, then freeing the chunk/entry itself.  Used
 * when the association is being torn down.  'holds_lock' says whether
 * the caller already owns the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already taken off the stream count */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the notification steals chk->data when it can */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* the notification may steal sp->data */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3935
/*
 * Notify the ULP that the association in stcb has been aborted.
 * from_peer is non-zero when the ABORT came from the peer (vs. a locally
 * generated abort); error is the cause code handed to the notifications;
 * abort is the raw ABORT chunk (may be NULL); so_locked tells the lower
 * notification routines whether the socket lock is already held (only
 * meaningful on platforms that take the socket lock).
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/*
	 * On 1-to-1 style endpoints (TCP pool, or TCP-type and connected),
	 * record that the association was aborted so later socket calls
	 * can report the failure.
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* Socket already gone or closed: there is nobody left to notify. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3965
/*
 * Send an ABORT for the association (or for an out-of-the-blue packet
 * when stcb is NULL) and, if a TCB exists, notify the ULP and free the
 * association.  m/iphlen/src/dst/sh describe the triggering packet;
 * op_err is an optional error-cause mbuf chain consumed by
 * sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB: use its peer vtag and VRF for the ABORT. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock with the TCB lock dropped; the
		 * refcount keeps the TCB alive across the window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations decrement the current-estab gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs.
 * Each log is a circular array; when the *_wrapped flag is set, entries
 * from the current index to the end of the array are the older half and
 * entries from 0 to the current index are the newer half.
 *
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" — presumably a
 * typo for "NOISY_PRINTS" — so this body compiles out unless that exact
 * misspelling is defined.  Kept as-is to avoid breaking existing builds.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* Older half first (only present after the log has wrapped). */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	/*
	 * No goto here: if nothing was sent both loop guards below are
	 * false, so the loops are naturally skipped.
	 */
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4074
/*
 * Abort an existing association: send an ABORT chunk to the peer (with
 * optional error causes in op_err), update statistics, notify the ULP
 * unless the socket is gone, and free the association.  If stcb is NULL
 * there is nothing to abort; the inp is freed instead when its socket is
 * already gone and it holds no other associations.  so_locked indicates
 * whether the caller already holds the socket lock (socket-lock
 * platforms only).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Established associations decrement the current-estab gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock with the TCB lock dropped; the refcount
	 * keeps the TCB alive across the window.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4135
/*
 * Handle an out-of-the-blue (OOTB) packet, i.e. one for which no
 * association exists.  Walks the chunks to decide the response:
 * PACKET-DROPPED, ABORT and SHUTDOWN-COMPLETE get no reply,
 * SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and anything else
 * falls through to an ABORT (subject to the sctp_blackhole sysctl,
 * which can suppress the ABORT entirely or only for packets carrying
 * an INIT).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Endpoint is going away with no associations: free it. */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	/*
	 * blackhole == 0: always ABORT; blackhole == 1: ABORT unless the
	 * packet carried an INIT; otherwise stay silent.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4201
4202/*
4203 * check the inbound datagram to make sure there is not an abort inside it,
4204 * if there is return 1, else return 0.
4205 */
4206int
4207sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4208{
4209	struct sctp_chunkhdr *ch;
4210	struct sctp_init_chunk *init_chk, chunk_buf;
4211	int offset;
4212	unsigned int chk_length;
4213
4214	offset = iphlen + sizeof(struct sctphdr);
4215	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4216	    (uint8_t *)&chunk_buf);
4217	while (ch != NULL) {
4218		chk_length = ntohs(ch->chunk_length);
4219		if (chk_length < sizeof(*ch)) {
4220			/* packet is probably corrupt */
4221			break;
4222		}
4223		/* we seem to be ok, is it an abort? */
4224		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4225			/* yep, tell them */
4226			return (1);
4227		}
4228		if (ch->chunk_type == SCTP_INITIATION) {
4229			/* need to update the Vtag */
4230			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4231			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4232			if (init_chk != NULL) {
4233				*vtagfill = ntohl(init_chk->init.initiate_tag);
4234			}
4235		}
4236		/* Nope, move to the next chunk */
4237		offset += SCTP_SIZE32(chk_length);
4238		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4239		    sizeof(*ch), (uint8_t *)&chunk_buf);
4240	}
4241	return (0);
4242}
4243
4244/*
4245 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4246 * set (i.e. it's 0) so, create this function to compare link local scopes
4247 */
4248#ifdef INET6
4249uint32_t
4250sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4251{
4252	struct sockaddr_in6 a, b;
4253
4254	/* save copies */
4255	a = *addr1;
4256	b = *addr2;
4257
4258	if (a.sin6_scope_id == 0)
4259		if (sa6_recoverscope(&a)) {
4260			/* can't get scope, so can't match */
4261			return (0);
4262		}
4263	if (b.sin6_scope_id == 0)
4264		if (sa6_recoverscope(&b)) {
4265			/* can't get scope, so can't match */
4266			return (0);
4267		}
4268	if (a.sin6_scope_id != b.sin6_scope_id)
4269		return (0);
4270
4271	return (1);
4272}
4273
4274/*
4275 * returns a sockaddr_in6 with embedded scope recovered and removed
4276 */
4277struct sockaddr_in6 *
4278sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4279{
4280	/* check and strip embedded scope junk */
4281	if (addr->sin6_family == AF_INET6) {
4282		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4283			if (addr->sin6_scope_id == 0) {
4284				*store = *addr;
4285				if (!sa6_recoverscope(store)) {
4286					/* use the recovered scope */
4287					addr = store;
4288				}
4289			} else {
4290				/* else, return the original "to" addr */
4291				in6_clearscope(&addr->sin6_addr);
4292			}
4293		}
4294	}
4295	return (addr);
4296}
4297#endif
4298
4299/*
4300 * are the two addresses the same?  currently a "scopeless" check returns: 1
4301 * if same, 0 if not
4302 */
/*
 * Scopeless address comparison.  Returns 1 when sa1 and sa2 hold the
 * same address, 0 otherwise (including NULL arguments, mismatched or
 * unsupported families).
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	if ((sa1 == NULL) || (sa2 == NULL)) {
		/* must be valid */
		return (0);
	}
	if (sa1->sa_family != sa2->sa_family) {
		/* must be the same family */
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6, *b6;

			a6 = (struct sockaddr_in6 *)sa1;
			b6 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(a6, b6));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *a4, *b4;

			a4 = (struct sockaddr_in *)sa1;
			b4 = (struct sockaddr_in *)sa2;
			return (a4->sin_addr.s_addr == b4->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4344
4345void
4346sctp_print_address(struct sockaddr *sa)
4347{
4348#ifdef INET6
4349	char ip6buf[INET6_ADDRSTRLEN];
4350#endif
4351
4352	switch (sa->sa_family) {
4353#ifdef INET6
4354	case AF_INET6:
4355		{
4356			struct sockaddr_in6 *sin6;
4357
4358			sin6 = (struct sockaddr_in6 *)sa;
4359			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4360			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4361			    ntohs(sin6->sin6_port),
4362			    sin6->sin6_scope_id);
4363			break;
4364		}
4365#endif
4366#ifdef INET
4367	case AF_INET:
4368		{
4369			struct sockaddr_in *sin;
4370			unsigned char *p;
4371
4372			sin = (struct sockaddr_in *)sa;
4373			p = (unsigned char *)&sin->sin_addr;
4374			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4375			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4376			break;
4377		}
4378#endif
4379	default:
4380		SCTP_PRINTF("?\n");
4381		break;
4382	}
4383}
4384
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Lock the old socket's receive buffer against concurrent readers. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Debit the old socket buffer for each mbuf moved. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Credit the new socket buffer for each mbuf moved in. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4460
/*
 * Wake up any reader sleeping on the inp's socket: fire the zero-copy
 * event if that feature is enabled, otherwise do a receive wakeup.  On
 * socket-lock platforms the socket lock is acquired first (unless
 * so_locked says the caller already holds it), temporarily dropping the
 * TCB lock while holding a refcount.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Drop the TCB lock to respect lock order;
				 * the refcount keeps the TCB alive.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				/* Socket vanished while unlocked: bail out. */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4503
/*
 * Append a queued-to-read control to the inp's read queue, charging the
 * socket buffer sb for every non-empty mbuf so select()/poll() see the
 * data.  Zero-length mbufs are unlinked and freed along the way.  end
 * non-zero marks the message complete (end_added); inp_read_lock_held
 * and so_locked report which locks the caller already holds.  On the
 * dead-socket path the control and its data are freed instead of queued.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read any more: free everything and bail. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications do not count as user receives. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, charge sb for the rest. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Finally, wake any reader sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4601
4602/*************HOLD THIS COMMENT FOR PATCH FILE OF
4603 *************ALTERNATE ROUTING CODE
4604 */
4605
4606/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4607 *************ALTERNATE ROUTING CODE
4608 */
4609
4610struct mbuf *
4611sctp_generate_cause(uint16_t code, char *info)
4612{
4613	struct mbuf *m;
4614	struct sctp_gen_error_cause *cause;
4615	size_t info_len;
4616	uint16_t len;
4617
4618	if ((code == 0) || (info == NULL)) {
4619		return (NULL);
4620	}
4621	info_len = strlen(info);
4622	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4623		return (NULL);
4624	}
4625	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4626	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4627	if (m != NULL) {
4628		SCTP_BUF_LEN(m) = len;
4629		cause = mtod(m, struct sctp_gen_error_cause *);
4630		cause->code = htons(code);
4631		cause->length = htons(len);
4632		memcpy(cause->info, info, info_len);
4633	}
4634	return (m);
4635}
4636
4637struct mbuf *
4638sctp_generate_no_user_data_cause(uint32_t tsn)
4639{
4640	struct mbuf *m;
4641	struct sctp_error_no_user_data *no_user_data_cause;
4642	uint16_t len;
4643
4644	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4645	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4646	if (m != NULL) {
4647		SCTP_BUF_LEN(m) = len;
4648		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4649		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4650		no_user_data_cause->cause.length = htons(len);
4651		no_user_data_cause->tsn = htonl(tsn);
4652	}
4653	return (m);
4654}
4655
4656#ifdef SCTP_MBCNT_LOGGING
/*
 * Return the buffer space of chunk tp1 (chk_cnt chunks' worth) to the
 * association's output accounting and, for 1-to-1 style sockets, to the
 * socket send buffer.  Both counters are clamped at zero rather than
 * allowed to underflow.  No-op when the chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero: never let the queue size underflow. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Credit the socket send buffer on 1-to-1 style endpoints. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4688
4689#endif
4690
4691int
4692sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4693    uint8_t sent, int so_locked
4694#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4695    SCTP_UNUSED
4696#endif
4697)
4698{
4699	struct sctp_stream_out *strq;
4700	struct sctp_tmit_chunk *chk = NULL, *tp2;
4701	struct sctp_stream_queue_pending *sp;
4702	uint32_t mid;
4703	uint16_t sid;
4704	uint8_t foundeom = 0;
4705	int ret_sz = 0;
4706	int notdone;
4707	int do_wakeup_routine = 0;
4708
4709	sid = tp1->rec.data.sid;
4710	mid = tp1->rec.data.mid;
4711	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4712		stcb->asoc.abandoned_sent[0]++;
4713		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4714		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4715#if defined(SCTP_DETAILED_STR_STATS)
4716		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4717#endif
4718	} else {
4719		stcb->asoc.abandoned_unsent[0]++;
4720		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4721		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4722#if defined(SCTP_DETAILED_STR_STATS)
4723		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4724#endif
4725	}
4726	do {
4727		ret_sz += tp1->book_size;
4728		if (tp1->data != NULL) {
4729			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4730				sctp_flight_size_decrease(tp1);
4731				sctp_total_flight_decrease(stcb, tp1);
4732			}
4733			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4734			stcb->asoc.peers_rwnd += tp1->send_size;
4735			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4736			if (sent) {
4737				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4738			} else {
4739				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4740			}
4741			if (tp1->data) {
4742				sctp_m_freem(tp1->data);
4743				tp1->data = NULL;
4744			}
4745			do_wakeup_routine = 1;
4746			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4747				stcb->asoc.sent_queue_cnt_removeable--;
4748			}
4749		}
4750		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4751		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4752		    SCTP_DATA_NOT_FRAG) {
4753			/* not frag'ed we ae done   */
4754			notdone = 0;
4755			foundeom = 1;
4756		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4757			/* end of frag, we are done */
4758			notdone = 0;
4759			foundeom = 1;
4760		} else {
4761			/*
4762			 * Its a begin or middle piece, we must mark all of
4763			 * it
4764			 */
4765			notdone = 1;
4766			tp1 = TAILQ_NEXT(tp1, sctp_next);
4767		}
4768	} while (tp1 && notdone);
4769	if (foundeom == 0) {
4770		/*
4771		 * The multi-part message was scattered across the send and
4772		 * sent queue.
4773		 */
4774		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4775			if ((tp1->rec.data.sid != sid) ||
4776			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4777				break;
4778			}
4779			/*
4780			 * save to chk in case we have some on stream out
4781			 * queue. If so and we have an un-transmitted one we
4782			 * don't have to fudge the TSN.
4783			 */
4784			chk = tp1;
4785			ret_sz += tp1->book_size;
4786			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4787			if (sent) {
4788				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4789			} else {
4790				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4791			}
4792			if (tp1->data) {
4793				sctp_m_freem(tp1->data);
4794				tp1->data = NULL;
4795			}
4796			/* No flight involved here book the size to 0 */
4797			tp1->book_size = 0;
4798			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4799				foundeom = 1;
4800			}
4801			do_wakeup_routine = 1;
4802			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4803			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4804			/*
4805			 * on to the sent queue so we can wait for it to be
4806			 * passed by.
4807			 */
4808			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4809			    sctp_next);
4810			stcb->asoc.send_queue_cnt--;
4811			stcb->asoc.sent_queue_cnt++;
4812		}
4813	}
4814	if (foundeom == 0) {
4815		/*
4816		 * Still no eom found. That means there is stuff left on the
4817		 * stream out queue.. yuck.
4818		 */
4819		SCTP_TCB_SEND_LOCK(stcb);
4820		strq = &stcb->asoc.strmout[sid];
4821		sp = TAILQ_FIRST(&strq->outqueue);
4822		if (sp != NULL) {
4823			sp->discard_rest = 1;
4824			/*
4825			 * We may need to put a chunk on the queue that
4826			 * holds the TSN that would have been sent with the
4827			 * LAST bit.
4828			 */
4829			if (chk == NULL) {
4830				/* Yep, we have to */
4831				sctp_alloc_a_chunk(stcb, chk);
4832				if (chk == NULL) {
4833					/*
4834					 * we are hosed. All we can do is
4835					 * nothing.. which will cause an
4836					 * abort if the peer is paying
4837					 * attention.
4838					 */
4839					goto oh_well;
4840				}
4841				memset(chk, 0, sizeof(*chk));
4842				chk->rec.data.rcv_flags = 0;
4843				chk->sent = SCTP_FORWARD_TSN_SKIP;
4844				chk->asoc = &stcb->asoc;
4845				if (stcb->asoc.idata_supported == 0) {
4846					if (sp->sinfo_flags & SCTP_UNORDERED) {
4847						chk->rec.data.mid = 0;
4848					} else {
4849						chk->rec.data.mid = strq->next_mid_ordered;
4850					}
4851				} else {
4852					if (sp->sinfo_flags & SCTP_UNORDERED) {
4853						chk->rec.data.mid = strq->next_mid_unordered;
4854					} else {
4855						chk->rec.data.mid = strq->next_mid_ordered;
4856					}
4857				}
4858				chk->rec.data.sid = sp->sid;
4859				chk->rec.data.ppid = sp->ppid;
4860				chk->rec.data.context = sp->context;
4861				chk->flags = sp->act_flags;
4862				chk->whoTo = NULL;
4863				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4864				strq->chunks_on_queues++;
4865				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4866				stcb->asoc.sent_queue_cnt++;
4867				stcb->asoc.pr_sctp_cnt++;
4868			}
4869			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4870			if (sp->sinfo_flags & SCTP_UNORDERED) {
4871				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4872			}
4873			if (stcb->asoc.idata_supported == 0) {
4874				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4875					strq->next_mid_ordered++;
4876				}
4877			} else {
4878				if (sp->sinfo_flags & SCTP_UNORDERED) {
4879					strq->next_mid_unordered++;
4880				} else {
4881					strq->next_mid_ordered++;
4882				}
4883			}
4884	oh_well:
4885			if (sp->data) {
4886				/*
4887				 * Pull any data to free up the SB and allow
4888				 * sender to "add more" while we will throw
4889				 * away :-)
4890				 */
4891				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4892				ret_sz += sp->length;
4893				do_wakeup_routine = 1;
4894				sp->some_taken = 1;
4895				sctp_m_freem(sp->data);
4896				sp->data = NULL;
4897				sp->tail_mbuf = NULL;
4898				sp->length = 0;
4899			}
4900		}
4901		SCTP_TCB_SEND_UNLOCK(stcb);
4902	}
4903	if (do_wakeup_routine) {
4904#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4905		struct socket *so;
4906
4907		so = SCTP_INP_SO(stcb->sctp_ep);
4908		if (!so_locked) {
4909			atomic_add_int(&stcb->asoc.refcnt, 1);
4910			SCTP_TCB_UNLOCK(stcb);
4911			SCTP_SOCKET_LOCK(so, 1);
4912			SCTP_TCB_LOCK(stcb);
4913			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4914			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4915				/* assoc was freed while we were unlocked */
4916				SCTP_SOCKET_UNLOCK(so, 1);
4917				return (ret_sz);
4918			}
4919		}
4920#endif
4921		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4922#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4923		if (!so_locked) {
4924			SCTP_SOCKET_UNLOCK(so, 1);
4925		}
4926#endif
4927	}
4928	return (ret_sz);
4929}
4930
4931/*
4932 * checks to see if the given address, sa, is one that is currently known by
4933 * the kernel note: can't distinguish the same address on multiple interfaces
4934 * and doesn't handle multiple addresses with different zone/scope id's note:
4935 * ifa_ifwithaddr() compares the entire sockaddr struct
4936 */
4937struct sctp_ifa *
4938sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4939    int holds_lock)
4940{
4941	struct sctp_laddr *laddr;
4942
4943	if (holds_lock == 0) {
4944		SCTP_INP_RLOCK(inp);
4945	}
4946	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4947		if (laddr->ifa == NULL)
4948			continue;
4949		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4950			continue;
4951#ifdef INET
4952		if (addr->sa_family == AF_INET) {
4953			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4954			    laddr->ifa->address.sin.sin_addr.s_addr) {
4955				/* found him. */
4956				if (holds_lock == 0) {
4957					SCTP_INP_RUNLOCK(inp);
4958				}
4959				return (laddr->ifa);
4960				break;
4961			}
4962		}
4963#endif
4964#ifdef INET6
4965		if (addr->sa_family == AF_INET6) {
4966			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4967			    &laddr->ifa->address.sin6)) {
4968				/* found him. */
4969				if (holds_lock == 0) {
4970					SCTP_INP_RUNLOCK(inp);
4971				}
4972				return (laddr->ifa);
4973				break;
4974			}
4975		}
4976#endif
4977	}
4978	if (holds_lock == 0) {
4979		SCTP_INP_RUNLOCK(inp);
4980	}
4981	return (NULL);
4982}
4983
4984uint32_t
4985sctp_get_ifa_hash_val(struct sockaddr *addr)
4986{
4987	switch (addr->sa_family) {
4988#ifdef INET
4989	case AF_INET:
4990		{
4991			struct sockaddr_in *sin;
4992
4993			sin = (struct sockaddr_in *)addr;
4994			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4995		}
4996#endif
4997#ifdef INET6
4998	case AF_INET6:
4999		{
5000			struct sockaddr_in6 *sin6;
5001			uint32_t hash_of_addr;
5002
5003			sin6 = (struct sockaddr_in6 *)addr;
5004			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5005			    sin6->sin6_addr.s6_addr32[1] +
5006			    sin6->sin6_addr.s6_addr32[2] +
5007			    sin6->sin6_addr.s6_addr32[3]);
5008			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5009			return (hash_of_addr);
5010		}
5011#endif
5012	default:
5013		break;
5014	}
5015	return (0);
5016}
5017
5018struct sctp_ifa *
5019sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5020{
5021	struct sctp_ifa *sctp_ifap;
5022	struct sctp_vrf *vrf;
5023	struct sctp_ifalist *hash_head;
5024	uint32_t hash_of_addr;
5025
5026	if (holds_lock == 0)
5027		SCTP_IPI_ADDR_RLOCK();
5028
5029	vrf = sctp_find_vrf(vrf_id);
5030	if (vrf == NULL) {
5031		if (holds_lock == 0)
5032			SCTP_IPI_ADDR_RUNLOCK();
5033		return (NULL);
5034	}
5035	hash_of_addr = sctp_get_ifa_hash_val(addr);
5036
5037	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5038	if (hash_head == NULL) {
5039		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5040		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5041		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5042		sctp_print_address(addr);
5043		SCTP_PRINTF("No such bucket for address\n");
5044		if (holds_lock == 0)
5045			SCTP_IPI_ADDR_RUNLOCK();
5046
5047		return (NULL);
5048	}
5049	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5050		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5051			continue;
5052#ifdef INET
5053		if (addr->sa_family == AF_INET) {
5054			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5055			    sctp_ifap->address.sin.sin_addr.s_addr) {
5056				/* found him. */
5057				if (holds_lock == 0)
5058					SCTP_IPI_ADDR_RUNLOCK();
5059				return (sctp_ifap);
5060				break;
5061			}
5062		}
5063#endif
5064#ifdef INET6
5065		if (addr->sa_family == AF_INET6) {
5066			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5067			    &sctp_ifap->address.sin6)) {
5068				/* found him. */
5069				if (holds_lock == 0)
5070					SCTP_IPI_ADDR_RUNLOCK();
5071				return (sctp_ifap);
5072				break;
5073			}
5074		}
5075#endif
5076	}
5077	if (holds_lock == 0)
5078		SCTP_IPI_ADDR_RUNLOCK();
5079	return (NULL);
5080}
5081
/*
 * Called after the user has consumed *freed_so_far bytes from the
 * receive path.  If the receive window has opened by at least
 * rwnd_req bytes since the last report to the peer, send a
 * window-update SACK immediately and kick chunk output; otherwise
 * just accumulate the freed amount in the tcb for next time.
 * hold_rlock indicates the caller holds the INP read-queue lock,
 * which must be dropped before taking the TCB lock (lock ordering)
 * and is reacquired before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing worth reporting. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened enough to report.  Drop the read-queue
		 * lock first: TCB lock must not be taken while holding it.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Send the window-update SACK and flush any queued output. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Reacquire the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the assoc reference taken at entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5164
5165int
5166sctp_sorecvmsg(struct socket *so,
5167    struct uio *uio,
5168    struct mbuf **mp,
5169    struct sockaddr *from,
5170    int fromlen,
5171    int *msg_flags,
5172    struct sctp_sndrcvinfo *sinfo,
5173    int filling_sinfo)
5174{
5175	/*
5176	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5177	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5178	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5179	 * On the way out we may send out any combination of:
5180	 * MSG_NOTIFICATION MSG_EOR
5181	 *
5182	 */
5183	struct sctp_inpcb *inp = NULL;
5184	int my_len = 0;
5185	int cp_len = 0, error = 0;
5186	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5187	struct mbuf *m = NULL;
5188	struct sctp_tcb *stcb = NULL;
5189	int wakeup_read_socket = 0;
5190	int freecnt_applied = 0;
5191	int out_flags = 0, in_flags = 0;
5192	int block_allowed = 1;
5193	uint32_t freed_so_far = 0;
5194	uint32_t copied_so_far = 0;
5195	int in_eeor_mode = 0;
5196	int no_rcv_needed = 0;
5197	uint32_t rwnd_req = 0;
5198	int hold_sblock = 0;
5199	int hold_rlock = 0;
5200	ssize_t slen = 0;
5201	uint32_t held_length = 0;
5202	int sockbuf_lock = 0;
5203
5204	if (uio == NULL) {
5205		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5206		return (EINVAL);
5207	}
5208	if (msg_flags) {
5209		in_flags = *msg_flags;
5210		if (in_flags & MSG_PEEK)
5211			SCTP_STAT_INCR(sctps_read_peeks);
5212	} else {
5213		in_flags = 0;
5214	}
5215	slen = uio->uio_resid;
5216
5217	/* Pull in and set up our int flags */
5218	if (in_flags & MSG_OOB) {
5219		/* Out of band's NOT supported */
5220		return (EOPNOTSUPP);
5221	}
5222	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5223		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5224		return (EINVAL);
5225	}
5226	if ((in_flags & (MSG_DONTWAIT
5227	    | MSG_NBIO
5228	    )) ||
5229	    SCTP_SO_IS_NBIO(so)) {
5230		block_allowed = 0;
5231	}
5232	/* setup the endpoint */
5233	inp = (struct sctp_inpcb *)so->so_pcb;
5234	if (inp == NULL) {
5235		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5236		return (EFAULT);
5237	}
5238	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5239	/* Must be at least a MTU's worth */
5240	if (rwnd_req < SCTP_MIN_RWND)
5241		rwnd_req = SCTP_MIN_RWND;
5242	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5243	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5244		sctp_misc_ints(SCTP_SORECV_ENTER,
5245		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5246	}
5247	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5248		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5249		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5250	}
5251	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5252	if (error) {
5253		goto release_unlocked;
5254	}
5255	sockbuf_lock = 1;
5256restart:
5257
5258
5259restart_nosblocks:
5260	if (hold_sblock == 0) {
5261		SOCKBUF_LOCK(&so->so_rcv);
5262		hold_sblock = 1;
5263	}
5264	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5265	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5266		goto out;
5267	}
5268	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5269		if (so->so_error) {
5270			error = so->so_error;
5271			if ((in_flags & MSG_PEEK) == 0)
5272				so->so_error = 0;
5273			goto out;
5274		} else {
5275			if (so->so_rcv.sb_cc == 0) {
5276				/* indicate EOF */
5277				error = 0;
5278				goto out;
5279			}
5280		}
5281	}
5282	if (so->so_rcv.sb_cc <= held_length) {
5283		if (so->so_error) {
5284			error = so->so_error;
5285			if ((in_flags & MSG_PEEK) == 0) {
5286				so->so_error = 0;
5287			}
5288			goto out;
5289		}
5290		if ((so->so_rcv.sb_cc == 0) &&
5291		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5292		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5293			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5294				/*
5295				 * For active open side clear flags for
5296				 * re-use passive open is blocked by
5297				 * connect.
5298				 */
5299				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5300					/*
5301					 * You were aborted, passive side
5302					 * always hits here
5303					 */
5304					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5305					error = ECONNRESET;
5306				}
5307				so->so_state &= ~(SS_ISCONNECTING |
5308				    SS_ISDISCONNECTING |
5309				    SS_ISCONFIRMING |
5310				    SS_ISCONNECTED);
5311				if (error == 0) {
5312					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5313						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5314						error = ENOTCONN;
5315					}
5316				}
5317				goto out;
5318			}
5319		}
5320		if (block_allowed) {
5321			error = sbwait(&so->so_rcv);
5322			if (error) {
5323				goto out;
5324			}
5325			held_length = 0;
5326			goto restart_nosblocks;
5327		} else {
5328			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5329			error = EWOULDBLOCK;
5330			goto out;
5331		}
5332	}
5333	if (hold_sblock == 1) {
5334		SOCKBUF_UNLOCK(&so->so_rcv);
5335		hold_sblock = 0;
5336	}
5337	/* we possibly have data we can read */
5338	/* sa_ignore FREED_MEMORY */
5339	control = TAILQ_FIRST(&inp->read_queue);
5340	if (control == NULL) {
5341		/*
5342		 * This could be happening since the appender did the
5343		 * increment but as not yet did the tailq insert onto the
5344		 * read_queue
5345		 */
5346		if (hold_rlock == 0) {
5347			SCTP_INP_READ_LOCK(inp);
5348		}
5349		control = TAILQ_FIRST(&inp->read_queue);
5350		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5351#ifdef INVARIANTS
5352			panic("Huh, its non zero and nothing on control?");
5353#endif
5354			so->so_rcv.sb_cc = 0;
5355		}
5356		SCTP_INP_READ_UNLOCK(inp);
5357		hold_rlock = 0;
5358		goto restart;
5359	}
5360	if ((control->length == 0) &&
5361	    (control->do_not_ref_stcb)) {
5362		/*
5363		 * Clean up code for freeing assoc that left behind a
5364		 * pdapi.. maybe a peer in EEOR that just closed after
5365		 * sending and never indicated a EOR.
5366		 */
5367		if (hold_rlock == 0) {
5368			hold_rlock = 1;
5369			SCTP_INP_READ_LOCK(inp);
5370		}
5371		control->held_length = 0;
5372		if (control->data) {
5373			/* Hmm there is data here .. fix */
5374			struct mbuf *m_tmp;
5375			int cnt = 0;
5376
5377			m_tmp = control->data;
5378			while (m_tmp) {
5379				cnt += SCTP_BUF_LEN(m_tmp);
5380				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5381					control->tail_mbuf = m_tmp;
5382					control->end_added = 1;
5383				}
5384				m_tmp = SCTP_BUF_NEXT(m_tmp);
5385			}
5386			control->length = cnt;
5387		} else {
5388			/* remove it */
5389			TAILQ_REMOVE(&inp->read_queue, control, next);
5390			/* Add back any hiddend data */
5391			sctp_free_remote_addr(control->whoFrom);
5392			sctp_free_a_readq(stcb, control);
5393		}
5394		if (hold_rlock) {
5395			hold_rlock = 0;
5396			SCTP_INP_READ_UNLOCK(inp);
5397		}
5398		goto restart;
5399	}
5400	if ((control->length == 0) &&
5401	    (control->end_added == 1)) {
5402		/*
5403		 * Do we also need to check for (control->pdapi_aborted ==
5404		 * 1)?
5405		 */
5406		if (hold_rlock == 0) {
5407			hold_rlock = 1;
5408			SCTP_INP_READ_LOCK(inp);
5409		}
5410		TAILQ_REMOVE(&inp->read_queue, control, next);
5411		if (control->data) {
5412#ifdef INVARIANTS
5413			panic("control->data not null but control->length == 0");
5414#else
5415			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5416			sctp_m_freem(control->data);
5417			control->data = NULL;
5418#endif
5419		}
5420		if (control->aux_data) {
5421			sctp_m_free(control->aux_data);
5422			control->aux_data = NULL;
5423		}
5424#ifdef INVARIANTS
5425		if (control->on_strm_q) {
5426			panic("About to free ctl:%p so:%p and its in %d",
5427			    control, so, control->on_strm_q);
5428		}
5429#endif
5430		sctp_free_remote_addr(control->whoFrom);
5431		sctp_free_a_readq(stcb, control);
5432		if (hold_rlock) {
5433			hold_rlock = 0;
5434			SCTP_INP_READ_UNLOCK(inp);
5435		}
5436		goto restart;
5437	}
5438	if (control->length == 0) {
5439		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5440		    (filling_sinfo)) {
5441			/* find a more suitable one then this */
5442			ctl = TAILQ_NEXT(control, next);
5443			while (ctl) {
5444				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5445				    (ctl->some_taken ||
5446				    (ctl->spec_flags & M_NOTIFICATION) ||
5447				    ((ctl->do_not_ref_stcb == 0) &&
5448				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5449				    ) {
5450					/*-
5451					 * If we have a different TCB next, and there is data
5452					 * present. If we have already taken some (pdapi), OR we can
5453					 * ref the tcb and no delivery as started on this stream, we
5454					 * take it. Note we allow a notification on a different
5455					 * assoc to be delivered..
5456					 */
5457					control = ctl;
5458					goto found_one;
5459				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5460					    (ctl->length) &&
5461					    ((ctl->some_taken) ||
5462					    ((ctl->do_not_ref_stcb == 0) &&
5463					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5464				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5465					/*-
5466					 * If we have the same tcb, and there is data present, and we
5467					 * have the strm interleave feature present. Then if we have
5468					 * taken some (pdapi) or we can refer to tht tcb AND we have
5469					 * not started a delivery for this stream, we can take it.
5470					 * Note we do NOT allow a notificaiton on the same assoc to
5471					 * be delivered.
5472					 */
5473					control = ctl;
5474					goto found_one;
5475				}
5476				ctl = TAILQ_NEXT(ctl, next);
5477			}
5478		}
5479		/*
5480		 * if we reach here, not suitable replacement is available
5481		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5482		 * into the our held count, and its time to sleep again.
5483		 */
5484		held_length = so->so_rcv.sb_cc;
5485		control->held_length = so->so_rcv.sb_cc;
5486		goto restart;
5487	}
5488	/* Clear the held length since there is something to read */
5489	control->held_length = 0;
5490found_one:
5491	/*
5492	 * If we reach here, control has a some data for us to read off.
5493	 * Note that stcb COULD be NULL.
5494	 */
5495	if (hold_rlock == 0) {
5496		hold_rlock = 1;
5497		SCTP_INP_READ_LOCK(inp);
5498	}
5499	control->some_taken++;
5500	stcb = control->stcb;
5501	if (stcb) {
5502		if ((control->do_not_ref_stcb == 0) &&
5503		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5504			if (freecnt_applied == 0)
5505				stcb = NULL;
5506		} else if (control->do_not_ref_stcb == 0) {
5507			/* you can't free it on me please */
5508			/*
5509			 * The lock on the socket buffer protects us so the
5510			 * free code will stop. But since we used the
5511			 * socketbuf lock and the sender uses the tcb_lock
5512			 * to increment, we need to use the atomic add to
5513			 * the refcnt
5514			 */
5515			if (freecnt_applied) {
5516#ifdef INVARIANTS
5517				panic("refcnt already incremented");
5518#else
5519				SCTP_PRINTF("refcnt already incremented?\n");
5520#endif
5521			} else {
5522				atomic_add_int(&stcb->asoc.refcnt, 1);
5523				freecnt_applied = 1;
5524			}
5525			/*
5526			 * Setup to remember how much we have not yet told
5527			 * the peer our rwnd has opened up. Note we grab the
5528			 * value from the tcb from last time. Note too that
5529			 * sack sending clears this when a sack is sent,
5530			 * which is fine. Once we hit the rwnd_req, we then
5531			 * will go to the sctp_user_rcvd() that will not
5532			 * lock until it KNOWs it MUST send a WUP-SACK.
5533			 */
5534			freed_so_far = stcb->freed_by_sorcv_sincelast;
5535			stcb->freed_by_sorcv_sincelast = 0;
5536		}
5537	}
5538	if (stcb &&
5539	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5540	    control->do_not_ref_stcb == 0) {
5541		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5542	}
5543	/* First lets get off the sinfo and sockaddr info */
5544	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5545		sinfo->sinfo_stream = control->sinfo_stream;
5546		sinfo->sinfo_ssn = (uint16_t)control->mid;
5547		sinfo->sinfo_flags = control->sinfo_flags;
5548		sinfo->sinfo_ppid = control->sinfo_ppid;
5549		sinfo->sinfo_context = control->sinfo_context;
5550		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5551		sinfo->sinfo_tsn = control->sinfo_tsn;
5552		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5553		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5554		nxt = TAILQ_NEXT(control, next);
5555		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5556		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5557			struct sctp_extrcvinfo *s_extra;
5558
5559			s_extra = (struct sctp_extrcvinfo *)sinfo;
5560			if ((nxt) &&
5561			    (nxt->length)) {
5562				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5563				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5564					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5565				}
5566				if (nxt->spec_flags & M_NOTIFICATION) {
5567					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5568				}
5569				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5570				s_extra->serinfo_next_length = nxt->length;
5571				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5572				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5573				if (nxt->tail_mbuf != NULL) {
5574					if (nxt->end_added) {
5575						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5576					}
5577				}
5578			} else {
5579				/*
5580				 * we explicitly 0 this, since the memcpy
5581				 * got some other things beyond the older
5582				 * sinfo_ that is on the control's structure
5583				 * :-D
5584				 */
5585				nxt = NULL;
5586				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5587				s_extra->serinfo_next_aid = 0;
5588				s_extra->serinfo_next_length = 0;
5589				s_extra->serinfo_next_ppid = 0;
5590				s_extra->serinfo_next_stream = 0;
5591			}
5592		}
5593		/*
5594		 * update off the real current cum-ack, if we have an stcb.
5595		 */
5596		if ((control->do_not_ref_stcb == 0) && stcb)
5597			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5598		/*
5599		 * mask off the high bits, we keep the actual chunk bits in
5600		 * there.
5601		 */
5602		sinfo->sinfo_flags &= 0x00ff;
5603		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5604			sinfo->sinfo_flags |= SCTP_UNORDERED;
5605		}
5606	}
5607#ifdef SCTP_ASOCLOG_OF_TSNS
5608	{
5609		int index, newindex;
5610		struct sctp_pcbtsn_rlog *entry;
5611
5612		do {
5613			index = inp->readlog_index;
5614			newindex = index + 1;
5615			if (newindex >= SCTP_READ_LOG_SIZE) {
5616				newindex = 0;
5617			}
5618		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5619		entry = &inp->readlog[index];
5620		entry->vtag = control->sinfo_assoc_id;
5621		entry->strm = control->sinfo_stream;
5622		entry->seq = (uint16_t)control->mid;
5623		entry->sz = control->length;
5624		entry->flgs = control->sinfo_flags;
5625	}
5626#endif
5627	if ((fromlen > 0) && (from != NULL)) {
5628		union sctp_sockstore store;
5629		size_t len;
5630
5631		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5632#ifdef INET6
5633		case AF_INET6:
5634			len = sizeof(struct sockaddr_in6);
5635			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5636			store.sin6.sin6_port = control->port_from;
5637			break;
5638#endif
5639#ifdef INET
5640		case AF_INET:
5641#ifdef INET6
5642			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5643				len = sizeof(struct sockaddr_in6);
5644				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5645				    &store.sin6);
5646				store.sin6.sin6_port = control->port_from;
5647			} else {
5648				len = sizeof(struct sockaddr_in);
5649				store.sin = control->whoFrom->ro._l_addr.sin;
5650				store.sin.sin_port = control->port_from;
5651			}
5652#else
5653			len = sizeof(struct sockaddr_in);
5654			store.sin = control->whoFrom->ro._l_addr.sin;
5655			store.sin.sin_port = control->port_from;
5656#endif
5657			break;
5658#endif
5659		default:
5660			len = 0;
5661			break;
5662		}
5663		memcpy(from, &store, min((size_t)fromlen, len));
5664#ifdef INET6
5665		{
5666			struct sockaddr_in6 lsa6, *from6;
5667
5668			from6 = (struct sockaddr_in6 *)from;
5669			sctp_recover_scope_mac(from6, (&lsa6));
5670		}
5671#endif
5672	}
5673	if (hold_rlock) {
5674		SCTP_INP_READ_UNLOCK(inp);
5675		hold_rlock = 0;
5676	}
5677	if (hold_sblock) {
5678		SOCKBUF_UNLOCK(&so->so_rcv);
5679		hold_sblock = 0;
5680	}
5681	/* now copy out what data we can */
5682	if (mp == NULL) {
5683		/* copy out each mbuf in the chain up to length */
5684get_more_data:
5685		m = control->data;
5686		while (m) {
5687			/* Move out all we can */
5688			cp_len = (int)uio->uio_resid;
5689			my_len = (int)SCTP_BUF_LEN(m);
5690			if (cp_len > my_len) {
5691				/* not enough in this buf */
5692				cp_len = my_len;
5693			}
5694			if (hold_rlock) {
5695				SCTP_INP_READ_UNLOCK(inp);
5696				hold_rlock = 0;
5697			}
5698			if (cp_len > 0)
5699				error = uiomove(mtod(m, char *), cp_len, uio);
5700			/* re-read */
5701			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5702				goto release;
5703			}
5704			if ((control->do_not_ref_stcb == 0) && stcb &&
5705			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5706				no_rcv_needed = 1;
5707			}
5708			if (error) {
5709				/* error we are out of here */
5710				goto release;
5711			}
5712			SCTP_INP_READ_LOCK(inp);
5713			hold_rlock = 1;
5714			if (cp_len == SCTP_BUF_LEN(m)) {
5715				if ((SCTP_BUF_NEXT(m) == NULL) &&
5716				    (control->end_added)) {
5717					out_flags |= MSG_EOR;
5718					if ((control->do_not_ref_stcb == 0) &&
5719					    (control->stcb != NULL) &&
5720					    ((control->spec_flags & M_NOTIFICATION) == 0))
5721						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5722				}
5723				if (control->spec_flags & M_NOTIFICATION) {
5724					out_flags |= MSG_NOTIFICATION;
5725				}
5726				/* we ate up the mbuf */
5727				if (in_flags & MSG_PEEK) {
5728					/* just looking */
5729					m = SCTP_BUF_NEXT(m);
5730					copied_so_far += cp_len;
5731				} else {
5732					/* dispose of the mbuf */
5733					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5734						sctp_sblog(&so->so_rcv,
5735						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5736					}
5737					sctp_sbfree(control, stcb, &so->so_rcv, m);
5738					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5739						sctp_sblog(&so->so_rcv,
5740						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5741					}
5742					copied_so_far += cp_len;
5743					freed_so_far += cp_len;
5744					freed_so_far += MSIZE;
5745					atomic_subtract_int(&control->length, cp_len);
5746					control->data = sctp_m_free(m);
5747					m = control->data;
5748					/*
5749					 * been through it all, must hold sb
5750					 * lock ok to null tail
5751					 */
5752					if (control->data == NULL) {
5753#ifdef INVARIANTS
5754						if ((control->end_added == 0) ||
5755						    (TAILQ_NEXT(control, next) == NULL)) {
5756							/*
5757							 * If the end is not
5758							 * added, OR the
5759							 * next is NOT null
5760							 * we MUST have the
5761							 * lock.
5762							 */
5763							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5764								panic("Hmm we don't own the lock?");
5765							}
5766						}
5767#endif
5768						control->tail_mbuf = NULL;
5769#ifdef INVARIANTS
5770						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5771							panic("end_added, nothing left and no MSG_EOR");
5772						}
5773#endif
5774					}
5775				}
5776			} else {
5777				/* Do we need to trim the mbuf? */
5778				if (control->spec_flags & M_NOTIFICATION) {
5779					out_flags |= MSG_NOTIFICATION;
5780				}
5781				if ((in_flags & MSG_PEEK) == 0) {
5782					SCTP_BUF_RESV_UF(m, cp_len);
5783					SCTP_BUF_LEN(m) -= cp_len;
5784					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5785						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5786					}
5787					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5788					if ((control->do_not_ref_stcb == 0) &&
5789					    stcb) {
5790						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5791					}
5792					copied_so_far += cp_len;
5793					freed_so_far += cp_len;
5794					freed_so_far += MSIZE;
5795					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5796						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5797						    SCTP_LOG_SBRESULT, 0);
5798					}
5799					atomic_subtract_int(&control->length, cp_len);
5800				} else {
5801					copied_so_far += cp_len;
5802				}
5803			}
5804			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5805				break;
5806			}
5807			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5808			    (control->do_not_ref_stcb == 0) &&
5809			    (freed_so_far >= rwnd_req)) {
5810				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5811			}
5812		}		/* end while(m) */
5813		/*
5814		 * At this point we have looked at it all and we either have
5815		 * a MSG_EOR/or read all the user wants... <OR>
5816		 * control->length == 0.
5817		 */
5818		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5819			/* we are done with this control */
5820			if (control->length == 0) {
5821				if (control->data) {
5822#ifdef INVARIANTS
5823					panic("control->data not null at read eor?");
5824#else
5825					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5826					sctp_m_freem(control->data);
5827					control->data = NULL;
5828#endif
5829				}
5830		done_with_control:
5831				if (hold_rlock == 0) {
5832					SCTP_INP_READ_LOCK(inp);
5833					hold_rlock = 1;
5834				}
5835				TAILQ_REMOVE(&inp->read_queue, control, next);
5836				/* Add back any hiddend data */
5837				if (control->held_length) {
5838					held_length = 0;
5839					control->held_length = 0;
5840					wakeup_read_socket = 1;
5841				}
5842				if (control->aux_data) {
5843					sctp_m_free(control->aux_data);
5844					control->aux_data = NULL;
5845				}
5846				no_rcv_needed = control->do_not_ref_stcb;
5847				sctp_free_remote_addr(control->whoFrom);
5848				control->data = NULL;
5849#ifdef INVARIANTS
5850				if (control->on_strm_q) {
5851					panic("About to free ctl:%p so:%p and its in %d",
5852					    control, so, control->on_strm_q);
5853				}
5854#endif
5855				sctp_free_a_readq(stcb, control);
5856				control = NULL;
5857				if ((freed_so_far >= rwnd_req) &&
5858				    (no_rcv_needed == 0))
5859					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5860
5861			} else {
5862				/*
5863				 * The user did not read all of this
5864				 * message, turn off the returned MSG_EOR
5865				 * since we are leaving more behind on the
5866				 * control to read.
5867				 */
5868#ifdef INVARIANTS
5869				if (control->end_added &&
5870				    (control->data == NULL) &&
5871				    (control->tail_mbuf == NULL)) {
5872					panic("Gak, control->length is corrupt?");
5873				}
5874#endif
5875				no_rcv_needed = control->do_not_ref_stcb;
5876				out_flags &= ~MSG_EOR;
5877			}
5878		}
5879		if (out_flags & MSG_EOR) {
5880			goto release;
5881		}
5882		if ((uio->uio_resid == 0) ||
5883		    ((in_eeor_mode) &&
5884		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5885			goto release;
5886		}
5887		/*
5888		 * If I hit here the receiver wants more and this message is
5889		 * NOT done (pd-api). So two questions. Can we block? if not
5890		 * we are done. Did the user NOT set MSG_WAITALL?
5891		 */
5892		if (block_allowed == 0) {
5893			goto release;
5894		}
5895		/*
5896		 * We need to wait for more data a few things: - We don't
5897		 * sbunlock() so we don't get someone else reading. - We
5898		 * must be sure to account for the case where what is added
5899		 * is NOT to our control when we wakeup.
5900		 */
5901
5902		/*
5903		 * Do we need to tell the transport a rwnd update might be
5904		 * needed before we go to sleep?
5905		 */
5906		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5907		    ((freed_so_far >= rwnd_req) &&
5908		    (control->do_not_ref_stcb == 0) &&
5909		    (no_rcv_needed == 0))) {
5910			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5911		}
5912wait_some_more:
5913		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5914			goto release;
5915		}
5916		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5917			goto release;
5918
5919		if (hold_rlock == 1) {
5920			SCTP_INP_READ_UNLOCK(inp);
5921			hold_rlock = 0;
5922		}
5923		if (hold_sblock == 0) {
5924			SOCKBUF_LOCK(&so->so_rcv);
5925			hold_sblock = 1;
5926		}
5927		if ((copied_so_far) && (control->length == 0) &&
5928		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5929			goto release;
5930		}
5931		if (so->so_rcv.sb_cc <= control->held_length) {
5932			error = sbwait(&so->so_rcv);
5933			if (error) {
5934				goto release;
5935			}
5936			control->held_length = 0;
5937		}
5938		if (hold_sblock) {
5939			SOCKBUF_UNLOCK(&so->so_rcv);
5940			hold_sblock = 0;
5941		}
5942		if (control->length == 0) {
5943			/* still nothing here */
5944			if (control->end_added == 1) {
5945				/* he aborted, or is done i.e.did a shutdown */
5946				out_flags |= MSG_EOR;
5947				if (control->pdapi_aborted) {
5948					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5949						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5950
5951					out_flags |= MSG_TRUNC;
5952				} else {
5953					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5954						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5955				}
5956				goto done_with_control;
5957			}
5958			if (so->so_rcv.sb_cc > held_length) {
5959				control->held_length = so->so_rcv.sb_cc;
5960				held_length = 0;
5961			}
5962			goto wait_some_more;
5963		} else if (control->data == NULL) {
5964			/*
5965			 * we must re-sync since data is probably being
5966			 * added
5967			 */
5968			SCTP_INP_READ_LOCK(inp);
5969			if ((control->length > 0) && (control->data == NULL)) {
5970				/*
5971				 * big trouble.. we have the lock and its
5972				 * corrupt?
5973				 */
5974#ifdef INVARIANTS
5975				panic("Impossible data==NULL length !=0");
5976#endif
5977				out_flags |= MSG_EOR;
5978				out_flags |= MSG_TRUNC;
5979				control->length = 0;
5980				SCTP_INP_READ_UNLOCK(inp);
5981				goto done_with_control;
5982			}
5983			SCTP_INP_READ_UNLOCK(inp);
5984			/* We will fall around to get more data */
5985		}
5986		goto get_more_data;
5987	} else {
5988		/*-
5989		 * Give caller back the mbuf chain,
5990		 * store in uio_resid the length
5991		 */
5992		wakeup_read_socket = 0;
5993		if ((control->end_added == 0) ||
5994		    (TAILQ_NEXT(control, next) == NULL)) {
5995			/* Need to get rlock */
5996			if (hold_rlock == 0) {
5997				SCTP_INP_READ_LOCK(inp);
5998				hold_rlock = 1;
5999			}
6000		}
6001		if (control->end_added) {
6002			out_flags |= MSG_EOR;
6003			if ((control->do_not_ref_stcb == 0) &&
6004			    (control->stcb != NULL) &&
6005			    ((control->spec_flags & M_NOTIFICATION) == 0))
6006				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6007		}
6008		if (control->spec_flags & M_NOTIFICATION) {
6009			out_flags |= MSG_NOTIFICATION;
6010		}
6011		uio->uio_resid = control->length;
6012		*mp = control->data;
6013		m = control->data;
6014		while (m) {
6015			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6016				sctp_sblog(&so->so_rcv,
6017				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6018			}
6019			sctp_sbfree(control, stcb, &so->so_rcv, m);
6020			freed_so_far += SCTP_BUF_LEN(m);
6021			freed_so_far += MSIZE;
6022			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6023				sctp_sblog(&so->so_rcv,
6024				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6025			}
6026			m = SCTP_BUF_NEXT(m);
6027		}
6028		control->data = control->tail_mbuf = NULL;
6029		control->length = 0;
6030		if (out_flags & MSG_EOR) {
6031			/* Done with this control */
6032			goto done_with_control;
6033		}
6034	}
6035release:
6036	if (hold_rlock == 1) {
6037		SCTP_INP_READ_UNLOCK(inp);
6038		hold_rlock = 0;
6039	}
6040	if (hold_sblock == 1) {
6041		SOCKBUF_UNLOCK(&so->so_rcv);
6042		hold_sblock = 0;
6043	}
6044	sbunlock(&so->so_rcv);
6045	sockbuf_lock = 0;
6046
6047release_unlocked:
6048	if (hold_sblock) {
6049		SOCKBUF_UNLOCK(&so->so_rcv);
6050		hold_sblock = 0;
6051	}
6052	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6053		if ((freed_so_far >= rwnd_req) &&
6054		    (control && (control->do_not_ref_stcb == 0)) &&
6055		    (no_rcv_needed == 0))
6056			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6057	}
6058out:
6059	if (msg_flags) {
6060		*msg_flags = out_flags;
6061	}
6062	if (((out_flags & MSG_EOR) == 0) &&
6063	    ((in_flags & MSG_PEEK) == 0) &&
6064	    (sinfo) &&
6065	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6066	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6067		struct sctp_extrcvinfo *s_extra;
6068
6069		s_extra = (struct sctp_extrcvinfo *)sinfo;
6070		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6071	}
6072	if (hold_rlock == 1) {
6073		SCTP_INP_READ_UNLOCK(inp);
6074	}
6075	if (hold_sblock) {
6076		SOCKBUF_UNLOCK(&so->so_rcv);
6077	}
6078	if (sockbuf_lock) {
6079		sbunlock(&so->so_rcv);
6080	}
6081	if (freecnt_applied) {
6082		/*
6083		 * The lock on the socket buffer protects us so the free
6084		 * code will stop. But since we used the socketbuf lock and
6085		 * the sender uses the tcb_lock to increment, we need to use
6086		 * the atomic add to the refcnt.
6087		 */
6088		if (stcb == NULL) {
6089#ifdef INVARIANTS
6090			panic("stcb for refcnt has gone NULL?");
6091			goto stage_left;
6092#else
6093			goto stage_left;
6094#endif
6095		}
6096		/* Save the value back for next time */
6097		stcb->freed_by_sorcv_sincelast = freed_so_far;
6098		atomic_add_int(&stcb->asoc.refcnt, -1);
6099	}
6100	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6101		if (stcb) {
6102			sctp_misc_ints(SCTP_SORECV_DONE,
6103			    freed_so_far,
6104			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6105			    stcb->asoc.my_rwnd,
6106			    so->so_rcv.sb_cc);
6107		} else {
6108			sctp_misc_ints(SCTP_SORECV_DONE,
6109			    freed_so_far,
6110			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6111			    0,
6112			    so->so_rcv.sb_cc);
6113		}
6114	}
6115stage_left:
6116	if (wakeup_read_socket) {
6117		sctp_sorwakeup(inp, so);
6118	}
6119	return (error);
6120}
6121
6122
6123#ifdef SCTP_MBUF_LOGGING
6124struct mbuf *
6125sctp_m_free(struct mbuf *m)
6126{
6127	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6128		sctp_log_mb(m, SCTP_MBUF_IFREE);
6129	}
6130	return (m_free(m));
6131}
6132
6133void
6134sctp_m_freem(struct mbuf *mb)
6135{
6136	while (mb != NULL)
6137		mb = sctp_m_free(mb);
6138}
6139
6140#endif
6141
6142int
6143sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6144{
6145	/*
6146	 * Given a local address. For all associations that holds the
6147	 * address, request a peer-set-primary.
6148	 */
6149	struct sctp_ifa *ifa;
6150	struct sctp_laddr *wi;
6151
6152	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6153	if (ifa == NULL) {
6154		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6155		return (EADDRNOTAVAIL);
6156	}
6157	/*
6158	 * Now that we have the ifa we must awaken the iterator with this
6159	 * message.
6160	 */
6161	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6162	if (wi == NULL) {
6163		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6164		return (ENOMEM);
6165	}
6166	/* Now incr the count and int wi structure */
6167	SCTP_INCR_LADDR_COUNT();
6168	bzero(wi, sizeof(*wi));
6169	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6170	wi->ifa = ifa;
6171	wi->action = SCTP_SET_PRIM_ADDR;
6172	atomic_add_int(&ifa->refcount, 1);
6173
6174	/* Now add it to the work queue */
6175	SCTP_WQ_ADDR_LOCK();
6176	/*
6177	 * Should this really be a tailq? As it is we will process the
6178	 * newest first :-0
6179	 */
6180	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6181	SCTP_WQ_ADDR_UNLOCK();
6182	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6183	    (struct sctp_inpcb *)NULL,
6184	    (struct sctp_tcb *)NULL,
6185	    (struct sctp_nets *)NULL);
6186	return (0);
6187}
6188
6189
6190int
6191sctp_soreceive(struct socket *so,
6192    struct sockaddr **psa,
6193    struct uio *uio,
6194    struct mbuf **mp0,
6195    struct mbuf **controlp,
6196    int *flagsp)
6197{
6198	int error, fromlen;
6199	uint8_t sockbuf[256];
6200	struct sockaddr *from;
6201	struct sctp_extrcvinfo sinfo;
6202	int filling_sinfo = 1;
6203	struct sctp_inpcb *inp;
6204
6205	inp = (struct sctp_inpcb *)so->so_pcb;
6206	/* pickup the assoc we are reading from */
6207	if (inp == NULL) {
6208		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6209		return (EINVAL);
6210	}
6211	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6212	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6213	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6214	    (controlp == NULL)) {
6215		/* user does not want the sndrcv ctl */
6216		filling_sinfo = 0;
6217	}
6218	if (psa) {
6219		from = (struct sockaddr *)sockbuf;
6220		fromlen = sizeof(sockbuf);
6221		from->sa_len = 0;
6222	} else {
6223		from = NULL;
6224		fromlen = 0;
6225	}
6226
6227	if (filling_sinfo) {
6228		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6229	}
6230	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6231	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6232	if (controlp != NULL) {
6233		/* copy back the sinfo in a CMSG format */
6234		if (filling_sinfo)
6235			*controlp = sctp_build_ctl_nchunk(inp,
6236			    (struct sctp_sndrcvinfo *)&sinfo);
6237		else
6238			*controlp = NULL;
6239	}
6240	if (psa) {
6241		/* copy back the address info */
6242		if (from && from->sa_len) {
6243			*psa = sodupsockaddr(from, M_NOWAIT);
6244		} else {
6245			*psa = NULL;
6246		}
6247	}
6248	return (error);
6249}
6250
6251
6252
6253
6254
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to 'totaddr' remote addresses from the packed sockaddr
	 * list 'addr' to the association 'stcb'.  Returns the number of
	 * addresses successfully added.  On a bad address (wildcard,
	 * broadcast, multicast, unspecified) or an allocation failure the
	 * association is freed, *error is set (EINVAL or ENOBUFS) and the
	 * routine returns early; callers must check *error before touching
	 * stcb again.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: nothing added; note that 'incr'
			 * keeps its previous value (0 on the first entry),
			 * so 'sa' advances by the last known record size.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6335
6336struct sctp_tcb *
6337sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6338    unsigned int *totaddr,
6339    unsigned int *num_v4, unsigned int *num_v6, int *error,
6340    unsigned int limit, int *bad_addr)
6341{
6342	struct sockaddr *sa;
6343	struct sctp_tcb *stcb = NULL;
6344	unsigned int incr, at, i;
6345
6346	at = 0;
6347	sa = addr;
6348	*error = *num_v6 = *num_v4 = 0;
6349	/* account and validate addresses */
6350	for (i = 0; i < *totaddr; i++) {
6351		switch (sa->sa_family) {
6352#ifdef INET
6353		case AF_INET:
6354			incr = (unsigned int)sizeof(struct sockaddr_in);
6355			if (sa->sa_len != incr) {
6356				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6357				*error = EINVAL;
6358				*bad_addr = 1;
6359				return (NULL);
6360			}
6361			(*num_v4) += 1;
6362			break;
6363#endif
6364#ifdef INET6
6365		case AF_INET6:
6366			{
6367				struct sockaddr_in6 *sin6;
6368
6369				sin6 = (struct sockaddr_in6 *)sa;
6370				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6371					/* Must be non-mapped for connectx */
6372					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6373					*error = EINVAL;
6374					*bad_addr = 1;
6375					return (NULL);
6376				}
6377				incr = (unsigned int)sizeof(struct sockaddr_in6);
6378				if (sa->sa_len != incr) {
6379					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6380					*error = EINVAL;
6381					*bad_addr = 1;
6382					return (NULL);
6383				}
6384				(*num_v6) += 1;
6385				break;
6386			}
6387#endif
6388		default:
6389			*totaddr = i;
6390			incr = 0;
6391			/* we are done */
6392			break;
6393		}
6394		if (i == *totaddr) {
6395			break;
6396		}
6397		SCTP_INP_INCR_REF(inp);
6398		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6399		if (stcb != NULL) {
6400			/* Already have or am bring up an association */
6401			return (stcb);
6402		} else {
6403			SCTP_INP_DECR_REF(inp);
6404		}
6405		if ((at + incr) > limit) {
6406			*totaddr = i;
6407			break;
6408		}
6409		sa = (struct sockaddr *)((caddr_t)sa + incr);
6410	}
6411	return ((struct sctp_tcb *)NULL);
6412}
6413
6414/*
6415 * sctp_bindx(ADD) for one address.
6416 * assumes all arguments are valid/checked by caller.
6417 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * sctp_bindx(ADD) for one address: validate 'sa' against the
	 * endpoint's family/binding state, then either perform the initial
	 * bind (endpoint still unbound) or add the address to the
	 * already-bound endpoint via sctp_addr_mgmt_ep_sa().  Failures are
	 * reported through *error (EINVAL, EADDRINUSE, or whatever the
	 * bind/mgmt call returns).
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to plain v4 */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		/* v6 addresses must carry a full sockaddr_in6 */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address and treat it as plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: do the real bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is another endpoint already bound to this address+port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* unused: clear the port and add the address to this ep */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6543
6544/*
6545 * sctp_bindx(DELETE) for one address.
6546 * assumes all arguments are valid/checked by caller.
6547 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * sctp_bindx(DELETE) for one address: validate 'sa' against the
	 * endpoint's family/binding state, then remove the address from
	 * the endpoint via sctp_addr_mgmt_ep_sa().  Failures are reported
	 * through *error.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to plain v4 */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		/* v6 addresses must carry a full sockaddr_in6 */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address and treat it as plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6630
6631/*
6632 * returns the valid local address count for an assoc, taking into account
6633 * all scoping rules
6634 */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses are out of scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* skip addresses not visible in our jail */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* skip addresses not visible in our jail */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the explicitly bound addresses
		 * that are not restricted for this association
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6772
6773#if defined(SCTP_LOCAL_TRACE_BUF)
6774
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry to the global SCTP trace ring buffer.  A slot
	 * index is claimed lock-free with a CAS loop, so concurrent
	 * loggers each get a distinct slot; only the index allocation is
	 * atomic, the entry fill-in below is not.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap: next writer starts over at slot 1 */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* the wrapping writer itself uses slot 0 */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6800
6801#endif
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	/*
	 * UDP tunneling input hook: strip the encapsulating UDP header
	 * from a UDP/SCTP packet and hand the result to the normal SCTP
	 * input path, remembering the UDP source port.  'off' is the
	 * offset of the UDP header within 'm'.  Consumes 'm' on every
	 * path (either passed down or freed).
	 */
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink the IP total length by the stripped UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* shrink the v6 payload length by the stripped UDP header */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6884
6885#ifdef INET
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	/*
	 * ICMP error handler for UDP-tunneled SCTP: locate the association
	 * the failed UDP/SCTP packet belonged to, validate that the quoted
	 * packet really came from us (UDP ports and verification tag, or
	 * the initiate tag of a quoted INIT), then forward the error to
	 * sctp_notify().  'vip' points at the inner IP header quoted by
	 * the ICMP message.
	 */
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/* back up from the quoted inner IP header to the ICMP header */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/* need enough quoted data for the UDP header plus 8 bytes of SCTP */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			/*
			 * UDP port unreachable means the tunnel endpoint is
			 * gone; report it as protocol unreachable to SCTP.
			 */
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
6993#endif
6994
6995#ifdef INET6
6996static void
6997sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6998{
6999	struct ip6ctlparam *ip6cp;
7000	struct sctp_inpcb *inp;
7001	struct sctp_tcb *stcb;
7002	struct sctp_nets *net;
7003	struct sctphdr sh;
7004	struct udphdr udp;
7005	struct sockaddr_in6 src, dst;
7006	uint8_t type, code;
7007
7008	ip6cp = (struct ip6ctlparam *)d;
7009	/*
7010	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
7011	 */
7012	if (ip6cp->ip6c_m == NULL) {
7013		return;
7014	}
7015	/*
7016	 * Check if we can safely examine the ports and the verification tag
7017	 * of the SCTP common header.
7018	 */
7019	if (ip6cp->ip6c_m->m_pkthdr.len <
7020	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7021		return;
7022	}
7023	/* Copy out the UDP header. */
7024	memset(&udp, 0, sizeof(struct udphdr));
7025	m_copydata(ip6cp->ip6c_m,
7026	    ip6cp->ip6c_off,
7027	    sizeof(struct udphdr),
7028	    (caddr_t)&udp);
7029	/* Copy out the port numbers and the verification tag. */
7030	memset(&sh, 0, sizeof(struct sctphdr));
7031	m_copydata(ip6cp->ip6c_m,
7032	    ip6cp->ip6c_off + sizeof(struct udphdr),
7033	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7034	    (caddr_t)&sh);
7035	memset(&src, 0, sizeof(struct sockaddr_in6));
7036	src.sin6_family = AF_INET6;
7037	src.sin6_len = sizeof(struct sockaddr_in6);
7038	src.sin6_port = sh.src_port;
7039	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7040	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7041		return;
7042	}
7043	memset(&dst, 0, sizeof(struct sockaddr_in6));
7044	dst.sin6_family = AF_INET6;
7045	dst.sin6_len = sizeof(struct sockaddr_in6);
7046	dst.sin6_port = sh.dest_port;
7047	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7048	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7049		return;
7050	}
7051	inp = NULL;
7052	net = NULL;
7053	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7054	    (struct sockaddr *)&src,
7055	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7056	if ((stcb != NULL) &&
7057	    (net != NULL) &&
7058	    (inp != NULL)) {
7059		/* Check the UDP port numbers */
7060		if ((udp.uh_dport != net->port) ||
7061		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7062			SCTP_TCB_UNLOCK(stcb);
7063			return;
7064		}
7065		/* Check the verification tag */
7066		if (ntohl(sh.v_tag) != 0) {
7067			/*
7068			 * This must be the verification tag used for
7069			 * sending out packets. We don't consider packets
7070			 * reflecting the verification tag.
7071			 */
7072			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7073				SCTP_TCB_UNLOCK(stcb);
7074				return;
7075			}
7076		} else {
7077			if (ip6cp->ip6c_m->m_pkthdr.len >=
7078			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7079			    sizeof(struct sctphdr) +
7080			    sizeof(struct sctp_chunkhdr) +
7081			    offsetof(struct sctp_init, a_rwnd)) {
7082				/*
7083				 * In this case we can check if we got an
7084				 * INIT chunk and if the initiate tag
7085				 * matches.
7086				 */
7087				uint32_t initiate_tag;
7088				uint8_t chunk_type;
7089
7090				m_copydata(ip6cp->ip6c_m,
7091				    ip6cp->ip6c_off +
7092				    sizeof(struct udphdr) +
7093				    sizeof(struct sctphdr),
7094				    sizeof(uint8_t),
7095				    (caddr_t)&chunk_type);
7096				m_copydata(ip6cp->ip6c_m,
7097				    ip6cp->ip6c_off +
7098				    sizeof(struct udphdr) +
7099				    sizeof(struct sctphdr) +
7100				    sizeof(struct sctp_chunkhdr),
7101				    sizeof(uint32_t),
7102				    (caddr_t)&initiate_tag);
7103				if ((chunk_type != SCTP_INITIATION) ||
7104				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7105					SCTP_TCB_UNLOCK(stcb);
7106					return;
7107				}
7108			} else {
7109				SCTP_TCB_UNLOCK(stcb);
7110				return;
7111			}
7112		}
7113		type = ip6cp->ip6c_icmp6->icmp6_type;
7114		code = ip6cp->ip6c_icmp6->icmp6_code;
7115		if ((type == ICMP6_DST_UNREACH) &&
7116		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7117			type = ICMP6_PARAM_PROB;
7118			code = ICMP6_PARAMPROB_NEXTHEADER;
7119		}
7120		sctp6_notify(inp, stcb, net, type, code,
7121		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7122	} else {
7123		if ((stcb == NULL) && (inp != NULL)) {
7124			/* reduce inp's ref-count */
7125			SCTP_INP_WLOCK(inp);
7126			SCTP_INP_DECR_REF(inp);
7127			SCTP_INP_WUNLOCK(inp);
7128		}
7129		if (stcb) {
7130			SCTP_TCB_UNLOCK(stcb);
7131		}
7132	}
7133}
7134#endif
7135
/*
 * Tear down SCTP over UDP tunneling by closing the kernel tunneling
 * socket(s) and clearing the global references to them.
 *
 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
 * for writing!
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		struct socket *so4;

		so4 = SCTP_BASE_INFO(udp4_tun_socket);
		soclose(so4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		struct socket *so6;

		so6 = SCTP_BASE_INFO(udp6_tun_socket);
		soclose(so6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7156
7157int
7158sctp_over_udp_start(void)
7159{
7160	uint16_t port;
7161	int ret;
7162#ifdef INET
7163	struct sockaddr_in sin;
7164#endif
7165#ifdef INET6
7166	struct sockaddr_in6 sin6;
7167#endif
7168	/*
7169	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7170	 * for writting!
7171	 */
7172	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7173	if (ntohs(port) == 0) {
7174		/* Must have a port set */
7175		return (EINVAL);
7176	}
7177#ifdef INET
7178	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7179		/* Already running -- must stop first */
7180		return (EALREADY);
7181	}
7182#endif
7183#ifdef INET6
7184	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7185		/* Already running -- must stop first */
7186		return (EALREADY);
7187	}
7188#endif
7189#ifdef INET
7190	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7191	    SOCK_DGRAM, IPPROTO_UDP,
7192	    curthread->td_ucred, curthread))) {
7193		sctp_over_udp_stop();
7194		return (ret);
7195	}
7196	/* Call the special UDP hook. */
7197	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7198	    sctp_recv_udp_tunneled_packet,
7199	    sctp_recv_icmp_tunneled_packet,
7200	    NULL))) {
7201		sctp_over_udp_stop();
7202		return (ret);
7203	}
7204	/* Ok, we have a socket, bind it to the port. */
7205	memset(&sin, 0, sizeof(struct sockaddr_in));
7206	sin.sin_len = sizeof(struct sockaddr_in);
7207	sin.sin_family = AF_INET;
7208	sin.sin_port = htons(port);
7209	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7210	    (struct sockaddr *)&sin, curthread))) {
7211		sctp_over_udp_stop();
7212		return (ret);
7213	}
7214#endif
7215#ifdef INET6
7216	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7217	    SOCK_DGRAM, IPPROTO_UDP,
7218	    curthread->td_ucred, curthread))) {
7219		sctp_over_udp_stop();
7220		return (ret);
7221	}
7222	/* Call the special UDP hook. */
7223	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7224	    sctp_recv_udp_tunneled_packet,
7225	    sctp_recv_icmp6_tunneled_packet,
7226	    NULL))) {
7227		sctp_over_udp_stop();
7228		return (ret);
7229	}
7230	/* Ok, we have a socket, bind it to the port. */
7231	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7232	sin6.sin6_len = sizeof(struct sockaddr_in6);
7233	sin6.sin6_family = AF_INET6;
7234	sin6.sin6_port = htons(port);
7235	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7236	    (struct sockaddr *)&sin6, curthread))) {
7237		sctp_over_udp_stop();
7238		return (ret);
7239	}
7240#endif
7241	return (0);
7242}
7243
7244#if defined(INET6) || defined(INET)
7245
7246/*
7247 * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7248 * If all arguments are zero, zero is returned.
7249 */
7250uint32_t
7251sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7252{
7253	if (mtu1 > 0) {
7254		if (mtu2 > 0) {
7255			if (mtu3 > 0) {
7256				return (min(mtu1, min(mtu2, mtu3)));
7257			} else {
7258				return (min(mtu1, mtu2));
7259			}
7260		} else {
7261			if (mtu3 > 0) {
7262				return (min(mtu1, mtu3));
7263			} else {
7264				return (mtu1);
7265			}
7266		}
7267	} else {
7268		if (mtu2 > 0) {
7269			if (mtu3 > 0) {
7270				return (min(mtu2, mtu3));
7271			} else {
7272				return (mtu2);
7273			}
7274		} else {
7275			return (mtu3);
7276		}
7277	}
7278}
7279
7280void
7281sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7282{
7283	struct in_conninfo inc;
7284
7285	memset(&inc, 0, sizeof(struct in_conninfo));
7286	inc.inc_fibnum = fibnum;
7287	switch (addr->sa.sa_family) {
7288#ifdef INET
7289	case AF_INET:
7290		inc.inc_faddr = addr->sin.sin_addr;
7291		break;
7292#endif
7293#ifdef INET6
7294	case AF_INET6:
7295		inc.inc_flags |= INC_ISIPV6;
7296		inc.inc6_faddr = addr->sin6.sin6_addr;
7297		break;
7298#endif
7299	default:
7300		return;
7301	}
7302	tcp_hc_updatemtu(&inc, (u_long)mtu);
7303}
7304
7305uint32_t
7306sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7307{
7308	struct in_conninfo inc;
7309
7310	memset(&inc, 0, sizeof(struct in_conninfo));
7311	inc.inc_fibnum = fibnum;
7312	switch (addr->sa.sa_family) {
7313#ifdef INET
7314	case AF_INET:
7315		inc.inc_faddr = addr->sin.sin_addr;
7316		break;
7317#endif
7318#ifdef INET6
7319	case AF_INET6:
7320		inc.inc_flags |= INC_ISIPV6;
7321		inc.inc6_faddr = addr->sin6.sin6_addr;
7322		break;
7323#endif
7324	default:
7325		return (0);
7326	}
7327	return ((uint32_t)tcp_hc_getmtu(&inc));
7328}
7329#endif
7330