/* sctputil.c revision 283822 */
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 283822 2015-05-31 12:46:40Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220#ifdef SCTP_MBUF_LOGGING
221void
222sctp_log_mb(struct mbuf *m, int from)
223{
224	struct sctp_cwnd_log sctp_clog;
225
226	sctp_clog.x.mb.mp = m;
227	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230	if (SCTP_BUF_IS_EXTENDED(m)) {
231		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233	} else {
234		sctp_clog.x.mb.ext = 0;
235		sctp_clog.x.mb.refcnt = 0;
236	}
237	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238	    SCTP_LOG_EVENT_MBUF,
239	    from,
240	    sctp_clog.x.misc.log1,
241	    sctp_clog.x.misc.log2,
242	    sctp_clog.x.misc.log3,
243	    sctp_clog.x.misc.log4);
244}
245
246void
247sctp_log_mbc(struct mbuf *m, int from)
248{
249	struct mbuf *mat;
250
251	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252		sctp_log_mb(mat, from);
253	}
254}
255
256#endif
257
258void
259sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260{
261	struct sctp_cwnd_log sctp_clog;
262
263	if (control == NULL) {
264		SCTP_PRINTF("Gak log of NULL?\n");
265		return;
266	}
267	sctp_clog.x.strlog.stcb = control->stcb;
268	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270	sctp_clog.x.strlog.strm = control->sinfo_stream;
271	if (poschk != NULL) {
272		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274	} else {
275		sctp_clog.x.strlog.e_tsn = 0;
276		sctp_clog.x.strlog.e_sseq = 0;
277	}
278	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279	    SCTP_LOG_EVENT_STRM,
280	    from,
281	    sctp_clog.x.misc.log1,
282	    sctp_clog.x.misc.log2,
283	    sctp_clog.x.misc.log3,
284	    sctp_clog.x.misc.log4);
285}
286
287void
288sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289{
290	struct sctp_cwnd_log sctp_clog;
291
292	sctp_clog.x.cwnd.net = net;
293	if (stcb->asoc.send_queue_cnt > 255)
294		sctp_clog.x.cwnd.cnt_in_send = 255;
295	else
296		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297	if (stcb->asoc.stream_queue_cnt > 255)
298		sctp_clog.x.cwnd.cnt_in_str = 255;
299	else
300		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301
302	if (net) {
303		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304		sctp_clog.x.cwnd.inflight = net->flight_size;
305		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308	}
309	if (SCTP_CWNDLOG_PRESEND == from) {
310		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311	}
312	sctp_clog.x.cwnd.cwnd_augment = augment;
313	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314	    SCTP_LOG_EVENT_CWND,
315	    from,
316	    sctp_clog.x.misc.log1,
317	    sctp_clog.x.misc.log2,
318	    sctp_clog.x.misc.log3,
319	    sctp_clog.x.misc.log4);
320}
321
322void
323sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324{
325	struct sctp_cwnd_log sctp_clog;
326
327	memset(&sctp_clog, 0, sizeof(sctp_clog));
328	if (inp) {
329		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330
331	} else {
332		sctp_clog.x.lock.sock = (void *)NULL;
333	}
334	sctp_clog.x.lock.inp = (void *)inp;
335	if (stcb) {
336		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337	} else {
338		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339	}
340	if (inp) {
341		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343	} else {
344		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346	}
347	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348	if (inp && (inp->sctp_socket)) {
349		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352	} else {
353		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356	}
357	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358	    SCTP_LOG_LOCK_EVENT,
359	    from,
360	    sctp_clog.x.misc.log1,
361	    sctp_clog.x.misc.log2,
362	    sctp_clog.x.misc.log3,
363	    sctp_clog.x.misc.log4);
364}
365
366void
367sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368{
369	struct sctp_cwnd_log sctp_clog;
370
371	memset(&sctp_clog, 0, sizeof(sctp_clog));
372	sctp_clog.x.cwnd.net = net;
373	sctp_clog.x.cwnd.cwnd_new_value = error;
374	sctp_clog.x.cwnd.inflight = net->flight_size;
375	sctp_clog.x.cwnd.cwnd_augment = burst;
376	if (stcb->asoc.send_queue_cnt > 255)
377		sctp_clog.x.cwnd.cnt_in_send = 255;
378	else
379		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380	if (stcb->asoc.stream_queue_cnt > 255)
381		sctp_clog.x.cwnd.cnt_in_str = 255;
382	else
383		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385	    SCTP_LOG_EVENT_MAXBURST,
386	    from,
387	    sctp_clog.x.misc.log1,
388	    sctp_clog.x.misc.log2,
389	    sctp_clog.x.misc.log3,
390	    sctp_clog.x.misc.log4);
391}
392
393void
394sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395{
396	struct sctp_cwnd_log sctp_clog;
397
398	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399	sctp_clog.x.rwnd.send_size = snd_size;
400	sctp_clog.x.rwnd.overhead = overhead;
401	sctp_clog.x.rwnd.new_rwnd = 0;
402	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403	    SCTP_LOG_EVENT_RWND,
404	    from,
405	    sctp_clog.x.misc.log1,
406	    sctp_clog.x.misc.log2,
407	    sctp_clog.x.misc.log3,
408	    sctp_clog.x.misc.log4);
409}
410
411void
412sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413{
414	struct sctp_cwnd_log sctp_clog;
415
416	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417	sctp_clog.x.rwnd.send_size = flight_size;
418	sctp_clog.x.rwnd.overhead = overhead;
419	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421	    SCTP_LOG_EVENT_RWND,
422	    from,
423	    sctp_clog.x.misc.log1,
424	    sctp_clog.x.misc.log2,
425	    sctp_clog.x.misc.log3,
426	    sctp_clog.x.misc.log4);
427}
428
429#ifdef SCTP_MBCNT_LOGGING
430static void
431sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432{
433	struct sctp_cwnd_log sctp_clog;
434
435	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436	sctp_clog.x.mbcnt.size_change = book;
437	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440	    SCTP_LOG_EVENT_MBCNT,
441	    from,
442	    sctp_clog.x.misc.log1,
443	    sctp_clog.x.misc.log2,
444	    sctp_clog.x.misc.log3,
445	    sctp_clog.x.misc.log4);
446}
447
448#endif
449
/*
 * Log four caller-supplied 32-bit values to the kernel trace buffer
 * under the SCTP misc event class; from identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458
459void
460sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461{
462	struct sctp_cwnd_log sctp_clog;
463
464	sctp_clog.x.wake.stcb = (void *)stcb;
465	sctp_clog.x.wake.wake_cnt = wake_cnt;
466	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469
470	if (stcb->asoc.stream_queue_cnt < 0xff)
471		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472	else
473		sctp_clog.x.wake.stream_qcnt = 0xff;
474
475	if (stcb->asoc.chunks_on_out_queue < 0xff)
476		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477	else
478		sctp_clog.x.wake.chunks_on_oque = 0xff;
479
480	sctp_clog.x.wake.sctpflags = 0;
481	/* set in the defered mode stuff */
482	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483		sctp_clog.x.wake.sctpflags |= 1;
484	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485		sctp_clog.x.wake.sctpflags |= 2;
486	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487		sctp_clog.x.wake.sctpflags |= 4;
488	/* what about the sb */
489	if (stcb->sctp_socket) {
490		struct socket *so = stcb->sctp_socket;
491
492		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493	} else {
494		sctp_clog.x.wake.sbflags = 0xff;
495	}
496	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497	    SCTP_LOG_EVENT_WAKE,
498	    from,
499	    sctp_clog.x.misc.log1,
500	    sctp_clog.x.misc.log2,
501	    sctp_clog.x.misc.log3,
502	    sctp_clog.x.misc.log4);
503}
504
505void
506sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507{
508	struct sctp_cwnd_log sctp_clog;
509
510	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516	sctp_clog.x.blk.sndlen = sendlen;
517	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518	    SCTP_LOG_EVENT_BLOCK,
519	    from,
520	    sctp_clog.x.misc.log1,
521	    sctp_clog.x.misc.log2,
522	    sctp_clog.x.misc.log3,
523	    sctp_clog.x.misc.log4);
524}
525
/*
 * Stub for pulling the cwnd log via a socket option; always succeeds
 * without copying anything (trace data is retrieved with ktrdump).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532
533#ifdef SCTP_AUDITING_ENABLED
/* Circular log of (event, detail) byte pairs used by the auditing code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;	/* next slot to write in sctp_audit_data */
536
/*
 * Dump the circular audit buffer to the console in chronological
 * order: first from the current index to the end (oldest entries),
 * then from the start up to the index (newest).  Entries 0xe0/0x01,
 * 0xf0/xx and 0xc0/0x01 act as record separators and force a line
 * break; otherwise the line wraps every 14 printed entries.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;	/* entries printed on the current output line */

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* second pass: the wrapped-around (newest) part of the ring */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
585
/*
 * Consistency-check (and repair) the association's retransmit count
 * and flight-size accounting against the actual contents of the sent
 * queue and the per-destination nets.  Each step appends a marker to
 * the audit ring; if any mismatch was found and corrected, the whole
 * audit report is printed.  from tags the call site.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marker: audit entry, low byte of the call-site code */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: aborted, no endpoint */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: aborted, no association */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the association's current retran count */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* recount resends and in-flight bytes from the sent queue */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retran count mismatch; fix and remember */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight mismatch; correct it */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk count mismatch; correct it */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* cross-check: per-net flight sizes must sum to total_flight */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum mismatch; rebuild each net */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
715
716void
717sctp_audit_log(uint8_t ev, uint8_t fd)
718{
719
720	sctp_audit_data[sctp_audit_indx][0] = ev;
721	sctp_audit_data[sctp_audit_indx][1] = fd;
722	sctp_audit_indx++;
723	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724		sctp_audit_indx = 0;
725	}
726}
727
728#endif
729
730/*
731 * sctp_stop_timers_for_shutdown() should be called
732 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733 * state to make sure that all timers are stopped.
734 */
735void
736sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737{
738	struct sctp_association *asoc;
739	struct sctp_nets *net;
740
741	asoc = &stcb->asoc;
742
743	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751	}
752}
753
754/*
755 * a list of sizes based on typical mtu's, used only if next hop size not
756 * returned.
757 */
static uint32_t sctp_mtu_sizes[] = {
	68,	/* conservative IPv4 minimum */
	296,
	508,
	512,
	544,
	576,	/* minimum IPv4 reassembly size */
	1006,
	1492,	/* PPPoE over Ethernet */
	1500,	/* Ethernet */
	1536,
	2002,
	2048,
	4352,	/* FDDI */
	4464,
	8166,
	17914,
	32000,
	65535	/* maximum IPv4 datagram */
};
778
779/*
780 * Return the largest MTU smaller than val. If there is no
781 * entry, just return val.
782 */
783uint32_t
784sctp_get_prev_mtu(uint32_t val)
785{
786	uint32_t i;
787
788	if (val <= sctp_mtu_sizes[0]) {
789		return (val);
790	}
791	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792		if (val <= sctp_mtu_sizes[i]) {
793			break;
794		}
795	}
796	return (sctp_mtu_sizes[i - 1]);
797}
798
799/*
800 * Return the smallest MTU larger than val. If there is no
801 * entry, just return val.
802 */
803uint32_t
804sctp_get_next_mtu(uint32_t val)
805{
806	/* select another MTU that is just bigger than this one */
807	uint32_t i;
808
809	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810		if (val < sctp_mtu_sizes[i]) {
811			return (sctp_mtu_sizes[i]);
812		}
813	}
814	return (val);
815}
816
/*
 * Refill the endpoint's random_store by HMAC'ing the endpoint's
 * random seed with a monotonically increasing counter, then reset
 * store_at so consumers start from the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC(random_numbers, counter) -> random_store */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835
836uint32_t
837sctp_select_initial_TSN(struct sctp_pcb *inp)
838{
839	/*
840	 * A true implementation should use random selection process to get
841	 * the initial stream sequence number, using RFC1750 as a good
842	 * guideline
843	 */
844	uint32_t x, *xp;
845	uint8_t *p;
846	int store_at, new_store;
847
848	if (inp->initial_sequence_debug != 0) {
849		uint32_t ret;
850
851		ret = inp->initial_sequence_debug;
852		inp->initial_sequence_debug++;
853		return (ret);
854	}
855retry:
856	store_at = inp->store_at;
857	new_store = store_at + sizeof(uint32_t);
858	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859		new_store = 0;
860	}
861	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862		goto retry;
863	}
864	if (new_store == 0) {
865		/* Refill the random store */
866		sctp_fill_random_store(inp);
867	}
868	p = &inp->random_store[store_at];
869	xp = (uint32_t *) p;
870	x = *xp;
871	return (x);
872}
873
874uint32_t
875sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876{
877	uint32_t x;
878	struct timeval now;
879
880	if (check) {
881		(void)SCTP_GETTIME_TIMEVAL(&now);
882	}
883	for (;;) {
884		x = sctp_select_initial_TSN(&inp->sctp_ep);
885		if (x == 0) {
886			/* we never use 0 */
887			continue;
888		}
889		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890			break;
891		}
892	}
893	return (x);
894}
895
896int
897sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898    uint32_t override_tag, uint32_t vrf_id)
899{
900	struct sctp_association *asoc;
901
902	/*
903	 * Anything set to zero is taken care of by the allocation routine's
904	 * bzero
905	 */
906
907	/*
908	 * Up front select what scoping to apply on addresses I tell my peer
909	 * Not sure what to do with these right now, we will need to come up
910	 * with a way to set them. We may need to pass them through from the
911	 * caller in the sctp_aloc_assoc() function.
912	 */
913	int i;
914
915#if defined(SCTP_DETAILED_STR_STATS)
916	int j;
917
918#endif
919
920	asoc = &stcb->asoc;
921	/* init all variables to a known value. */
922	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923	asoc->max_burst = inp->sctp_ep.max_burst;
924	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928	asoc->ecn_supported = inp->ecn_supported;
929	asoc->prsctp_supported = inp->prsctp_supported;
930	asoc->auth_supported = inp->auth_supported;
931	asoc->asconf_supported = inp->asconf_supported;
932	asoc->reconfig_supported = inp->reconfig_supported;
933	asoc->nrsack_supported = inp->nrsack_supported;
934	asoc->pktdrop_supported = inp->pktdrop_supported;
935	asoc->sctp_cmt_pf = (uint8_t) 0;
936	asoc->sctp_frag_point = inp->sctp_frag_point;
937	asoc->sctp_features = inp->sctp_features;
938	asoc->default_dscp = inp->sctp_ep.default_dscp;
939	asoc->max_cwnd = inp->max_cwnd;
940#ifdef INET6
941	if (inp->sctp_ep.default_flowlabel) {
942		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
943	} else {
944		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
945			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
946			asoc->default_flowlabel &= 0x000fffff;
947			asoc->default_flowlabel |= 0x80000000;
948		} else {
949			asoc->default_flowlabel = 0;
950		}
951	}
952#endif
953	asoc->sb_send_resv = 0;
954	if (override_tag) {
955		asoc->my_vtag = override_tag;
956	} else {
957		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
958	}
959	/* Get the nonce tags */
960	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
962	asoc->vrf_id = vrf_id;
963
964#ifdef SCTP_ASOCLOG_OF_TSNS
965	asoc->tsn_in_at = 0;
966	asoc->tsn_out_at = 0;
967	asoc->tsn_in_wrapped = 0;
968	asoc->tsn_out_wrapped = 0;
969	asoc->cumack_log_at = 0;
970	asoc->cumack_log_atsnt = 0;
971#endif
972#ifdef SCTP_FS_SPEC_LOG
973	asoc->fs_index = 0;
974#endif
975	asoc->refcnt = 0;
976	asoc->assoc_up_sent = 0;
977	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
978	    sctp_select_initial_TSN(&inp->sctp_ep);
979	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
980	/* we are optimisitic here */
981	asoc->peer_supports_nat = 0;
982	asoc->sent_queue_retran_cnt = 0;
983
984	/* for CMT */
985	asoc->last_net_cmt_send_started = NULL;
986
987	/* This will need to be adjusted */
988	asoc->last_acked_seq = asoc->init_seq_number - 1;
989	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
990	asoc->asconf_seq_in = asoc->last_acked_seq;
991
992	/* here we are different, we hold the next one we expect */
993	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
994
995	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
996	asoc->initial_rto = inp->sctp_ep.initial_rto;
997
998	asoc->max_init_times = inp->sctp_ep.max_init_times;
999	asoc->max_send_times = inp->sctp_ep.max_send_times;
1000	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1001	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1002	asoc->free_chunk_cnt = 0;
1003
1004	asoc->iam_blocking = 0;
1005	asoc->context = inp->sctp_context;
1006	asoc->local_strreset_support = inp->local_strreset_support;
1007	asoc->def_send = inp->def_send;
1008	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1009	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1010	asoc->pr_sctp_cnt = 0;
1011	asoc->total_output_queue_size = 0;
1012
1013	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1014		asoc->scope.ipv6_addr_legal = 1;
1015		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1016			asoc->scope.ipv4_addr_legal = 1;
1017		} else {
1018			asoc->scope.ipv4_addr_legal = 0;
1019		}
1020	} else {
1021		asoc->scope.ipv6_addr_legal = 0;
1022		asoc->scope.ipv4_addr_legal = 1;
1023	}
1024
1025	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1026	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1027
1028	asoc->smallest_mtu = inp->sctp_frag_point;
1029	asoc->minrto = inp->sctp_ep.sctp_minrto;
1030	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1031
1032	asoc->locked_on_sending = NULL;
1033	asoc->stream_locked_on = 0;
1034	asoc->ecn_echo_cnt_onq = 0;
1035	asoc->stream_locked = 0;
1036
1037	asoc->send_sack = 1;
1038
1039	LIST_INIT(&asoc->sctp_restricted_addrs);
1040
1041	TAILQ_INIT(&asoc->nets);
1042	TAILQ_INIT(&asoc->pending_reply_queue);
1043	TAILQ_INIT(&asoc->asconf_ack_sent);
1044	/* Setup to fill the hb random cache at first HB */
1045	asoc->hb_random_idx = 4;
1046
1047	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1048
1049	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1050	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1051
1052	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1053	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1054
1055	/*
1056	 * Now the stream parameters, here we allocate space for all streams
1057	 * that we request by default.
1058	 */
1059	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060	    inp->sctp_ep.pre_open_stream_count;
1061	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063	    SCTP_M_STRMO);
1064	if (asoc->strmout == NULL) {
1065		/* big trouble no memory */
1066		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067		return (ENOMEM);
1068	}
1069	for (i = 0; i < asoc->streamoutcnt; i++) {
1070		/*
1071		 * inbound side must be set to 0xffff, also NOTE when we get
1072		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1073		 * count (streamoutcnt) but first check if we sent to any of
1074		 * the upper streams that were dropped (if some were). Those
1075		 * that were dropped must be notified to the upper layer as
1076		 * failed to send.
1077		 */
1078		asoc->strmout[i].next_sequence_send = 0x0;
1079		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080		asoc->strmout[i].chunks_on_queues = 0;
1081#if defined(SCTP_DETAILED_STR_STATS)
1082		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1083			asoc->strmout[i].abandoned_sent[j] = 0;
1084			asoc->strmout[i].abandoned_unsent[j] = 0;
1085		}
1086#else
1087		asoc->strmout[i].abandoned_sent[0] = 0;
1088		asoc->strmout[i].abandoned_unsent[0] = 0;
1089#endif
1090		asoc->strmout[i].stream_no = i;
1091		asoc->strmout[i].last_msg_incomplete = 0;
1092		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1093	}
1094	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1095
1096	/* Now the mapping array */
1097	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1098	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1099	    SCTP_M_MAP);
1100	if (asoc->mapping_array == NULL) {
1101		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1102		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1103		return (ENOMEM);
1104	}
1105	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1106	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1107	    SCTP_M_MAP);
1108	if (asoc->nr_mapping_array == NULL) {
1109		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1110		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1111		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112		return (ENOMEM);
1113	}
1114	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1115
1116	/* Now the init of the other outqueues */
1117	TAILQ_INIT(&asoc->free_chunks);
1118	TAILQ_INIT(&asoc->control_send_queue);
1119	TAILQ_INIT(&asoc->asconf_send_queue);
1120	TAILQ_INIT(&asoc->send_queue);
1121	TAILQ_INIT(&asoc->sent_queue);
1122	TAILQ_INIT(&asoc->reasmqueue);
1123	TAILQ_INIT(&asoc->resetHead);
1124	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1125	TAILQ_INIT(&asoc->asconf_queue);
1126	/* authentication fields */
1127	asoc->authinfo.random = NULL;
1128	asoc->authinfo.active_keyid = 0;
1129	asoc->authinfo.assoc_key = NULL;
1130	asoc->authinfo.assoc_keyid = 0;
1131	asoc->authinfo.recv_key = NULL;
1132	asoc->authinfo.recv_keyid = 0;
1133	LIST_INIT(&asoc->shared_keys);
1134	asoc->marked_retrans = 0;
1135	asoc->port = inp->sctp_ep.port;
1136	asoc->timoinit = 0;
1137	asoc->timodata = 0;
1138	asoc->timosack = 0;
1139	asoc->timoshutdown = 0;
1140	asoc->timoheartbeat = 0;
1141	asoc->timocookie = 0;
1142	asoc->timoshutdownack = 0;
1143	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1144	asoc->discontinuity_time = asoc->start_time;
1145	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1146		asoc->abandoned_unsent[i] = 0;
1147		asoc->abandoned_sent[i] = 0;
1148	}
1149	/*
1150	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1151	 * freed later when the association is freed.
1152	 */
1153	return (0);
1154}
1155
1156void
1157sctp_print_mapping_array(struct sctp_association *asoc)
1158{
1159	unsigned int i, limit;
1160
1161	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1162	    asoc->mapping_array_size,
1163	    asoc->mapping_array_base_tsn,
1164	    asoc->cumulative_tsn,
1165	    asoc->highest_tsn_inside_map,
1166	    asoc->highest_tsn_inside_nr_map);
1167	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1168		if (asoc->mapping_array[limit - 1] != 0) {
1169			break;
1170		}
1171	}
1172	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1173	for (i = 0; i < limit; i++) {
1174		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1175	}
1176	if (limit % 16)
1177		SCTP_PRINTF("\n");
1178	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1179		if (asoc->nr_mapping_array[limit - 1]) {
1180			break;
1181		}
1182	}
1183	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1184	for (i = 0; i < limit; i++) {
1185		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1186	}
1187	if (limit % 16)
1188		SCTP_PRINTF("\n");
1189}
1190
1191int
1192sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1193{
1194	/* mapping array needs to grow */
1195	uint8_t *new_array1, *new_array2;
1196	uint32_t new_size;
1197
1198	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1199	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1200	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1201	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1202		/* can't get more, forget it */
1203		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1204		if (new_array1) {
1205			SCTP_FREE(new_array1, SCTP_M_MAP);
1206		}
1207		if (new_array2) {
1208			SCTP_FREE(new_array2, SCTP_M_MAP);
1209		}
1210		return (-1);
1211	}
1212	memset(new_array1, 0, new_size);
1213	memset(new_array2, 0, new_size);
1214	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1215	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1216	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1217	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1218	asoc->mapping_array = new_array1;
1219	asoc->nr_mapping_array = new_array2;
1220	asoc->mapping_array_size = new_size;
1221	return (0);
1222}
1223
1224
/*
 * Drive one iterator over the endpoint (inp) list and, per endpoint, over
 * its association (stcb) list, invoking the caller-supplied callbacks
 * (function_inp, function_assoc, function_inp_end, function_atend).
 *
 * Locking: entered with no locks held; takes the INP-INFO read lock and
 * the global ITERATOR lock for the duration of the walk, plus per-object
 * INP read locks and TCB locks as it visits them.  Every
 * SCTP_ITERATOR_MAX_AT_ONCE associations all locks are dropped and
 * re-taken so other threads can make progress; after that gap
 * sctp_it_ctl.iterator_flags is consulted because the iterated objects
 * may have been scheduled for removal in the meantime.
 *
 * Ownership: "it" is consumed — it is freed at done_with_iterator after
 * the function_atend callback runs.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference the scheduler took to pin this inp. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* inp is already read-locked from above on the first pass. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb (refcnt) and inp (ref) so they
			 * survive while every lock is dropped and
			 * immediately re-taken below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and drop the temporary pins taken above. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1372
1373void
1374sctp_iterator_worker(void)
1375{
1376	struct sctp_iterator *it, *nit;
1377
1378	/* This function is called with the WQ lock in place */
1379
1380	sctp_it_ctl.iterator_running = 1;
1381	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1382		sctp_it_ctl.cur_it = it;
1383		/* now lets work on this one */
1384		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1385		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1386		CURVNET_SET(it->vn);
1387		sctp_iterator_work(it);
1388		sctp_it_ctl.cur_it = NULL;
1389		CURVNET_RESTORE();
1390		SCTP_IPI_ITERATOR_WQ_LOCK();
1391		/* sa_ignore FREED_MEMORY */
1392	}
1393	sctp_it_ctl.iterator_running = 0;
1394	return;
1395}
1396
1397
1398static void
1399sctp_handle_addr_wq(void)
1400{
1401	/* deal with the ADDR wq from the rtsock calls */
1402	struct sctp_laddr *wi, *nwi;
1403	struct sctp_asconf_iterator *asc;
1404
1405	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1406	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1407	if (asc == NULL) {
1408		/* Try later, no memory */
1409		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1410		    (struct sctp_inpcb *)NULL,
1411		    (struct sctp_tcb *)NULL,
1412		    (struct sctp_nets *)NULL);
1413		return;
1414	}
1415	LIST_INIT(&asc->list_of_work);
1416	asc->cnt = 0;
1417
1418	SCTP_WQ_ADDR_LOCK();
1419	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1420		LIST_REMOVE(wi, sctp_nxt_addr);
1421		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1422		asc->cnt++;
1423	}
1424	SCTP_WQ_ADDR_UNLOCK();
1425
1426	if (asc->cnt == 0) {
1427		SCTP_FREE(asc, SCTP_M_ASC_IT);
1428	} else {
1429		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1430		    sctp_asconf_iterator_stcb,
1431		    NULL,	/* No ep end for boundall */
1432		    SCTP_PCB_FLAGS_BOUNDALL,
1433		    SCTP_PCB_ANY_FEATURES,
1434		    SCTP_ASOC_ANY_STATE,
1435		    (void *)asc, 0,
1436		    sctp_asconf_iterator_end, NULL, 0);
1437	}
1438}
1439
/*
 * Central callout handler for every SCTP timer type.  The opaque argument
 * is the struct sctp_timer embedded in the owning inp/stcb/net; after a
 * chain of liveness checks (stale timer, invalid type, endpoint or
 * association going away, callout rescheduled) the per-type action is
 * dispatched by the switch below.
 *
 * Reference protocol: an inp reference and (briefly) an stcb refcount are
 * taken up front so neither object can be freed while we validate and
 * lock it; the stcb refcount is traded for the TCB lock once acquired.
 * All exit paths funnel through get_out/out_decr/out_no_decr, which undo
 * exactly what is still held.
 *
 * tmr->stopped_from is only ever written here with distinct marker values
 * (0xa001..0xa006, then the timer type) — apparently a debugging
 * breadcrumb recording how far the handler progressed before bailing out.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;

	/* Recover the timer context; ep/tcb/net may be NULL per timer type. */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	/* NOTE(review): comment above looks stale — no iterator timer is
	 * handled below; the code here only pins the endpoint. */
	tmr->stopped_from = 0xa003;
	if (inp) {
		/* Hold a reference so the endpoint cannot be freed under us. */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/*
			 * Socket is gone and this timer type is not in the
			 * exempt list above: nothing left to do.
			 */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we test its state / take its lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Trade the refcount pin taken above for the TCB lock. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Only re-arm when heartbeats are not disabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Rotate the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release the TCB lock (still held unless the assoc was freed). */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* Drop the endpoint reference taken at entry. */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	CURVNET_RESTORE();
}
1878
1879void
1880sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1881    struct sctp_nets *net)
1882{
1883	uint32_t to_ticks;
1884	struct sctp_timer *tmr;
1885
1886	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1887		return;
1888
1889	tmr = NULL;
1890	if (stcb) {
1891		SCTP_TCB_LOCK_ASSERT(stcb);
1892	}
1893	switch (t_type) {
1894	case SCTP_TIMER_TYPE_ZERO_COPY:
1895		tmr = &inp->sctp_ep.zero_copy_timer;
1896		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1897		break;
1898	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1899		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1900		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1901		break;
1902	case SCTP_TIMER_TYPE_ADDR_WQ:
1903		/* Only 1 tick away :-) */
1904		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1905		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1906		break;
1907	case SCTP_TIMER_TYPE_SEND:
1908		/* Here we use the RTO timer */
1909		{
1910			int rto_val;
1911
1912			if ((stcb == NULL) || (net == NULL)) {
1913				return;
1914			}
1915			tmr = &net->rxt_timer;
1916			if (net->RTO == 0) {
1917				rto_val = stcb->asoc.initial_rto;
1918			} else {
1919				rto_val = net->RTO;
1920			}
1921			to_ticks = MSEC_TO_TICKS(rto_val);
1922		}
1923		break;
1924	case SCTP_TIMER_TYPE_INIT:
1925		/*
1926		 * Here we use the INIT timer default usually about 1
1927		 * minute.
1928		 */
1929		if ((stcb == NULL) || (net == NULL)) {
1930			return;
1931		}
1932		tmr = &net->rxt_timer;
1933		if (net->RTO == 0) {
1934			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1935		} else {
1936			to_ticks = MSEC_TO_TICKS(net->RTO);
1937		}
1938		break;
1939	case SCTP_TIMER_TYPE_RECV:
1940		/*
1941		 * Here we use the Delayed-Ack timer value from the inp
1942		 * ususually about 200ms.
1943		 */
1944		if (stcb == NULL) {
1945			return;
1946		}
1947		tmr = &stcb->asoc.dack_timer;
1948		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1949		break;
1950	case SCTP_TIMER_TYPE_SHUTDOWN:
1951		/* Here we use the RTO of the destination. */
1952		if ((stcb == NULL) || (net == NULL)) {
1953			return;
1954		}
1955		if (net->RTO == 0) {
1956			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1957		} else {
1958			to_ticks = MSEC_TO_TICKS(net->RTO);
1959		}
1960		tmr = &net->rxt_timer;
1961		break;
1962	case SCTP_TIMER_TYPE_HEARTBEAT:
1963		/*
1964		 * the net is used here so that we can add in the RTO. Even
1965		 * though we use a different timer. We also add the HB timer
1966		 * PLUS a random jitter.
1967		 */
1968		if ((stcb == NULL) || (net == NULL)) {
1969			return;
1970		} else {
1971			uint32_t rndval;
1972			uint32_t jitter;
1973
1974			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1975			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1976				return;
1977			}
1978			if (net->RTO == 0) {
1979				to_ticks = stcb->asoc.initial_rto;
1980			} else {
1981				to_ticks = net->RTO;
1982			}
1983			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1984			jitter = rndval % to_ticks;
1985			if (jitter >= (to_ticks >> 1)) {
1986				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1987			} else {
1988				to_ticks = to_ticks - jitter;
1989			}
1990			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1991			    !(net->dest_state & SCTP_ADDR_PF)) {
1992				to_ticks += net->heart_beat_delay;
1993			}
1994			/*
1995			 * Now we must convert the to_ticks that are now in
1996			 * ms to ticks.
1997			 */
1998			to_ticks = MSEC_TO_TICKS(to_ticks);
1999			tmr = &net->hb_timer;
2000		}
2001		break;
2002	case SCTP_TIMER_TYPE_COOKIE:
2003		/*
2004		 * Here we can use the RTO timer from the network since one
2005		 * RTT was compelete. If a retran happened then we will be
2006		 * using the RTO initial value.
2007		 */
2008		if ((stcb == NULL) || (net == NULL)) {
2009			return;
2010		}
2011		if (net->RTO == 0) {
2012			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2013		} else {
2014			to_ticks = MSEC_TO_TICKS(net->RTO);
2015		}
2016		tmr = &net->rxt_timer;
2017		break;
2018	case SCTP_TIMER_TYPE_NEWCOOKIE:
2019		/*
2020		 * nothing needed but the endpoint here ususually about 60
2021		 * minutes.
2022		 */
2023		tmr = &inp->sctp_ep.signature_change;
2024		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2025		break;
2026	case SCTP_TIMER_TYPE_ASOCKILL:
2027		if (stcb == NULL) {
2028			return;
2029		}
2030		tmr = &stcb->asoc.strreset_timer;
2031		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2032		break;
2033	case SCTP_TIMER_TYPE_INPKILL:
2034		/*
2035		 * The inp is setup to die. We re-use the signature_chage
2036		 * timer since that has stopped and we are in the GONE
2037		 * state.
2038		 */
2039		tmr = &inp->sctp_ep.signature_change;
2040		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2041		break;
2042	case SCTP_TIMER_TYPE_PATHMTURAISE:
2043		/*
2044		 * Here we use the value found in the EP for PMTU ususually
2045		 * about 10 minutes.
2046		 */
2047		if ((stcb == NULL) || (net == NULL)) {
2048			return;
2049		}
2050		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2051			return;
2052		}
2053		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2054		tmr = &net->pmtu_timer;
2055		break;
2056	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2057		/* Here we use the RTO of the destination */
2058		if ((stcb == NULL) || (net == NULL)) {
2059			return;
2060		}
2061		if (net->RTO == 0) {
2062			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2063		} else {
2064			to_ticks = MSEC_TO_TICKS(net->RTO);
2065		}
2066		tmr = &net->rxt_timer;
2067		break;
2068	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2069		/*
2070		 * Here we use the endpoints shutdown guard timer usually
2071		 * about 3 minutes.
2072		 */
2073		if (stcb == NULL) {
2074			return;
2075		}
2076		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2077		tmr = &stcb->asoc.shut_guard_timer;
2078		break;
2079	case SCTP_TIMER_TYPE_STRRESET:
2080		/*
2081		 * Here the timer comes from the stcb but its value is from
2082		 * the net's RTO.
2083		 */
2084		if ((stcb == NULL) || (net == NULL)) {
2085			return;
2086		}
2087		if (net->RTO == 0) {
2088			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2089		} else {
2090			to_ticks = MSEC_TO_TICKS(net->RTO);
2091		}
2092		tmr = &stcb->asoc.strreset_timer;
2093		break;
2094	case SCTP_TIMER_TYPE_ASCONF:
2095		/*
2096		 * Here the timer comes from the stcb but its value is from
2097		 * the net's RTO.
2098		 */
2099		if ((stcb == NULL) || (net == NULL)) {
2100			return;
2101		}
2102		if (net->RTO == 0) {
2103			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2104		} else {
2105			to_ticks = MSEC_TO_TICKS(net->RTO);
2106		}
2107		tmr = &stcb->asoc.asconf_timer;
2108		break;
2109	case SCTP_TIMER_TYPE_PRIM_DELETED:
2110		if ((stcb == NULL) || (net != NULL)) {
2111			return;
2112		}
2113		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2114		tmr = &stcb->asoc.delete_prim_timer;
2115		break;
2116	case SCTP_TIMER_TYPE_AUTOCLOSE:
2117		if (stcb == NULL) {
2118			return;
2119		}
2120		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2121			/*
2122			 * Really an error since stcb is NOT set to
2123			 * autoclose
2124			 */
2125			return;
2126		}
2127		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2128		tmr = &stcb->asoc.autoclose_timer;
2129		break;
2130	default:
2131		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2132		    __FUNCTION__, t_type);
2133		return;
2134		break;
2135	}
2136	if ((to_ticks <= 0) || (tmr == NULL)) {
2137		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2138		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2139		return;
2140	}
2141	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2142		/*
2143		 * we do NOT allow you to have it already running. if it is
2144		 * we leave the current one up unchanged
2145		 */
2146		return;
2147	}
2148	/* At this point we can proceed */
2149	if (t_type == SCTP_TIMER_TYPE_SEND) {
2150		stcb->asoc.num_send_timers_up++;
2151	}
2152	tmr->stopped_from = 0;
2153	tmr->type = t_type;
2154	tmr->ep = (void *)inp;
2155	tmr->tcb = (void *)stcb;
2156	tmr->net = (void *)net;
2157	tmr->self = (void *)tmr;
2158	tmr->vnet = (void *)curvnet;
2159	tmr->ticks = sctp_get_tick_count();
2160	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2161	return;
2162}
2163
/*
 * Stop the timer of type 't_type' on the given endpoint/association/
 * destination.  The sctp_timer selected per type must mirror the mapping
 * in sctp_timer_start(); 'from' identifies the caller's location and is
 * recorded in the timer for debugging.  Some sctp_timer objects are
 * shared between several timer types, so a timer running under a
 * different type is deliberately left alone.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Select the timer object for this type; NULL checks guard the
	 * dereferences each type needs (stcb and/or net). */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding SEND-timer count consistent; clamp at zero. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2320
2321uint32_t
2322sctp_calculate_len(struct mbuf *m)
2323{
2324	uint32_t tlen = 0;
2325	struct mbuf *at;
2326
2327	at = m;
2328	while (at) {
2329		tlen += SCTP_BUF_LEN(at);
2330		at = SCTP_BUF_NEXT(at);
2331	}
2332	return (tlen);
2333}
2334
2335void
2336sctp_mtu_size_reset(struct sctp_inpcb *inp,
2337    struct sctp_association *asoc, uint32_t mtu)
2338{
2339	/*
2340	 * Reset the P-MTU size on this association, this involves changing
2341	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2342	 * allow the DF flag to be cleared.
2343	 */
2344	struct sctp_tmit_chunk *chk;
2345	unsigned int eff_mtu, ovh;
2346
2347	asoc->smallest_mtu = mtu;
2348	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2349		ovh = SCTP_MIN_OVERHEAD;
2350	} else {
2351		ovh = SCTP_MIN_V4_OVERHEAD;
2352	}
2353	eff_mtu = mtu - ovh;
2354	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2355		if (chk->send_size > eff_mtu) {
2356			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2357		}
2358	}
2359	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2360		if (chk->send_size > eff_mtu) {
2361			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2362		}
2363	}
2364}
2365
2366
2367/*
2368 * given an association and starting time of the current RTT period return
2369 * RTO in number of msecs net should point to the current network
2370 */
2371
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * 'told' is the start of the measurement period; 'safe' selects
	 * whether it must first be copied to an aligned local (sparc64),
	 * and 'rtt_from_sack' says whether this sample came from DATA
	 * (SACK) or from control traffic (HB/INIT->INIT-ACK).
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old, i.e. the elapsed measurement interval */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 *
	 * NOTE: 'rtt' is reused destructively below: first it becomes
	 * the error (rtt - srtt), then |error|, then |error| - rttvar.
	 */
	if (net->RTO_measured) {
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = srtt + rttvar (both already scaled appropriately) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2487
2488/*
2489 * return a pointer to a contiguous piece of data from the given mbuf chain
2490 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2491 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2492 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2493 */
2494caddr_t
2495sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2496{
2497	uint32_t count;
2498	uint8_t *ptr;
2499
2500	ptr = in_ptr;
2501	if ((off < 0) || (len <= 0))
2502		return (NULL);
2503
2504	/* find the desired start location */
2505	while ((m != NULL) && (off > 0)) {
2506		if (off < SCTP_BUF_LEN(m))
2507			break;
2508		off -= SCTP_BUF_LEN(m);
2509		m = SCTP_BUF_NEXT(m);
2510	}
2511	if (m == NULL)
2512		return (NULL);
2513
2514	/* is the current mbuf large enough (eg. contiguous)? */
2515	if ((SCTP_BUF_LEN(m) - off) >= len) {
2516		return (mtod(m, caddr_t)+off);
2517	} else {
2518		/* else, it spans more than one mbuf, so save a temp copy... */
2519		while ((m != NULL) && (len > 0)) {
2520			count = min(SCTP_BUF_LEN(m) - off, len);
2521			bcopy(mtod(m, caddr_t)+off, ptr, count);
2522			len -= count;
2523			ptr += count;
2524			off = 0;
2525			m = SCTP_BUF_NEXT(m);
2526		}
2527		if ((m == NULL) && (len > 0))
2528			return (NULL);
2529		else
2530			return ((caddr_t)in_ptr);
2531	}
2532}
2533
2534
2535
2536struct sctp_paramhdr *
2537sctp_get_next_param(struct mbuf *m,
2538    int offset,
2539    struct sctp_paramhdr *pull,
2540    int pull_limit)
2541{
2542	/* This just provides a typed signature to Peter's Pull routine */
2543	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2544	    (uint8_t *) pull));
2545}
2546
2547
2548struct mbuf *
2549sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2550{
2551	struct mbuf *m_last;
2552	caddr_t dp;
2553
2554	if (padlen > 3) {
2555		return (NULL);
2556	}
2557	if (padlen <= M_TRAILINGSPACE(m)) {
2558		/*
2559		 * The easy way. We hope the majority of the time we hit
2560		 * here :)
2561		 */
2562		m_last = m;
2563	} else {
2564		/* Hard way we must grow the mbuf chain */
2565		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2566		if (m_last == NULL) {
2567			return (NULL);
2568		}
2569		SCTP_BUF_LEN(m_last) = 0;
2570		SCTP_BUF_NEXT(m_last) = NULL;
2571		SCTP_BUF_NEXT(m) = m_last;
2572	}
2573	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2574	SCTP_BUF_LEN(m_last) += padlen;
2575	memset(dp, 0, padlen);
2576	return (m_last);
2577}
2578
2579struct mbuf *
2580sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2581{
2582	/* find the last mbuf in chain and pad it */
2583	struct mbuf *m_at;
2584
2585	if (last_mbuf != NULL) {
2586		return (sctp_add_pad_tombuf(last_mbuf, padval));
2587	} else {
2588		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2589			if (SCTP_BUF_NEXT(m_at) == NULL) {
2590				return (sctp_add_pad_tombuf(m_at, padval));
2591			}
2592		}
2593	}
2594	return (NULL);
2595}
2596
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification ('state' is one of
 * SCTP_COMM_UP / SCTP_COMM_LOST / SCTP_RESTART / SCTP_CANT_STR_ASSOC /
 * etc.) onto the socket receive queue, if the application subscribed.
 * COMM_UP/RESTART notifications append the supported-features list;
 * COMM_LOST/CANT_STR_ASSOC append the received ABORT chunk (if any).
 * For 1-to-1 style sockets, COMM_LOST/CANT_STR_ASSOC also set so_error
 * (distinguishing peer- vs. locally-initiated failure via 'from_peer')
 * and shut down reading.  Finally, any sleepers on the socket are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Reserve room for the variable-length trailer. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Fill the trailer only if the large allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold the assoc across the unlock/relock for the socket lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2744
/*
 * Build and queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa'
 * (state such as reachable/unreachable, with 'error') onto the socket
 * receive queue, if the application subscribed to SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, translating to the form the application expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2836
2837
2838static void
2839sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2840    struct sctp_tmit_chunk *chk, int so_locked
2841#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2842    SCTP_UNUSED
2843#endif
2844)
2845{
2846	struct mbuf *m_notify;
2847	struct sctp_send_failed *ssf;
2848	struct sctp_send_failed_event *ssfe;
2849	struct sctp_queued_to_read *control;
2850	int length;
2851
2852	if ((stcb == NULL) ||
2853	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2854	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2855		/* event not enabled */
2856		return;
2857	}
2858	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2859		length = sizeof(struct sctp_send_failed_event);
2860	} else {
2861		length = sizeof(struct sctp_send_failed);
2862	}
2863	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2864	if (m_notify == NULL)
2865		/* no space left */
2866		return;
2867	SCTP_BUF_LEN(m_notify) = 0;
2868	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2869		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2870		memset(ssfe, 0, length);
2871		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2872		if (sent) {
2873			ssfe->ssfe_flags = SCTP_DATA_SENT;
2874		} else {
2875			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2876		}
2877		length += chk->send_size;
2878		length -= sizeof(struct sctp_data_chunk);
2879		ssfe->ssfe_length = length;
2880		ssfe->ssfe_error = error;
2881		/* not exactly what the user sent in, but should be close :) */
2882		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2883		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2884		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2885		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2886		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2887		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2888		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2889	} else {
2890		ssf = mtod(m_notify, struct sctp_send_failed *);
2891		memset(ssf, 0, length);
2892		ssf->ssf_type = SCTP_SEND_FAILED;
2893		if (sent) {
2894			ssf->ssf_flags = SCTP_DATA_SENT;
2895		} else {
2896			ssf->ssf_flags = SCTP_DATA_UNSENT;
2897		}
2898		length += chk->send_size;
2899		length -= sizeof(struct sctp_data_chunk);
2900		ssf->ssf_length = length;
2901		ssf->ssf_error = error;
2902		/* not exactly what the user sent in, but should be close :) */
2903		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2904		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2905		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2906		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2907		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2908		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2909		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2910		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2911		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2912	}
2913	if (chk->data) {
2914		/*
2915		 * trim off the sctp chunk header(it should be there)
2916		 */
2917		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2918			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2919			sctp_mbuf_crush(chk->data);
2920			chk->send_size -= sizeof(struct sctp_data_chunk);
2921		}
2922	}
2923	SCTP_BUF_NEXT(m_notify) = chk->data;
2924	/* Steal off the mbuf */
2925	chk->data = NULL;
2926	/*
2927	 * For this case, we check the actual socket buffer, since the assoc
2928	 * is going away we don't want to overfill the socket buffer for a
2929	 * non-reader
2930	 */
2931	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2932		sctp_m_freem(m_notify);
2933		return;
2934	}
2935	/* append to socket */
2936	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2937	    0, 0, stcb->asoc.context, 0, 0, 0,
2938	    m_notify);
2939	if (control == NULL) {
2940		/* no memory */
2941		sctp_m_freem(m_notify);
2942		return;
2943	}
2944	control->spec_flags = M_NOTIFICATION;
2945	sctp_add_to_readq(stcb->sctp_ep, stcb,
2946	    control,
2947	    &stcb->sctp_socket->so_rcv, 1,
2948	    SCTP_READ_LOCK_NOT_HELD,
2949	    so_locked);
2950}
2951
2952
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * stream-queue entry that was never turned into a chunk (always
 * SCTP_DATA_UNSENT).  The pending data is stolen (sp->data becomes NULL)
 * and chained onto the notification.  Nothing is done unless the
 * application subscribed to one of the two send-failed events.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the old or new notification format based on subscription. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* notification size plus the pending user payload */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		/* notification size plus the pending user payload */
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3053
3054
3055
3056static void
3057sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3058{
3059	struct mbuf *m_notify;
3060	struct sctp_adaptation_event *sai;
3061	struct sctp_queued_to_read *control;
3062
3063	if ((stcb == NULL) ||
3064	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3065		/* event not enabled */
3066		return;
3067	}
3068	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3069	if (m_notify == NULL)
3070		/* no space left */
3071		return;
3072	SCTP_BUF_LEN(m_notify) = 0;
3073	sai = mtod(m_notify, struct sctp_adaptation_event *);
3074	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3075	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3076	sai->sai_flags = 0;
3077	sai->sai_length = sizeof(struct sctp_adaptation_event);
3078	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3079	sai->sai_assoc_id = sctp_get_associd(stcb);
3080
3081	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3082	SCTP_BUF_NEXT(m_notify) = NULL;
3083
3084	/* append to socket */
3085	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3086	    0, 0, stcb->asoc.context, 0, 0, 0,
3087	    m_notify);
3088	if (control == NULL) {
3089		/* no memory */
3090		sctp_m_freem(m_notify);
3091		return;
3092	}
3093	control->length = SCTP_BUF_LEN(m_notify);
3094	control->spec_flags = M_NOTIFICATION;
3095	/* not that we need this */
3096	control->tail_mbuf = m_notify;
3097	sctp_add_to_readq(stcb->sctp_ep, stcb,
3098	    control,
3099	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3100}
3101
/* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	/*
	 * Queue an SCTP_PARTIAL_DELIVERY_EVENT on the socket.  Unlike the
	 * other notification helpers this one inserts the read-queue entry
	 * by hand (right after asoc.control_pdapi when present) instead of
	 * calling sctp_add_to_readq(), which is why the caller must already
	 * hold the INP read-queue lock.
	 */
	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read from this socket anymore; don't queue. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream id in the high 16 bits and the ssn low. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is reset to zero and then rebuilt with the atomic add
	 * below, mirroring the accounting done by sctp_add_to_readq().
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the notification against the receive socket buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* Keep the event ordered right behind the partial message. */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Acquire the socket lock; the TCB lock must be
			 * dropped around it, so hold a refcount to keep the
			 * association from being freed meanwhile.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket vanished while unlocked; bail. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3197
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Platforms with per-socket locks: drop the TCB lock while
		 * taking the socket lock, holding a refcount so the
		 * association cannot be freed in between.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket got closed while we were unlocked. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3266
3267static void
3268sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3269    int so_locked
3270#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3271    SCTP_UNUSED
3272#endif
3273)
3274{
3275	struct mbuf *m_notify;
3276	struct sctp_sender_dry_event *event;
3277	struct sctp_queued_to_read *control;
3278
3279	if ((stcb == NULL) ||
3280	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3281		/* event not enabled */
3282		return;
3283	}
3284	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3285	if (m_notify == NULL) {
3286		/* no space left */
3287		return;
3288	}
3289	SCTP_BUF_LEN(m_notify) = 0;
3290	event = mtod(m_notify, struct sctp_sender_dry_event *);
3291	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3292	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3293	event->sender_dry_flags = 0;
3294	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3295	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3296
3297	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3298	SCTP_BUF_NEXT(m_notify) = NULL;
3299
3300	/* append to socket */
3301	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3302	    0, 0, stcb->asoc.context, 0, 0, 0,
3303	    m_notify);
3304	if (control == NULL) {
3305		/* no memory */
3306		sctp_m_freem(m_notify);
3307		return;
3308	}
3309	control->length = SCTP_BUF_LEN(m_notify);
3310	control->spec_flags = M_NOTIFICATION;
3311	/* not that we need this */
3312	control->tail_mbuf = m_notify;
3313	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3314	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3315}
3316
3317
3318void
3319sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3320{
3321	struct mbuf *m_notify;
3322	struct sctp_queued_to_read *control;
3323	struct sctp_stream_change_event *stradd;
3324
3325	if ((stcb == NULL) ||
3326	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3327		/* event not enabled */
3328		return;
3329	}
3330	if ((stcb->asoc.peer_req_out) && flag) {
3331		/* Peer made the request, don't tell the local user */
3332		stcb->asoc.peer_req_out = 0;
3333		return;
3334	}
3335	stcb->asoc.peer_req_out = 0;
3336	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3337	if (m_notify == NULL)
3338		/* no space left */
3339		return;
3340	SCTP_BUF_LEN(m_notify) = 0;
3341	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3342	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3343	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3344	stradd->strchange_flags = flag;
3345	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3346	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3347	stradd->strchange_instrms = numberin;
3348	stradd->strchange_outstrms = numberout;
3349	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3350	SCTP_BUF_NEXT(m_notify) = NULL;
3351	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3352		/* no space */
3353		sctp_m_freem(m_notify);
3354		return;
3355	}
3356	/* append to socket */
3357	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3358	    0, 0, stcb->asoc.context, 0, 0, 0,
3359	    m_notify);
3360	if (control == NULL) {
3361		/* no memory */
3362		sctp_m_freem(m_notify);
3363		return;
3364	}
3365	control->spec_flags = M_NOTIFICATION;
3366	control->length = SCTP_BUF_LEN(m_notify);
3367	/* not that we need this */
3368	control->tail_mbuf = m_notify;
3369	sctp_add_to_readq(stcb->sctp_ep, stcb,
3370	    control,
3371	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3372}
3373
3374void
3375sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3376{
3377	struct mbuf *m_notify;
3378	struct sctp_queued_to_read *control;
3379	struct sctp_assoc_reset_event *strasoc;
3380
3381	if ((stcb == NULL) ||
3382	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3383		/* event not enabled */
3384		return;
3385	}
3386	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3387	if (m_notify == NULL)
3388		/* no space left */
3389		return;
3390	SCTP_BUF_LEN(m_notify) = 0;
3391	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3392	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3393	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3394	strasoc->assocreset_flags = flag;
3395	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3396	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3397	strasoc->assocreset_local_tsn = sending_tsn;
3398	strasoc->assocreset_remote_tsn = recv_tsn;
3399	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3400	SCTP_BUF_NEXT(m_notify) = NULL;
3401	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3402		/* no space */
3403		sctp_m_freem(m_notify);
3404		return;
3405	}
3406	/* append to socket */
3407	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3408	    0, 0, stcb->asoc.context, 0, 0, 0,
3409	    m_notify);
3410	if (control == NULL) {
3411		/* no memory */
3412		sctp_m_freem(m_notify);
3413		return;
3414	}
3415	control->spec_flags = M_NOTIFICATION;
3416	control->length = SCTP_BUF_LEN(m_notify);
3417	/* not that we need this */
3418	control->tail_mbuf = m_notify;
3419	sctp_add_to_readq(stcb->sctp_ep, stcb,
3420	    control,
3421	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3422}
3423
3424
3425
3426static void
3427sctp_notify_stream_reset(struct sctp_tcb *stcb,
3428    int number_entries, uint16_t * list, int flag)
3429{
3430	struct mbuf *m_notify;
3431	struct sctp_queued_to_read *control;
3432	struct sctp_stream_reset_event *strreset;
3433	int len;
3434
3435	if ((stcb == NULL) ||
3436	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3437		/* event not enabled */
3438		return;
3439	}
3440	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3441	if (m_notify == NULL)
3442		/* no space left */
3443		return;
3444	SCTP_BUF_LEN(m_notify) = 0;
3445	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3446	if (len > M_TRAILINGSPACE(m_notify)) {
3447		/* never enough room */
3448		sctp_m_freem(m_notify);
3449		return;
3450	}
3451	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3452	memset(strreset, 0, len);
3453	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3454	strreset->strreset_flags = flag;
3455	strreset->strreset_length = len;
3456	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3457	if (number_entries) {
3458		int i;
3459
3460		for (i = 0; i < number_entries; i++) {
3461			strreset->strreset_stream_list[i] = ntohs(list[i]);
3462		}
3463	}
3464	SCTP_BUF_LEN(m_notify) = len;
3465	SCTP_BUF_NEXT(m_notify) = NULL;
3466	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3467		/* no space */
3468		sctp_m_freem(m_notify);
3469		return;
3470	}
3471	/* append to socket */
3472	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3473	    0, 0, stcb->asoc.context, 0, 0, 0,
3474	    m_notify);
3475	if (control == NULL) {
3476		/* no memory */
3477		sctp_m_freem(m_notify);
3478		return;
3479	}
3480	control->spec_flags = M_NOTIFICATION;
3481	control->length = SCTP_BUF_LEN(m_notify);
3482	/* not that we need this */
3483	control->tail_mbuf = m_notify;
3484	sctp_add_to_readq(stcb->sctp_ep, stcb,
3485	    control,
3486	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3487}
3488
3489
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	/*
	 * Queue an SCTP_REMOTE_ERROR notification, appending a copy of the
	 * peer's ERROR chunk when one was supplied and memory permits.
	 */
	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/*
		 * Retry with smaller value.  The chunk data is dropped in
		 * that case; the header-only check below skips the copy.
		 */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Copy the chunk only when the full-size allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* No memory for the read-queue entry. */
		sctp_m_freem(m_notify);
	}
}
3546
3547
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Central dispatcher for upper-layer (socket) notifications.
	 * Translates an SCTP_NOTIFY_* code into a call to the matching
	 * sctp_notify_* helper; `data` is interpreted per notification
	 * type (a net, a chunk, a stream list, a packed uint32_t, ...).
	 */
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receive side shut down; no one to notify. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* Report COMM_UP once; follow with adaptation/auth info. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
		/* NOTE(review): "DELVIERY" typo is the historical constant name. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* COMM_LOST once established; CANT_STR_ASSOC in front states. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For stream resets, `error` carries the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* For auth events, `data` smuggles the key id in a pointer. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3727
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	/*
	 * Drain every outbound queue (sent, send, and per-stream pending)
	 * of a dying association, raising a send-failed notification for
	 * each piece of user data before freeing it.  `holds_lock` says
	 * whether the caller already owns the TCB send lock.
	 */
	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Socket is gone; nobody to notify. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* NR-acked chunks were already uncounted. */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Tell the user this already-sent data failed. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify may have stolen chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* This data was never sent; notify as unsent. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify may have taken sp->data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3839
3840void
3841sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3842    struct sctp_abort_chunk *abort, int so_locked
3843#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3844    SCTP_UNUSED
3845#endif
3846)
3847{
3848	if (stcb == NULL) {
3849		return;
3850	}
3851	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3852	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3853	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3854		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3855	}
3856	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3857	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3858	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3859		return;
3860	}
3861	/* Tell them we lost the asoc */
3862	sctp_report_all_outbound(stcb, error, 1, so_locked);
3863	if (from_peer) {
3864		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3865	} else {
3866		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3867	}
3868}
3869
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * Abort an association (or respond to a packet with no matching
	 * TCB): notify the ULP if a TCB exists, send an ABORT to the peer,
	 * then tear the TCB down.
	 */
	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock before freeing; drop the TCB lock
		 * around it and hold a refcount so the TCB stays valid.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3918
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug-only dump of the per-association inbound/outbound TSN logs.
 * NOTE(review): the body is guarded by "#ifdef NOSIY_PRINTS", which looks
 * like a typo of NOISY_PRINTS — as written, this function compiles to a
 * no-op unless NOSIY_PRINTS is defined.  Left untouched since "fixing"
 * the spelling would change which macro enables the output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* The log is a ring buffer: dump the wrapped tail, then the head. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same ring-buffer walk for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3982
/*
 * Abort the association stcb: notify the ULP (unless the socket is
 * already gone), send an ABORT chunk to the peer carrying op_err, update
 * the abort statistics, and free the association.  If stcb is NULL the
 * function only finishes tearing down an inp whose socket is gone and
 * that has no associations left.
 *
 * so_locked tells the Apple/SO_LOCK_TESTING builds whether the caller
 * already holds the socket lock; on other builds it is unused.  Caller
 * must hold the TCB lock; sctp_free_assoc() consumes it.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* Socket gone and no assocs: free the PCB. */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		/* The assoc counted as established; drop the gauge. */
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/*
		 * Take the socket lock while keeping the assoc alive via a
		 * refcount; the TCB lock must be dropped first to preserve
		 * lock ordering (socket lock before TCB lock).
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4043
/*
 * Handle an "out of the blue" (OOTB) packet, i.e. one for which no
 * association exists.  Scans the chunks to decide the proper response:
 * some chunk types must be silently ignored, SHUTDOWN-ACK draws a
 * SHUTDOWN-COMPLETE, and everything else draws an ABORT (subject to the
 * sctp_blackhole sysctl, which can suppress the ABORT entirely, or for
 * non-INIT packets only).  Also finishes freeing an inp whose socket is
 * gone and which has no associations left.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Last user is gone; release the PCB now. */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remembered so blackhole==1 can suppress the ABORT. */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			/* Peer still waiting on shutdown; complete it. */
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * sctp_blackhole: 0 = always respond with ABORT, 1 = stay silent
	 * only for packets containing an INIT, 2+ = always stay silent.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid,
		    vrf_id, port);
	}
}
4109
4110/*
4111 * check the inbound datagram to make sure there is not an abort inside it,
4112 * if there is return 1, else return 0.
4113 */
4114int
4115sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4116{
4117	struct sctp_chunkhdr *ch;
4118	struct sctp_init_chunk *init_chk, chunk_buf;
4119	int offset;
4120	unsigned int chk_length;
4121
4122	offset = iphlen + sizeof(struct sctphdr);
4123	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4124	    (uint8_t *) & chunk_buf);
4125	while (ch != NULL) {
4126		chk_length = ntohs(ch->chunk_length);
4127		if (chk_length < sizeof(*ch)) {
4128			/* packet is probably corrupt */
4129			break;
4130		}
4131		/* we seem to be ok, is it an abort? */
4132		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4133			/* yep, tell them */
4134			return (1);
4135		}
4136		if (ch->chunk_type == SCTP_INITIATION) {
4137			/* need to update the Vtag */
4138			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4139			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4140			if (init_chk != NULL) {
4141				*vtagfill = ntohl(init_chk->init.initiate_tag);
4142			}
4143		}
4144		/* Nope, move to the next chunk */
4145		offset += SCTP_SIZE32(chk_length);
4146		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4147		    sizeof(*ch), (uint8_t *) & chunk_buf);
4148	}
4149	return (0);
4150}
4151
4152/*
4153 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4154 * set (i.e. it's 0) so, create this function to compare link local scopes
4155 */
4156#ifdef INET6
4157uint32_t
4158sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4159{
4160	struct sockaddr_in6 a, b;
4161
4162	/* save copies */
4163	a = *addr1;
4164	b = *addr2;
4165
4166	if (a.sin6_scope_id == 0)
4167		if (sa6_recoverscope(&a)) {
4168			/* can't get scope, so can't match */
4169			return (0);
4170		}
4171	if (b.sin6_scope_id == 0)
4172		if (sa6_recoverscope(&b)) {
4173			/* can't get scope, so can't match */
4174			return (0);
4175		}
4176	if (a.sin6_scope_id != b.sin6_scope_id)
4177		return (0);
4178
4179	return (1);
4180}
4181
4182/*
4183 * returns a sockaddr_in6 with embedded scope recovered and removed
4184 */
4185struct sockaddr_in6 *
4186sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4187{
4188	/* check and strip embedded scope junk */
4189	if (addr->sin6_family == AF_INET6) {
4190		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4191			if (addr->sin6_scope_id == 0) {
4192				*store = *addr;
4193				if (!sa6_recoverscope(store)) {
4194					/* use the recovered scope */
4195					addr = store;
4196				}
4197			} else {
4198				/* else, return the original "to" addr */
4199				in6_clearscope(&addr->sin6_addr);
4200			}
4201		}
4202	}
4203	return (addr);
4204}
4205
4206#endif
4207
4208/*
4209 * are the two addresses the same?  currently a "scopeless" check returns: 1
4210 * if same, 0 if not
4211 */
4212int
4213sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4214{
4215
4216	/* must be valid */
4217	if (sa1 == NULL || sa2 == NULL)
4218		return (0);
4219
4220	/* must be the same family */
4221	if (sa1->sa_family != sa2->sa_family)
4222		return (0);
4223
4224	switch (sa1->sa_family) {
4225#ifdef INET6
4226	case AF_INET6:
4227		{
4228			/* IPv6 addresses */
4229			struct sockaddr_in6 *sin6_1, *sin6_2;
4230
4231			sin6_1 = (struct sockaddr_in6 *)sa1;
4232			sin6_2 = (struct sockaddr_in6 *)sa2;
4233			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4234			    sin6_2));
4235		}
4236#endif
4237#ifdef INET
4238	case AF_INET:
4239		{
4240			/* IPv4 addresses */
4241			struct sockaddr_in *sin_1, *sin_2;
4242
4243			sin_1 = (struct sockaddr_in *)sa1;
4244			sin_2 = (struct sockaddr_in *)sa2;
4245			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4246		}
4247#endif
4248	default:
4249		/* we don't do these... */
4250		return (0);
4251	}
4252}
4253
4254void
4255sctp_print_address(struct sockaddr *sa)
4256{
4257#ifdef INET6
4258	char ip6buf[INET6_ADDRSTRLEN];
4259
4260#endif
4261
4262	switch (sa->sa_family) {
4263#ifdef INET6
4264	case AF_INET6:
4265		{
4266			struct sockaddr_in6 *sin6;
4267
4268			sin6 = (struct sockaddr_in6 *)sa;
4269			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4270			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4271			    ntohs(sin6->sin6_port),
4272			    sin6->sin6_scope_id);
4273			break;
4274		}
4275#endif
4276#ifdef INET
4277	case AF_INET:
4278		{
4279			struct sockaddr_in *sin;
4280			unsigned char *p;
4281
4282			sin = (struct sockaddr_in *)sa;
4283			p = (unsigned char *)&sin->sin_addr;
4284			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4285			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4286			break;
4287		}
4288#endif
4289	default:
4290		SCTP_PRINTF("?\n");
4291		break;
4292	}
4293}
4294
/*
 * Move every queued-to-read control structure belonging to stcb from
 * old_inp's read queue to new_inp's, adjusting the socket-buffer
 * accounting of both sockets.  Used by peeloff/accept when an
 * association migrates to a new socket.  waitflags is passed to
 * sblock(); on sblock failure the data is left where it is (see the
 * comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4370
/*
 * Append a queued-to-read control structure to inp's read queue,
 * pruning zero-length mbufs from its chain and charging each remaining
 * mbuf to the socket buffer sb so select()/poll() see the data.  end
 * non-zero marks the message complete (end_added).  inp_read_lock_held
 * tells us whether the caller already holds the INP read lock;
 * so_locked plays the same role for the socket lock on
 * Apple/SO_LOCK_TESTING builds.  If the socket can no longer read, or
 * the chain collapses to nothing, the control is freed instead of
 * queued.  Wakes up (or zero-copy-signals) any reader at the end.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone; drop the data instead of queueing it. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* Only user data (not notifications) counts as a recv. */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Acquire the socket lock with the TCB lock
				 * dropped (lock order), keeping the assoc
				 * pinned via refcnt while unlocked.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4496
4497
/*
 * Append mbuf chain m to an existing queued-to-read control (partial
 * delivery API, or reassembly-queue append).  Zero-length mbufs are
 * pruned; each remaining mbuf is charged to sb when sb is non-NULL.
 * end non-zero marks the message complete and clears any pending PD-API
 * reference.  ctls_cumack becomes the control's sinfo_tsn/sinfo_cumtsn
 * (see comment near the bottom).  Returns 0 on success, -1 when the
 * control is missing/already complete or m is empty.  Takes and
 * releases the INP read lock internally when inp is non-NULL.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader gone; silently succeed without queueing. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* charge this mbuf to the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Take the socket lock with the TCB lock dropped
			 * (lock order), pinning the assoc via refcnt.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4647
4648
4649
4650/*************HOLD THIS COMMENT FOR PATCH FILE OF
4651 *************ALTERNATE ROUTING CODE
4652 */
4653
4654/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4655 *************ALTERNATE ROUTING CODE
4656 */
4657
4658struct mbuf *
4659sctp_generate_cause(uint16_t code, char *info)
4660{
4661	struct mbuf *m;
4662	struct sctp_gen_error_cause *cause;
4663	size_t info_len, len;
4664
4665	if ((code == 0) || (info == NULL)) {
4666		return (NULL);
4667	}
4668	info_len = strlen(info);
4669	len = sizeof(struct sctp_paramhdr) + info_len;
4670	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4671	if (m != NULL) {
4672		SCTP_BUF_LEN(m) = len;
4673		cause = mtod(m, struct sctp_gen_error_cause *);
4674		cause->code = htons(code);
4675		cause->length = htons((uint16_t) len);
4676		memcpy(cause->info, info, info_len);
4677	}
4678	return (m);
4679}
4680
4681struct mbuf *
4682sctp_generate_no_user_data_cause(uint32_t tsn)
4683{
4684	struct mbuf *m;
4685	struct sctp_error_no_user_data *no_user_data_cause;
4686	size_t len;
4687
4688	len = sizeof(struct sctp_error_no_user_data);
4689	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4690	if (m != NULL) {
4691		SCTP_BUF_LEN(m) = len;
4692		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4693		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4694		no_user_data_cause->cause.length = htons((uint16_t) len);
4695		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4696	}
4697	return (m);
4698}
4699
4700#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the outbound space accounting held by chunk tp1: decrement
 * the association's chunk count and total_output_queue_size by the
 * chunk's book_size (clamped at zero), and for 1-to-1 style sockets
 * also debit the socket send buffer.  chk_cnt is the number of chunks
 * tp1 represents.  No-op when tp1 carries no data.  Only compiled when
 * SCTP_MBCNT_LOGGING is defined (a non-logging variant exists
 * elsewhere).
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 (TCP-style) sockets also account in the send buffer */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4732
4733#endif
4734
/*
 * Abandon a (possibly fragmented) PR-SCTP message, starting at fragment
 * tp1.  All fragments of the message are located across the sent queue,
 * the send queue, and (if necessary) the stream-out queue; their data
 * is freed, the ULP is notified of the failed delivery, and each chunk
 * is marked SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can pass it by.
 * sent non-zero means tp1 had already been transmitted (affects which
 * abandoned counters bump and which notification is sent).  Returns the
 * total book_size released.  Caller must hold the TCB lock.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* Bump the appropriate abandoned counters (overall + per policy). */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/*
	 * Pass 1: walk forward from tp1 on its queue, releasing each
	 * fragment until the message ends (or the queue runs out).
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* chunk was in flight; undo flight accounting */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	/*
	 * Pass 2: no LAST fragment yet; continue across the send queue
	 * for chunks with the same stream and sequence number.
	 */
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				/* past the end of our message */
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	/*
	 * Pass 3: still no LAST fragment; the remainder is un-chunked data
	 * on the stream-out queue.  Mark it for discard and, if needed,
	 * fabricate a LAST-fragment marker chunk so FORWARD-TSN works.
	 */
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[stream];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				chk->rec.data.stream_seq = strq->next_sequence_send;
				chk->rec.data.stream_number = sp->stream;
				chk->rec.data.payloadtype = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* consume the next TSN for the marker chunk */
				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			} else {
				/* reuse the saved send-queue chunk as LAST */
				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			}
			strq->next_sequence_send++;
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Socket lock must be taken with the TCB lock
			 * dropped; pin the assoc with a refcount meanwhile.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
4949
4950/*
4951 * checks to see if the given address, sa, is one that is currently known by
4952 * the kernel note: can't distinguish the same address on multiple interfaces
4953 * and doesn't handle multiple addresses with different zone/scope id's note:
4954 * ifa_ifwithaddr() compares the entire sockaddr struct
4955 */
4956struct sctp_ifa *
4957sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4958    int holds_lock)
4959{
4960	struct sctp_laddr *laddr;
4961
4962	if (holds_lock == 0) {
4963		SCTP_INP_RLOCK(inp);
4964	}
4965	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4966		if (laddr->ifa == NULL)
4967			continue;
4968		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4969			continue;
4970#ifdef INET
4971		if (addr->sa_family == AF_INET) {
4972			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4973			    laddr->ifa->address.sin.sin_addr.s_addr) {
4974				/* found him. */
4975				if (holds_lock == 0) {
4976					SCTP_INP_RUNLOCK(inp);
4977				}
4978				return (laddr->ifa);
4979				break;
4980			}
4981		}
4982#endif
4983#ifdef INET6
4984		if (addr->sa_family == AF_INET6) {
4985			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4986			    &laddr->ifa->address.sin6)) {
4987				/* found him. */
4988				if (holds_lock == 0) {
4989					SCTP_INP_RUNLOCK(inp);
4990				}
4991				return (laddr->ifa);
4992				break;
4993			}
4994		}
4995#endif
4996	}
4997	if (holds_lock == 0) {
4998		SCTP_INP_RUNLOCK(inp);
4999	}
5000	return (NULL);
5001}
5002
5003uint32_t
5004sctp_get_ifa_hash_val(struct sockaddr *addr)
5005{
5006	switch (addr->sa_family) {
5007#ifdef INET
5008	case AF_INET:
5009		{
5010			struct sockaddr_in *sin;
5011
5012			sin = (struct sockaddr_in *)addr;
5013			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5014		}
5015#endif
5016#ifdef INET6
5017	case AF_INET6:
5018		{
5019			struct sockaddr_in6 *sin6;
5020			uint32_t hash_of_addr;
5021
5022			sin6 = (struct sockaddr_in6 *)addr;
5023			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5024			    sin6->sin6_addr.s6_addr32[1] +
5025			    sin6->sin6_addr.s6_addr32[2] +
5026			    sin6->sin6_addr.s6_addr32[3]);
5027			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5028			return (hash_of_addr);
5029		}
5030#endif
5031	default:
5032		break;
5033	}
5034	return (0);
5035}
5036
5037struct sctp_ifa *
5038sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5039{
5040	struct sctp_ifa *sctp_ifap;
5041	struct sctp_vrf *vrf;
5042	struct sctp_ifalist *hash_head;
5043	uint32_t hash_of_addr;
5044
5045	if (holds_lock == 0)
5046		SCTP_IPI_ADDR_RLOCK();
5047
5048	vrf = sctp_find_vrf(vrf_id);
5049	if (vrf == NULL) {
5050		if (holds_lock == 0)
5051			SCTP_IPI_ADDR_RUNLOCK();
5052		return (NULL);
5053	}
5054	hash_of_addr = sctp_get_ifa_hash_val(addr);
5055
5056	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5057	if (hash_head == NULL) {
5058		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5059		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5060		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5061		sctp_print_address(addr);
5062		SCTP_PRINTF("No such bucket for address\n");
5063		if (holds_lock == 0)
5064			SCTP_IPI_ADDR_RUNLOCK();
5065
5066		return (NULL);
5067	}
5068	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5069		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5070			continue;
5071#ifdef INET
5072		if (addr->sa_family == AF_INET) {
5073			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5074			    sctp_ifap->address.sin.sin_addr.s_addr) {
5075				/* found him. */
5076				if (holds_lock == 0)
5077					SCTP_IPI_ADDR_RUNLOCK();
5078				return (sctp_ifap);
5079				break;
5080			}
5081		}
5082#endif
5083#ifdef INET6
5084		if (addr->sa_family == AF_INET6) {
5085			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5086			    &sctp_ifap->address.sin6)) {
5087				/* found him. */
5088				if (holds_lock == 0)
5089					SCTP_IPI_ADDR_RUNLOCK();
5090				return (sctp_ifap);
5091				break;
5092			}
5093		}
5094#endif
5095	}
5096	if (holds_lock == 0)
5097		SCTP_IPI_ADDR_RUNLOCK();
5098	return (NULL);
5099}
5100
/*
 * The user has read data off the socket; decide whether the peer should
 * be told that our receive window has reopened.  If the window delta
 * since the last report reaches rwnd_req, send a window-update SACK
 * immediately (plus any pending output); otherwise just accumulate the
 * freed count on the tcb for a later call.
 *
 * *freed_so_far is consumed: it is folded into the tcb's running total
 * and reset to 0.  hold_rlock says the caller holds the inp read-queue
 * lock; it is dropped around the TCB work and re-taken before return.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association can't be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we examine its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough to be worth reporting to the peer. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may be dying. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it on the caller's behalf. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5182
5183int
5184sctp_sorecvmsg(struct socket *so,
5185    struct uio *uio,
5186    struct mbuf **mp,
5187    struct sockaddr *from,
5188    int fromlen,
5189    int *msg_flags,
5190    struct sctp_sndrcvinfo *sinfo,
5191    int filling_sinfo)
5192{
5193	/*
5194	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5195	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5196	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5197	 * On the way out we may send out any combination of:
5198	 * MSG_NOTIFICATION MSG_EOR
5199	 *
5200	 */
5201	struct sctp_inpcb *inp = NULL;
5202	int my_len = 0;
5203	int cp_len = 0, error = 0;
5204	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5205	struct mbuf *m = NULL;
5206	struct sctp_tcb *stcb = NULL;
5207	int wakeup_read_socket = 0;
5208	int freecnt_applied = 0;
5209	int out_flags = 0, in_flags = 0;
5210	int block_allowed = 1;
5211	uint32_t freed_so_far = 0;
5212	uint32_t copied_so_far = 0;
5213	int in_eeor_mode = 0;
5214	int no_rcv_needed = 0;
5215	uint32_t rwnd_req = 0;
5216	int hold_sblock = 0;
5217	int hold_rlock = 0;
5218	int slen = 0;
5219	uint32_t held_length = 0;
5220	int sockbuf_lock = 0;
5221
5222	if (uio == NULL) {
5223		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5224		return (EINVAL);
5225	}
5226	if (msg_flags) {
5227		in_flags = *msg_flags;
5228		if (in_flags & MSG_PEEK)
5229			SCTP_STAT_INCR(sctps_read_peeks);
5230	} else {
5231		in_flags = 0;
5232	}
5233	slen = uio->uio_resid;
5234
5235	/* Pull in and set up our int flags */
5236	if (in_flags & MSG_OOB) {
5237		/* Out of band's NOT supported */
5238		return (EOPNOTSUPP);
5239	}
5240	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5241		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5242		return (EINVAL);
5243	}
5244	if ((in_flags & (MSG_DONTWAIT
5245	    | MSG_NBIO
5246	    )) ||
5247	    SCTP_SO_IS_NBIO(so)) {
5248		block_allowed = 0;
5249	}
5250	/* setup the endpoint */
5251	inp = (struct sctp_inpcb *)so->so_pcb;
5252	if (inp == NULL) {
5253		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5254		return (EFAULT);
5255	}
5256	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5257	/* Must be at least a MTU's worth */
5258	if (rwnd_req < SCTP_MIN_RWND)
5259		rwnd_req = SCTP_MIN_RWND;
5260	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5261	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5262		sctp_misc_ints(SCTP_SORECV_ENTER,
5263		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5264	}
5265	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5266		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5267		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5268	}
5269	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5270	if (error) {
5271		goto release_unlocked;
5272	}
5273	sockbuf_lock = 1;
5274restart:
5275
5276
5277restart_nosblocks:
5278	if (hold_sblock == 0) {
5279		SOCKBUF_LOCK(&so->so_rcv);
5280		hold_sblock = 1;
5281	}
5282	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5283	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5284		goto out;
5285	}
5286	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5287		if (so->so_error) {
5288			error = so->so_error;
5289			if ((in_flags & MSG_PEEK) == 0)
5290				so->so_error = 0;
5291			goto out;
5292		} else {
5293			if (so->so_rcv.sb_cc == 0) {
5294				/* indicate EOF */
5295				error = 0;
5296				goto out;
5297			}
5298		}
5299	}
5300	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5301		/* we need to wait for data */
5302		if ((so->so_rcv.sb_cc == 0) &&
5303		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5304		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5305			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5306				/*
5307				 * For active open side clear flags for
5308				 * re-use passive open is blocked by
5309				 * connect.
5310				 */
5311				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5312					/*
5313					 * You were aborted, passive side
5314					 * always hits here
5315					 */
5316					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5317					error = ECONNRESET;
5318				}
5319				so->so_state &= ~(SS_ISCONNECTING |
5320				    SS_ISDISCONNECTING |
5321				    SS_ISCONFIRMING |
5322				    SS_ISCONNECTED);
5323				if (error == 0) {
5324					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5325						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5326						error = ENOTCONN;
5327					}
5328				}
5329				goto out;
5330			}
5331		}
5332		error = sbwait(&so->so_rcv);
5333		if (error) {
5334			goto out;
5335		}
5336		held_length = 0;
5337		goto restart_nosblocks;
5338	} else if (so->so_rcv.sb_cc == 0) {
5339		if (so->so_error) {
5340			error = so->so_error;
5341			if ((in_flags & MSG_PEEK) == 0)
5342				so->so_error = 0;
5343		} else {
5344			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5345			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5346				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5347					/*
5348					 * For active open side clear flags
5349					 * for re-use passive open is
5350					 * blocked by connect.
5351					 */
5352					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5353						/*
5354						 * You were aborted, passive
5355						 * side always hits here
5356						 */
5357						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5358						error = ECONNRESET;
5359					}
5360					so->so_state &= ~(SS_ISCONNECTING |
5361					    SS_ISDISCONNECTING |
5362					    SS_ISCONFIRMING |
5363					    SS_ISCONNECTED);
5364					if (error == 0) {
5365						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5366							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5367							error = ENOTCONN;
5368						}
5369					}
5370					goto out;
5371				}
5372			}
5373			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5374			error = EWOULDBLOCK;
5375		}
5376		goto out;
5377	}
5378	if (hold_sblock == 1) {
5379		SOCKBUF_UNLOCK(&so->so_rcv);
5380		hold_sblock = 0;
5381	}
5382	/* we possibly have data we can read */
5383	/* sa_ignore FREED_MEMORY */
5384	control = TAILQ_FIRST(&inp->read_queue);
5385	if (control == NULL) {
5386		/*
5387		 * This could be happening since the appender did the
5388		 * increment but as not yet did the tailq insert onto the
5389		 * read_queue
5390		 */
5391		if (hold_rlock == 0) {
5392			SCTP_INP_READ_LOCK(inp);
5393		}
5394		control = TAILQ_FIRST(&inp->read_queue);
5395		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5396#ifdef INVARIANTS
5397			panic("Huh, its non zero and nothing on control?");
5398#endif
5399			so->so_rcv.sb_cc = 0;
5400		}
5401		SCTP_INP_READ_UNLOCK(inp);
5402		hold_rlock = 0;
5403		goto restart;
5404	}
5405	if ((control->length == 0) &&
5406	    (control->do_not_ref_stcb)) {
5407		/*
5408		 * Clean up code for freeing assoc that left behind a
5409		 * pdapi.. maybe a peer in EEOR that just closed after
5410		 * sending and never indicated a EOR.
5411		 */
5412		if (hold_rlock == 0) {
5413			hold_rlock = 1;
5414			SCTP_INP_READ_LOCK(inp);
5415		}
5416		control->held_length = 0;
5417		if (control->data) {
5418			/* Hmm there is data here .. fix */
5419			struct mbuf *m_tmp;
5420			int cnt = 0;
5421
5422			m_tmp = control->data;
5423			while (m_tmp) {
5424				cnt += SCTP_BUF_LEN(m_tmp);
5425				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5426					control->tail_mbuf = m_tmp;
5427					control->end_added = 1;
5428				}
5429				m_tmp = SCTP_BUF_NEXT(m_tmp);
5430			}
5431			control->length = cnt;
5432		} else {
5433			/* remove it */
5434			TAILQ_REMOVE(&inp->read_queue, control, next);
5435			/* Add back any hiddend data */
5436			sctp_free_remote_addr(control->whoFrom);
5437			sctp_free_a_readq(stcb, control);
5438		}
5439		if (hold_rlock) {
5440			hold_rlock = 0;
5441			SCTP_INP_READ_UNLOCK(inp);
5442		}
5443		goto restart;
5444	}
5445	if ((control->length == 0) &&
5446	    (control->end_added == 1)) {
5447		/*
5448		 * Do we also need to check for (control->pdapi_aborted ==
5449		 * 1)?
5450		 */
5451		if (hold_rlock == 0) {
5452			hold_rlock = 1;
5453			SCTP_INP_READ_LOCK(inp);
5454		}
5455		TAILQ_REMOVE(&inp->read_queue, control, next);
5456		if (control->data) {
5457#ifdef INVARIANTS
5458			panic("control->data not null but control->length == 0");
5459#else
5460			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5461			sctp_m_freem(control->data);
5462			control->data = NULL;
5463#endif
5464		}
5465		if (control->aux_data) {
5466			sctp_m_free(control->aux_data);
5467			control->aux_data = NULL;
5468		}
5469		sctp_free_remote_addr(control->whoFrom);
5470		sctp_free_a_readq(stcb, control);
5471		if (hold_rlock) {
5472			hold_rlock = 0;
5473			SCTP_INP_READ_UNLOCK(inp);
5474		}
5475		goto restart;
5476	}
5477	if (control->length == 0) {
5478		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5479		    (filling_sinfo)) {
5480			/* find a more suitable one then this */
5481			ctl = TAILQ_NEXT(control, next);
5482			while (ctl) {
5483				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5484				    (ctl->some_taken ||
5485				    (ctl->spec_flags & M_NOTIFICATION) ||
5486				    ((ctl->do_not_ref_stcb == 0) &&
5487				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5488				    ) {
5489					/*-
5490					 * If we have a different TCB next, and there is data
5491					 * present. If we have already taken some (pdapi), OR we can
5492					 * ref the tcb and no delivery as started on this stream, we
5493					 * take it. Note we allow a notification on a different
5494					 * assoc to be delivered..
5495					 */
5496					control = ctl;
5497					goto found_one;
5498				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5499					    (ctl->length) &&
5500					    ((ctl->some_taken) ||
5501					    ((ctl->do_not_ref_stcb == 0) &&
5502					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5503				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5504					/*-
5505					 * If we have the same tcb, and there is data present, and we
5506					 * have the strm interleave feature present. Then if we have
5507					 * taken some (pdapi) or we can refer to tht tcb AND we have
5508					 * not started a delivery for this stream, we can take it.
5509					 * Note we do NOT allow a notificaiton on the same assoc to
5510					 * be delivered.
5511					 */
5512					control = ctl;
5513					goto found_one;
5514				}
5515				ctl = TAILQ_NEXT(ctl, next);
5516			}
5517		}
5518		/*
5519		 * if we reach here, not suitable replacement is available
5520		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5521		 * into the our held count, and its time to sleep again.
5522		 */
5523		held_length = so->so_rcv.sb_cc;
5524		control->held_length = so->so_rcv.sb_cc;
5525		goto restart;
5526	}
5527	/* Clear the held length since there is something to read */
5528	control->held_length = 0;
5529	if (hold_rlock) {
5530		SCTP_INP_READ_UNLOCK(inp);
5531		hold_rlock = 0;
5532	}
5533found_one:
5534	/*
5535	 * If we reach here, control has a some data for us to read off.
5536	 * Note that stcb COULD be NULL.
5537	 */
5538	control->some_taken++;
5539	if (hold_sblock) {
5540		SOCKBUF_UNLOCK(&so->so_rcv);
5541		hold_sblock = 0;
5542	}
5543	stcb = control->stcb;
5544	if (stcb) {
5545		if ((control->do_not_ref_stcb == 0) &&
5546		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5547			if (freecnt_applied == 0)
5548				stcb = NULL;
5549		} else if (control->do_not_ref_stcb == 0) {
5550			/* you can't free it on me please */
5551			/*
5552			 * The lock on the socket buffer protects us so the
5553			 * free code will stop. But since we used the
5554			 * socketbuf lock and the sender uses the tcb_lock
5555			 * to increment, we need to use the atomic add to
5556			 * the refcnt
5557			 */
5558			if (freecnt_applied) {
5559#ifdef INVARIANTS
5560				panic("refcnt already incremented");
5561#else
5562				SCTP_PRINTF("refcnt already incremented?\n");
5563#endif
5564			} else {
5565				atomic_add_int(&stcb->asoc.refcnt, 1);
5566				freecnt_applied = 1;
5567			}
5568			/*
5569			 * Setup to remember how much we have not yet told
5570			 * the peer our rwnd has opened up. Note we grab the
5571			 * value from the tcb from last time. Note too that
5572			 * sack sending clears this when a sack is sent,
5573			 * which is fine. Once we hit the rwnd_req, we then
5574			 * will go to the sctp_user_rcvd() that will not
5575			 * lock until it KNOWs it MUST send a WUP-SACK.
5576			 */
5577			freed_so_far = stcb->freed_by_sorcv_sincelast;
5578			stcb->freed_by_sorcv_sincelast = 0;
5579		}
5580	}
5581	if (stcb &&
5582	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5583	    control->do_not_ref_stcb == 0) {
5584		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5585	}
5586	/* First lets get off the sinfo and sockaddr info */
5587	if ((sinfo) && filling_sinfo) {
5588		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5589		nxt = TAILQ_NEXT(control, next);
5590		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5591		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5592			struct sctp_extrcvinfo *s_extra;
5593
5594			s_extra = (struct sctp_extrcvinfo *)sinfo;
5595			if ((nxt) &&
5596			    (nxt->length)) {
5597				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5598				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5599					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5600				}
5601				if (nxt->spec_flags & M_NOTIFICATION) {
5602					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5603				}
5604				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5605				s_extra->sreinfo_next_length = nxt->length;
5606				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5607				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5608				if (nxt->tail_mbuf != NULL) {
5609					if (nxt->end_added) {
5610						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5611					}
5612				}
5613			} else {
5614				/*
5615				 * we explicitly 0 this, since the memcpy
5616				 * got some other things beyond the older
5617				 * sinfo_ that is on the control's structure
5618				 * :-D
5619				 */
5620				nxt = NULL;
5621				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5622				s_extra->sreinfo_next_aid = 0;
5623				s_extra->sreinfo_next_length = 0;
5624				s_extra->sreinfo_next_ppid = 0;
5625				s_extra->sreinfo_next_stream = 0;
5626			}
5627		}
5628		/*
5629		 * update off the real current cum-ack, if we have an stcb.
5630		 */
5631		if ((control->do_not_ref_stcb == 0) && stcb)
5632			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5633		/*
5634		 * mask off the high bits, we keep the actual chunk bits in
5635		 * there.
5636		 */
5637		sinfo->sinfo_flags &= 0x00ff;
5638		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5639			sinfo->sinfo_flags |= SCTP_UNORDERED;
5640		}
5641	}
5642#ifdef SCTP_ASOCLOG_OF_TSNS
5643	{
5644		int index, newindex;
5645		struct sctp_pcbtsn_rlog *entry;
5646
5647		do {
5648			index = inp->readlog_index;
5649			newindex = index + 1;
5650			if (newindex >= SCTP_READ_LOG_SIZE) {
5651				newindex = 0;
5652			}
5653		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5654		entry = &inp->readlog[index];
5655		entry->vtag = control->sinfo_assoc_id;
5656		entry->strm = control->sinfo_stream;
5657		entry->seq = control->sinfo_ssn;
5658		entry->sz = control->length;
5659		entry->flgs = control->sinfo_flags;
5660	}
5661#endif
5662	if ((fromlen > 0) && (from != NULL)) {
5663		union sctp_sockstore store;
5664		size_t len;
5665
5666		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5667#ifdef INET6
5668		case AF_INET6:
5669			len = sizeof(struct sockaddr_in6);
5670			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5671			store.sin6.sin6_port = control->port_from;
5672			break;
5673#endif
5674#ifdef INET
5675		case AF_INET:
5676#ifdef INET6
5677			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5678				len = sizeof(struct sockaddr_in6);
5679				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5680				    &store.sin6);
5681				store.sin6.sin6_port = control->port_from;
5682			} else {
5683				len = sizeof(struct sockaddr_in);
5684				store.sin = control->whoFrom->ro._l_addr.sin;
5685				store.sin.sin_port = control->port_from;
5686			}
5687#else
5688			len = sizeof(struct sockaddr_in);
5689			store.sin = control->whoFrom->ro._l_addr.sin;
5690			store.sin.sin_port = control->port_from;
5691#endif
5692			break;
5693#endif
5694		default:
5695			len = 0;
5696			break;
5697		}
5698		memcpy(from, &store, min((size_t)fromlen, len));
5699#ifdef INET6
5700		{
5701			struct sockaddr_in6 lsa6, *from6;
5702
5703			from6 = (struct sockaddr_in6 *)from;
5704			sctp_recover_scope_mac(from6, (&lsa6));
5705		}
5706#endif
5707	}
5708	/* now copy out what data we can */
5709	if (mp == NULL) {
5710		/* copy out each mbuf in the chain up to length */
5711get_more_data:
5712		m = control->data;
5713		while (m) {
5714			/* Move out all we can */
5715			cp_len = (int)uio->uio_resid;
5716			my_len = (int)SCTP_BUF_LEN(m);
5717			if (cp_len > my_len) {
5718				/* not enough in this buf */
5719				cp_len = my_len;
5720			}
5721			if (hold_rlock) {
5722				SCTP_INP_READ_UNLOCK(inp);
5723				hold_rlock = 0;
5724			}
5725			if (cp_len > 0)
5726				error = uiomove(mtod(m, char *), cp_len, uio);
5727			/* re-read */
5728			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5729				goto release;
5730			}
5731			if ((control->do_not_ref_stcb == 0) && stcb &&
5732			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5733				no_rcv_needed = 1;
5734			}
5735			if (error) {
5736				/* error we are out of here */
5737				goto release;
5738			}
5739			if ((SCTP_BUF_NEXT(m) == NULL) &&
5740			    (cp_len >= SCTP_BUF_LEN(m)) &&
5741			    ((control->end_added == 0) ||
5742			    (control->end_added &&
5743			    (TAILQ_NEXT(control, next) == NULL)))
5744			    ) {
5745				SCTP_INP_READ_LOCK(inp);
5746				hold_rlock = 1;
5747			}
5748			if (cp_len == SCTP_BUF_LEN(m)) {
5749				if ((SCTP_BUF_NEXT(m) == NULL) &&
5750				    (control->end_added)) {
5751					out_flags |= MSG_EOR;
5752					if ((control->do_not_ref_stcb == 0) &&
5753					    (control->stcb != NULL) &&
5754					    ((control->spec_flags & M_NOTIFICATION) == 0))
5755						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5756				}
5757				if (control->spec_flags & M_NOTIFICATION) {
5758					out_flags |= MSG_NOTIFICATION;
5759				}
5760				/* we ate up the mbuf */
5761				if (in_flags & MSG_PEEK) {
5762					/* just looking */
5763					m = SCTP_BUF_NEXT(m);
5764					copied_so_far += cp_len;
5765				} else {
5766					/* dispose of the mbuf */
5767					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5768						sctp_sblog(&so->so_rcv,
5769						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5770					}
5771					sctp_sbfree(control, stcb, &so->so_rcv, m);
5772					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5773						sctp_sblog(&so->so_rcv,
5774						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5775					}
5776					copied_so_far += cp_len;
5777					freed_so_far += cp_len;
5778					freed_so_far += MSIZE;
5779					atomic_subtract_int(&control->length, cp_len);
5780					control->data = sctp_m_free(m);
5781					m = control->data;
5782					/*
5783					 * been through it all, must hold sb
5784					 * lock ok to null tail
5785					 */
5786					if (control->data == NULL) {
5787#ifdef INVARIANTS
5788						if ((control->end_added == 0) ||
5789						    (TAILQ_NEXT(control, next) == NULL)) {
5790							/*
5791							 * If the end is not
5792							 * added, OR the
5793							 * next is NOT null
5794							 * we MUST have the
5795							 * lock.
5796							 */
5797							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5798								panic("Hmm we don't own the lock?");
5799							}
5800						}
5801#endif
5802						control->tail_mbuf = NULL;
5803#ifdef INVARIANTS
5804						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5805							panic("end_added, nothing left and no MSG_EOR");
5806						}
5807#endif
5808					}
5809				}
5810			} else {
5811				/* Do we need to trim the mbuf? */
5812				if (control->spec_flags & M_NOTIFICATION) {
5813					out_flags |= MSG_NOTIFICATION;
5814				}
5815				if ((in_flags & MSG_PEEK) == 0) {
5816					SCTP_BUF_RESV_UF(m, cp_len);
5817					SCTP_BUF_LEN(m) -= cp_len;
5818					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5819						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5820					}
5821					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5822					if ((control->do_not_ref_stcb == 0) &&
5823					    stcb) {
5824						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5825					}
5826					copied_so_far += cp_len;
5827					freed_so_far += cp_len;
5828					freed_so_far += MSIZE;
5829					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5830						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5831						    SCTP_LOG_SBRESULT, 0);
5832					}
5833					atomic_subtract_int(&control->length, cp_len);
5834				} else {
5835					copied_so_far += cp_len;
5836				}
5837			}
5838			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5839				break;
5840			}
5841			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5842			    (control->do_not_ref_stcb == 0) &&
5843			    (freed_so_far >= rwnd_req)) {
5844				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5845			}
5846		}		/* end while(m) */
5847		/*
5848		 * At this point we have looked at it all and we either have
5849		 * a MSG_EOR/or read all the user wants... <OR>
5850		 * control->length == 0.
5851		 */
5852		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5853			/* we are done with this control */
5854			if (control->length == 0) {
5855				if (control->data) {
5856#ifdef INVARIANTS
5857					panic("control->data not null at read eor?");
5858#else
5859					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5860					sctp_m_freem(control->data);
5861					control->data = NULL;
5862#endif
5863				}
5864		done_with_control:
5865				if (TAILQ_NEXT(control, next) == NULL) {
5866					/*
5867					 * If we don't have a next we need a
5868					 * lock, if there is a next
5869					 * interrupt is filling ahead of us
5870					 * and we don't need a lock to
5871					 * remove this guy (which is the
5872					 * head of the queue).
5873					 */
5874					if (hold_rlock == 0) {
5875						SCTP_INP_READ_LOCK(inp);
5876						hold_rlock = 1;
5877					}
5878				}
5879				TAILQ_REMOVE(&inp->read_queue, control, next);
5880				/* Add back any hiddend data */
5881				if (control->held_length) {
5882					held_length = 0;
5883					control->held_length = 0;
5884					wakeup_read_socket = 1;
5885				}
5886				if (control->aux_data) {
5887					sctp_m_free(control->aux_data);
5888					control->aux_data = NULL;
5889				}
5890				no_rcv_needed = control->do_not_ref_stcb;
5891				sctp_free_remote_addr(control->whoFrom);
5892				control->data = NULL;
5893				sctp_free_a_readq(stcb, control);
5894				control = NULL;
5895				if ((freed_so_far >= rwnd_req) &&
5896				    (no_rcv_needed == 0))
5897					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5898
5899			} else {
5900				/*
5901				 * The user did not read all of this
5902				 * message, turn off the returned MSG_EOR
5903				 * since we are leaving more behind on the
5904				 * control to read.
5905				 */
5906#ifdef INVARIANTS
5907				if (control->end_added &&
5908				    (control->data == NULL) &&
5909				    (control->tail_mbuf == NULL)) {
5910					panic("Gak, control->length is corrupt?");
5911				}
5912#endif
5913				no_rcv_needed = control->do_not_ref_stcb;
5914				out_flags &= ~MSG_EOR;
5915			}
5916		}
5917		if (out_flags & MSG_EOR) {
5918			goto release;
5919		}
5920		if ((uio->uio_resid == 0) ||
5921		    ((in_eeor_mode) &&
5922		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5923			goto release;
5924		}
5925		/*
5926		 * If I hit here the receiver wants more and this message is
5927		 * NOT done (pd-api). So two questions. Can we block? if not
5928		 * we are done. Did the user NOT set MSG_WAITALL?
5929		 */
5930		if (block_allowed == 0) {
5931			goto release;
5932		}
5933		/*
5934		 * We need to wait for more data a few things: - We don't
5935		 * sbunlock() so we don't get someone else reading. - We
5936		 * must be sure to account for the case where what is added
5937		 * is NOT to our control when we wakeup.
5938		 */
5939
5940		/*
5941		 * Do we need to tell the transport a rwnd update might be
5942		 * needed before we go to sleep?
5943		 */
5944		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5945		    ((freed_so_far >= rwnd_req) &&
5946		    (control->do_not_ref_stcb == 0) &&
5947		    (no_rcv_needed == 0))) {
5948			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5949		}
5950wait_some_more:
5951		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5952			goto release;
5953		}
5954		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5955			goto release;
5956
5957		if (hold_rlock == 1) {
5958			SCTP_INP_READ_UNLOCK(inp);
5959			hold_rlock = 0;
5960		}
5961		if (hold_sblock == 0) {
5962			SOCKBUF_LOCK(&so->so_rcv);
5963			hold_sblock = 1;
5964		}
5965		if ((copied_so_far) && (control->length == 0) &&
5966		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5967			goto release;
5968		}
5969		if (so->so_rcv.sb_cc <= control->held_length) {
5970			error = sbwait(&so->so_rcv);
5971			if (error) {
5972				goto release;
5973			}
5974			control->held_length = 0;
5975		}
5976		if (hold_sblock) {
5977			SOCKBUF_UNLOCK(&so->so_rcv);
5978			hold_sblock = 0;
5979		}
5980		if (control->length == 0) {
5981			/* still nothing here */
5982			if (control->end_added == 1) {
5983				/* he aborted, or is done i.e.did a shutdown */
5984				out_flags |= MSG_EOR;
5985				if (control->pdapi_aborted) {
5986					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5987						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5988
5989					out_flags |= MSG_TRUNC;
5990				} else {
5991					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5992						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5993				}
5994				goto done_with_control;
5995			}
5996			if (so->so_rcv.sb_cc > held_length) {
5997				control->held_length = so->so_rcv.sb_cc;
5998				held_length = 0;
5999			}
6000			goto wait_some_more;
6001		} else if (control->data == NULL) {
6002			/*
6003			 * we must re-sync since data is probably being
6004			 * added
6005			 */
6006			SCTP_INP_READ_LOCK(inp);
6007			if ((control->length > 0) && (control->data == NULL)) {
6008				/*
6009				 * big trouble.. we have the lock and its
6010				 * corrupt?
6011				 */
6012#ifdef INVARIANTS
6013				panic("Impossible data==NULL length !=0");
6014#endif
6015				out_flags |= MSG_EOR;
6016				out_flags |= MSG_TRUNC;
6017				control->length = 0;
6018				SCTP_INP_READ_UNLOCK(inp);
6019				goto done_with_control;
6020			}
6021			SCTP_INP_READ_UNLOCK(inp);
6022			/* We will fall around to get more data */
6023		}
6024		goto get_more_data;
6025	} else {
6026		/*-
6027		 * Give caller back the mbuf chain,
6028		 * store in uio_resid the length
6029		 */
6030		wakeup_read_socket = 0;
6031		if ((control->end_added == 0) ||
6032		    (TAILQ_NEXT(control, next) == NULL)) {
6033			/* Need to get rlock */
6034			if (hold_rlock == 0) {
6035				SCTP_INP_READ_LOCK(inp);
6036				hold_rlock = 1;
6037			}
6038		}
6039		if (control->end_added) {
6040			out_flags |= MSG_EOR;
6041			if ((control->do_not_ref_stcb == 0) &&
6042			    (control->stcb != NULL) &&
6043			    ((control->spec_flags & M_NOTIFICATION) == 0))
6044				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6045		}
6046		if (control->spec_flags & M_NOTIFICATION) {
6047			out_flags |= MSG_NOTIFICATION;
6048		}
6049		uio->uio_resid = control->length;
6050		*mp = control->data;
6051		m = control->data;
6052		while (m) {
6053			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6054				sctp_sblog(&so->so_rcv,
6055				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6056			}
6057			sctp_sbfree(control, stcb, &so->so_rcv, m);
6058			freed_so_far += SCTP_BUF_LEN(m);
6059			freed_so_far += MSIZE;
6060			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6061				sctp_sblog(&so->so_rcv,
6062				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6063			}
6064			m = SCTP_BUF_NEXT(m);
6065		}
6066		control->data = control->tail_mbuf = NULL;
6067		control->length = 0;
6068		if (out_flags & MSG_EOR) {
6069			/* Done with this control */
6070			goto done_with_control;
6071		}
6072	}
6073release:
6074	if (hold_rlock == 1) {
6075		SCTP_INP_READ_UNLOCK(inp);
6076		hold_rlock = 0;
6077	}
6078	if (hold_sblock == 1) {
6079		SOCKBUF_UNLOCK(&so->so_rcv);
6080		hold_sblock = 0;
6081	}
6082	sbunlock(&so->so_rcv);
6083	sockbuf_lock = 0;
6084
6085release_unlocked:
6086	if (hold_sblock) {
6087		SOCKBUF_UNLOCK(&so->so_rcv);
6088		hold_sblock = 0;
6089	}
6090	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6091		if ((freed_so_far >= rwnd_req) &&
6092		    (control && (control->do_not_ref_stcb == 0)) &&
6093		    (no_rcv_needed == 0))
6094			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6095	}
6096out:
6097	if (msg_flags) {
6098		*msg_flags = out_flags;
6099	}
6100	if (((out_flags & MSG_EOR) == 0) &&
6101	    ((in_flags & MSG_PEEK) == 0) &&
6102	    (sinfo) &&
6103	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6104	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6105		struct sctp_extrcvinfo *s_extra;
6106
6107		s_extra = (struct sctp_extrcvinfo *)sinfo;
6108		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6109	}
6110	if (hold_rlock == 1) {
6111		SCTP_INP_READ_UNLOCK(inp);
6112	}
6113	if (hold_sblock) {
6114		SOCKBUF_UNLOCK(&so->so_rcv);
6115	}
6116	if (sockbuf_lock) {
6117		sbunlock(&so->so_rcv);
6118	}
6119	if (freecnt_applied) {
6120		/*
6121		 * The lock on the socket buffer protects us so the free
6122		 * code will stop. But since we used the socketbuf lock and
6123		 * the sender uses the tcb_lock to increment, we need to use
6124		 * the atomic add to the refcnt.
6125		 */
6126		if (stcb == NULL) {
6127#ifdef INVARIANTS
6128			panic("stcb for refcnt has gone NULL?");
6129			goto stage_left;
6130#else
6131			goto stage_left;
6132#endif
6133		}
6134		atomic_add_int(&stcb->asoc.refcnt, -1);
6135		/* Save the value back for next time */
6136		stcb->freed_by_sorcv_sincelast = freed_so_far;
6137	}
6138	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6139		if (stcb) {
6140			sctp_misc_ints(SCTP_SORECV_DONE,
6141			    freed_so_far,
6142			    ((uio) ? (slen - uio->uio_resid) : slen),
6143			    stcb->asoc.my_rwnd,
6144			    so->so_rcv.sb_cc);
6145		} else {
6146			sctp_misc_ints(SCTP_SORECV_DONE,
6147			    freed_so_far,
6148			    ((uio) ? (slen - uio->uio_resid) : slen),
6149			    0,
6150			    so->so_rcv.sb_cc);
6151		}
6152	}
6153stage_left:
6154	if (wakeup_read_socket) {
6155		sctp_sorwakeup(inp, so);
6156	}
6157	return (error);
6158}
6159
6160
6161#ifdef SCTP_MBUF_LOGGING
6162struct mbuf *
6163sctp_m_free(struct mbuf *m)
6164{
6165	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6166		sctp_log_mb(m, SCTP_MBUF_IFREE);
6167	}
6168	return (m_free(m));
6169}
6170
6171void
6172sctp_m_freem(struct mbuf *mb)
6173{
6174	while (mb != NULL)
6175		mb = sctp_m_free(mb);
6176}
6177
6178#endif
6179
6180int
6181sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6182{
6183	/*
6184	 * Given a local address. For all associations that holds the
6185	 * address, request a peer-set-primary.
6186	 */
6187	struct sctp_ifa *ifa;
6188	struct sctp_laddr *wi;
6189
6190	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6191	if (ifa == NULL) {
6192		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6193		return (EADDRNOTAVAIL);
6194	}
6195	/*
6196	 * Now that we have the ifa we must awaken the iterator with this
6197	 * message.
6198	 */
6199	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6200	if (wi == NULL) {
6201		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6202		return (ENOMEM);
6203	}
6204	/* Now incr the count and int wi structure */
6205	SCTP_INCR_LADDR_COUNT();
6206	bzero(wi, sizeof(*wi));
6207	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6208	wi->ifa = ifa;
6209	wi->action = SCTP_SET_PRIM_ADDR;
6210	atomic_add_int(&ifa->refcount, 1);
6211
6212	/* Now add it to the work queue */
6213	SCTP_WQ_ADDR_LOCK();
6214	/*
6215	 * Should this really be a tailq? As it is we will process the
6216	 * newest first :-0
6217	 */
6218	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6219	SCTP_WQ_ADDR_UNLOCK();
6220	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6221	    (struct sctp_inpcb *)NULL,
6222	    (struct sctp_tcb *)NULL,
6223	    (struct sctp_nets *)NULL);
6224	return (0);
6225}
6226
6227
/*
 * Socket-layer receive entry point for SCTP. Wraps sctp_sorecvmsg():
 * prepares the optional source-address buffer and the extended receive
 * info, performs the receive, then converts the results into the forms
 * the socket layer expects (a CMSG control chain and a duplicated
 * sockaddr). Returns 0 or an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	/*
	 * NOTE(review): sockbuf is a raw byte array used as sockaddr
	 * storage below; this assumes the compiler aligns it suitably
	 * for the sockaddr casts -- verify.
	 */
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	/* Assume the caller wants sinfo until the feature checks say otherwise. */
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	/* Skip sinfo work when no receive-info feature is on or no controlp. */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		/* Caller wants the source address; point into the local buffer. */
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 later means "no address was filled in". */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6288
6289
6290
6291
6292
/*
 * Add a packed list of totaddr sockaddrs (starting at addr) as remote
 * addresses of stcb. On any invalid address or allocation failure the
 * association is FREED (sctp_free_assoc) and *error is set; the caller
 * must not touch stcb after a non-zero *error. Returns the number of
 * addresses successfully added.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast destinations. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				/* Tear down the whole association on a bad address. */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast v6 destinations. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves incr at its
			 * previous value (0 on the first iteration), so the
			 * cursor may not advance -- confirm callers always
			 * pass validated families.
			 */
			break;
		}
		/* Advance to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6368
6369struct sctp_tcb *
6370sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6371    int *totaddr, int *num_v4, int *num_v6, int *error,
6372    int limit, int *bad_addr)
6373{
6374	struct sockaddr *sa;
6375	struct sctp_tcb *stcb = NULL;
6376	size_t incr, at, i;
6377
6378	at = incr = 0;
6379	sa = addr;
6380
6381	*error = *num_v6 = *num_v4 = 0;
6382	/* account and validate addresses */
6383	for (i = 0; i < (size_t)*totaddr; i++) {
6384		switch (sa->sa_family) {
6385#ifdef INET
6386		case AF_INET:
6387			(*num_v4) += 1;
6388			incr = sizeof(struct sockaddr_in);
6389			if (sa->sa_len != incr) {
6390				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6391				*error = EINVAL;
6392				*bad_addr = 1;
6393				return (NULL);
6394			}
6395			break;
6396#endif
6397#ifdef INET6
6398		case AF_INET6:
6399			{
6400				struct sockaddr_in6 *sin6;
6401
6402				sin6 = (struct sockaddr_in6 *)sa;
6403				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6404					/* Must be non-mapped for connectx */
6405					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6406					*error = EINVAL;
6407					*bad_addr = 1;
6408					return (NULL);
6409				}
6410				(*num_v6) += 1;
6411				incr = sizeof(struct sockaddr_in6);
6412				if (sa->sa_len != incr) {
6413					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6414					*error = EINVAL;
6415					*bad_addr = 1;
6416					return (NULL);
6417				}
6418				break;
6419			}
6420#endif
6421		default:
6422			*totaddr = i;
6423			/* we are done */
6424			break;
6425		}
6426		if (i == (size_t)*totaddr) {
6427			break;
6428		}
6429		SCTP_INP_INCR_REF(inp);
6430		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6431		if (stcb != NULL) {
6432			/* Already have or am bring up an association */
6433			return (stcb);
6434		} else {
6435			SCTP_INP_DECR_REF(inp);
6436		}
6437		if ((at + incr) > (size_t)limit) {
6438			*totaddr = i;
6439			break;
6440		}
6441		sa = (struct sockaddr *)((caddr_t)sa + incr);
6442	}
6443	return ((struct sctp_tcb *)NULL);
6444}
6445
6446/*
6447 * sctp_bindx(ADD) for one address.
6448 * assumes all arguments are valid/checked by caller.
6449 */
6450void
6451sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6452    struct sockaddr *sa, sctp_assoc_t assoc_id,
6453    uint32_t vrf_id, int *error, void *p)
6454{
6455	struct sockaddr *addr_touse;
6456
6457#if defined(INET) && defined(INET6)
6458	struct sockaddr_in sin;
6459
6460#endif
6461
6462	/* see if we're bound all already! */
6463	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6464		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6465		*error = EINVAL;
6466		return;
6467	}
6468	addr_touse = sa;
6469#ifdef INET6
6470	if (sa->sa_family == AF_INET6) {
6471#ifdef INET
6472		struct sockaddr_in6 *sin6;
6473
6474#endif
6475		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6476			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6477			*error = EINVAL;
6478			return;
6479		}
6480		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6481			/* can only bind v6 on PF_INET6 sockets */
6482			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6483			*error = EINVAL;
6484			return;
6485		}
6486#ifdef INET
6487		sin6 = (struct sockaddr_in6 *)addr_touse;
6488		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6489			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6490			    SCTP_IPV6_V6ONLY(inp)) {
6491				/* can't bind v4-mapped on PF_INET sockets */
6492				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493				*error = EINVAL;
6494				return;
6495			}
6496			in6_sin6_2_sin(&sin, sin6);
6497			addr_touse = (struct sockaddr *)&sin;
6498		}
6499#endif
6500	}
6501#endif
6502#ifdef INET
6503	if (sa->sa_family == AF_INET) {
6504		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6505			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6506			*error = EINVAL;
6507			return;
6508		}
6509		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6510		    SCTP_IPV6_V6ONLY(inp)) {
6511			/* can't bind v4 on PF_INET sockets */
6512			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6513			*error = EINVAL;
6514			return;
6515		}
6516	}
6517#endif
6518	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6519		if (p == NULL) {
6520			/* Can't get proc for Net/Open BSD */
6521			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6522			*error = EINVAL;
6523			return;
6524		}
6525		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6526		return;
6527	}
6528	/*
6529	 * No locks required here since bind and mgmt_ep_sa all do their own
6530	 * locking. If we do something for the FIX: below we may need to
6531	 * lock in that case.
6532	 */
6533	if (assoc_id == 0) {
6534		/* add the address */
6535		struct sctp_inpcb *lep;
6536		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6537
6538		/* validate the incoming port */
6539		if ((lsin->sin_port != 0) &&
6540		    (lsin->sin_port != inp->sctp_lport)) {
6541			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6542			*error = EINVAL;
6543			return;
6544		} else {
6545			/* user specified 0 port, set it to existing port */
6546			lsin->sin_port = inp->sctp_lport;
6547		}
6548
6549		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6550		if (lep != NULL) {
6551			/*
6552			 * We must decrement the refcount since we have the
6553			 * ep already and are binding. No remove going on
6554			 * here.
6555			 */
6556			SCTP_INP_DECR_REF(lep);
6557		}
6558		if (lep == inp) {
6559			/* already bound to it.. ok */
6560			return;
6561		} else if (lep == NULL) {
6562			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6563			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6564			    SCTP_ADD_IP_ADDRESS,
6565			    vrf_id, NULL);
6566		} else {
6567			*error = EADDRINUSE;
6568		}
6569		if (*error)
6570			return;
6571	} else {
6572		/*
6573		 * FIX: decide whether we allow assoc based bindx
6574		 */
6575	}
6576}
6577
6578/*
6579 * sctp_bindx(DELETE) for one address.
6580 * assumes all arguments are valid/checked by caller.
6581 */
6582void
6583sctp_bindx_delete_address(struct sctp_inpcb *inp,
6584    struct sockaddr *sa, sctp_assoc_t assoc_id,
6585    uint32_t vrf_id, int *error)
6586{
6587	struct sockaddr *addr_touse;
6588
6589#if defined(INET) && defined(INET6)
6590	struct sockaddr_in sin;
6591
6592#endif
6593
6594	/* see if we're bound all already! */
6595	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6596		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597		*error = EINVAL;
6598		return;
6599	}
6600	addr_touse = sa;
6601#ifdef INET6
6602	if (sa->sa_family == AF_INET6) {
6603#ifdef INET
6604		struct sockaddr_in6 *sin6;
6605
6606#endif
6607
6608		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6609			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6610			*error = EINVAL;
6611			return;
6612		}
6613		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6614			/* can only bind v6 on PF_INET6 sockets */
6615			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6616			*error = EINVAL;
6617			return;
6618		}
6619#ifdef INET
6620		sin6 = (struct sockaddr_in6 *)addr_touse;
6621		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6622			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6623			    SCTP_IPV6_V6ONLY(inp)) {
6624				/* can't bind mapped-v4 on PF_INET sockets */
6625				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626				*error = EINVAL;
6627				return;
6628			}
6629			in6_sin6_2_sin(&sin, sin6);
6630			addr_touse = (struct sockaddr *)&sin;
6631		}
6632#endif
6633	}
6634#endif
6635#ifdef INET
6636	if (sa->sa_family == AF_INET) {
6637		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6638			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6639			*error = EINVAL;
6640			return;
6641		}
6642		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6643		    SCTP_IPV6_V6ONLY(inp)) {
6644			/* can't bind v4 on PF_INET sockets */
6645			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6646			*error = EINVAL;
6647			return;
6648		}
6649	}
6650#endif
6651	/*
6652	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6653	 * below is ever changed we may need to lock before calling
6654	 * association level binding.
6655	 */
6656	if (assoc_id == 0) {
6657		/* delete the address */
6658		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6659		    SCTP_DEL_IP_ADDRESS,
6660		    vrf_id, NULL);
6661	} else {
6662		/*
6663		 * FIX: decide whether we allow assoc based bindx
6664		 */
6665	}
6666}
6667
6668/*
6669 * returns the valid local address count for an assoc, taking into account
6670 * all scoping rules
6671 */
6672int
6673sctp_local_addr_count(struct sctp_tcb *stcb)
6674{
6675	int loopback_scope;
6676
6677#if defined(INET)
6678	int ipv4_local_scope, ipv4_addr_legal;
6679
6680#endif
6681#if defined (INET6)
6682	int local_scope, site_scope, ipv6_addr_legal;
6683
6684#endif
6685	struct sctp_vrf *vrf;
6686	struct sctp_ifn *sctp_ifn;
6687	struct sctp_ifa *sctp_ifa;
6688	int count = 0;
6689
6690	/* Turn on all the appropriate scopes */
6691	loopback_scope = stcb->asoc.scope.loopback_scope;
6692#if defined(INET)
6693	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6694	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6695#endif
6696#if defined(INET6)
6697	local_scope = stcb->asoc.scope.local_scope;
6698	site_scope = stcb->asoc.scope.site_scope;
6699	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6700#endif
6701	SCTP_IPI_ADDR_RLOCK();
6702	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6703	if (vrf == NULL) {
6704		/* no vrf, no addresses */
6705		SCTP_IPI_ADDR_RUNLOCK();
6706		return (0);
6707	}
6708	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6709		/*
6710		 * bound all case: go through all ifns on the vrf
6711		 */
6712		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6713			if ((loopback_scope == 0) &&
6714			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6715				continue;
6716			}
6717			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6718				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6719					continue;
6720				switch (sctp_ifa->address.sa.sa_family) {
6721#ifdef INET
6722				case AF_INET:
6723					if (ipv4_addr_legal) {
6724						struct sockaddr_in *sin;
6725
6726						sin = &sctp_ifa->address.sin;
6727						if (sin->sin_addr.s_addr == 0) {
6728							/*
6729							 * skip unspecified
6730							 * addrs
6731							 */
6732							continue;
6733						}
6734						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6735						    &sin->sin_addr) != 0) {
6736							continue;
6737						}
6738						if ((ipv4_local_scope == 0) &&
6739						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6740							continue;
6741						}
6742						/* count this one */
6743						count++;
6744					} else {
6745						continue;
6746					}
6747					break;
6748#endif
6749#ifdef INET6
6750				case AF_INET6:
6751					if (ipv6_addr_legal) {
6752						struct sockaddr_in6 *sin6;
6753
6754						sin6 = &sctp_ifa->address.sin6;
6755						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6756							continue;
6757						}
6758						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6759						    &sin6->sin6_addr) != 0) {
6760							continue;
6761						}
6762						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6763							if (local_scope == 0)
6764								continue;
6765							if (sin6->sin6_scope_id == 0) {
6766								if (sa6_recoverscope(sin6) != 0)
6767									/*
6768									 *
6769									 * bad
6770									 *
6771									 * li
6772									 * nk
6773									 *
6774									 * loc
6775									 * al
6776									 *
6777									 * add
6778									 * re
6779									 * ss
6780									 * */
6781									continue;
6782							}
6783						}
6784						if ((site_scope == 0) &&
6785						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6786							continue;
6787						}
6788						/* count this one */
6789						count++;
6790					}
6791					break;
6792#endif
6793				default:
6794					/* TSNH */
6795					break;
6796				}
6797			}
6798		}
6799	} else {
6800		/*
6801		 * subset bound case
6802		 */
6803		struct sctp_laddr *laddr;
6804
6805		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6806		    sctp_nxt_addr) {
6807			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6808				continue;
6809			}
6810			/* count this one */
6811			count++;
6812		}
6813	}
6814	SCTP_IPI_ADDR_RUNLOCK();
6815	return (count);
6816}
6817
6818#if defined(SCTP_LOCAL_TRACE_BUF)
6819
6820void
6821sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6822{
6823	uint32_t saveindex, newindex;
6824
6825	do {
6826		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6827		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6828			newindex = 1;
6829		} else {
6830			newindex = saveindex + 1;
6831		}
6832	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6833	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6834		saveindex = 0;
6835	}
6836	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6837	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6838	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6839	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6840	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6841	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6842	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6843	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6844}
6845
6846#endif
/*
 * udp_set_kernel_tunneling() callback: receives a UDP-encapsulated SCTP
 * packet, strips the UDP header from the mbuf chain, and re-injects the
 * packet into the normal v4/v6 SCTP input path with the UDP source port
 * recorded. Consumes the mbuf on every path.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	/* Fix up the packet header length after the splice. */
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the payload length to account for the removed UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6919
void
sctp_over_udp_stop(void)
{
	/*
	 * Close and clear the kernel UDP tunneling sockets. The sysctl
	 * caller is assumed to hold sctp_sysctl_info_lock() for writing.
	 */
#ifdef INET
	{
		struct socket *tun4;

		tun4 = SCTP_BASE_INFO(udp4_tun_socket);
		if (tun4 != NULL) {
			soclose(tun4);
			SCTP_BASE_INFO(udp4_tun_socket) = NULL;
		}
	}
#endif
#ifdef INET6
	{
		struct socket *tun6;

		tun6 = SCTP_BASE_INFO(udp6_tun_socket);
		if (tun6 != NULL) {
			soclose(tun6);
			SCTP_BASE_INFO(udp6_tun_socket) = NULL;
		}
	}
#endif
}
6940
/*
 * Bring up the kernel UDP tunneling sockets (one per address family)
 * on the configured sctp_udp_tunneling_port, installing
 * sctp_recv_udp_tunneled_packet() as the tunneling callback. On any
 * partial failure all tunneling state is torn down via
 * sctp_over_udp_stop(). Returns 0 or an errno value.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): the sysctl value is checked via ntohs() here but
	 * passed through htons() to sobind() below -- confirm the byte
	 * order the sysctl stores (a zero test is byte-order neutral).
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7026