sctputil.c revision 296052
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 296052 2016-02-25 18:46:06Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220#ifdef SCTP_MBUF_LOGGING
221void
222sctp_log_mb(struct mbuf *m, int from)
223{
224	struct sctp_cwnd_log sctp_clog;
225
226	sctp_clog.x.mb.mp = m;
227	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230	if (SCTP_BUF_IS_EXTENDED(m)) {
231		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233	} else {
234		sctp_clog.x.mb.ext = 0;
235		sctp_clog.x.mb.refcnt = 0;
236	}
237	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238	    SCTP_LOG_EVENT_MBUF,
239	    from,
240	    sctp_clog.x.misc.log1,
241	    sctp_clog.x.misc.log2,
242	    sctp_clog.x.misc.log3,
243	    sctp_clog.x.misc.log4);
244}
245
246void
247sctp_log_mbc(struct mbuf *m, int from)
248{
249	struct mbuf *mat;
250
251	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252		sctp_log_mb(mat, from);
253	}
254}
255
256#endif
257
258void
259sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260{
261	struct sctp_cwnd_log sctp_clog;
262
263	if (control == NULL) {
264		SCTP_PRINTF("Gak log of NULL?\n");
265		return;
266	}
267	sctp_clog.x.strlog.stcb = control->stcb;
268	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270	sctp_clog.x.strlog.strm = control->sinfo_stream;
271	if (poschk != NULL) {
272		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274	} else {
275		sctp_clog.x.strlog.e_tsn = 0;
276		sctp_clog.x.strlog.e_sseq = 0;
277	}
278	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279	    SCTP_LOG_EVENT_STRM,
280	    from,
281	    sctp_clog.x.misc.log1,
282	    sctp_clog.x.misc.log2,
283	    sctp_clog.x.misc.log3,
284	    sctp_clog.x.misc.log4);
285}
286
287void
288sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289{
290	struct sctp_cwnd_log sctp_clog;
291
292	sctp_clog.x.cwnd.net = net;
293	if (stcb->asoc.send_queue_cnt > 255)
294		sctp_clog.x.cwnd.cnt_in_send = 255;
295	else
296		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297	if (stcb->asoc.stream_queue_cnt > 255)
298		sctp_clog.x.cwnd.cnt_in_str = 255;
299	else
300		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301
302	if (net) {
303		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304		sctp_clog.x.cwnd.inflight = net->flight_size;
305		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308	}
309	if (SCTP_CWNDLOG_PRESEND == from) {
310		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311	}
312	sctp_clog.x.cwnd.cwnd_augment = augment;
313	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314	    SCTP_LOG_EVENT_CWND,
315	    from,
316	    sctp_clog.x.misc.log1,
317	    sctp_clog.x.misc.log2,
318	    sctp_clog.x.misc.log3,
319	    sctp_clog.x.misc.log4);
320}
321
/*
 * Snapshot the ownership state of the locks relevant to this inp/stcb
 * pair (tcb, inp, create, global info, and socket buffer mutexes) and
 * emit it as a KTR record. Fields for objects that are NULL are set to
 * SCTP_LOCK_UNKNOWN. inp and stcb may each be NULL.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* write-ownership of the global endpoint info lock */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both read
		 * so_rcv.sb_mtx - presumably intentional (the socket lock
		 * is the receive buffer mutex on FreeBSD), but confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365
366void
367sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368{
369	struct sctp_cwnd_log sctp_clog;
370
371	memset(&sctp_clog, 0, sizeof(sctp_clog));
372	sctp_clog.x.cwnd.net = net;
373	sctp_clog.x.cwnd.cwnd_new_value = error;
374	sctp_clog.x.cwnd.inflight = net->flight_size;
375	sctp_clog.x.cwnd.cwnd_augment = burst;
376	if (stcb->asoc.send_queue_cnt > 255)
377		sctp_clog.x.cwnd.cnt_in_send = 255;
378	else
379		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380	if (stcb->asoc.stream_queue_cnt > 255)
381		sctp_clog.x.cwnd.cnt_in_str = 255;
382	else
383		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385	    SCTP_LOG_EVENT_MAXBURST,
386	    from,
387	    sctp_clog.x.misc.log1,
388	    sctp_clog.x.misc.log2,
389	    sctp_clog.x.misc.log3,
390	    sctp_clog.x.misc.log4);
391}
392
393void
394sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395{
396	struct sctp_cwnd_log sctp_clog;
397
398	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399	sctp_clog.x.rwnd.send_size = snd_size;
400	sctp_clog.x.rwnd.overhead = overhead;
401	sctp_clog.x.rwnd.new_rwnd = 0;
402	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403	    SCTP_LOG_EVENT_RWND,
404	    from,
405	    sctp_clog.x.misc.log1,
406	    sctp_clog.x.misc.log2,
407	    sctp_clog.x.misc.log3,
408	    sctp_clog.x.misc.log4);
409}
410
411void
412sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413{
414	struct sctp_cwnd_log sctp_clog;
415
416	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417	sctp_clog.x.rwnd.send_size = flight_size;
418	sctp_clog.x.rwnd.overhead = overhead;
419	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421	    SCTP_LOG_EVENT_RWND,
422	    from,
423	    sctp_clog.x.misc.log1,
424	    sctp_clog.x.misc.log2,
425	    sctp_clog.x.misc.log3,
426	    sctp_clog.x.misc.log4);
427}
428
429#ifdef SCTP_MBCNT_LOGGING
430static void
431sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432{
433	struct sctp_cwnd_log sctp_clog;
434
435	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436	sctp_clog.x.mbcnt.size_change = book;
437	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440	    SCTP_LOG_EVENT_MBCNT,
441	    from,
442	    sctp_clog.x.misc.log1,
443	    sctp_clog.x.misc.log2,
444	    sctp_clog.x.misc.log3,
445	    sctp_clog.x.misc.log4);
446}
447
448#endif
449
/*
 * Generic KTR trace: log four caller-supplied 32-bit values under
 * SCTP_LOG_MISC_EVENT, tagged with the call-site code "from".
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458
459void
460sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461{
462	struct sctp_cwnd_log sctp_clog;
463
464	sctp_clog.x.wake.stcb = (void *)stcb;
465	sctp_clog.x.wake.wake_cnt = wake_cnt;
466	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469
470	if (stcb->asoc.stream_queue_cnt < 0xff)
471		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472	else
473		sctp_clog.x.wake.stream_qcnt = 0xff;
474
475	if (stcb->asoc.chunks_on_out_queue < 0xff)
476		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477	else
478		sctp_clog.x.wake.chunks_on_oque = 0xff;
479
480	sctp_clog.x.wake.sctpflags = 0;
481	/* set in the defered mode stuff */
482	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483		sctp_clog.x.wake.sctpflags |= 1;
484	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485		sctp_clog.x.wake.sctpflags |= 2;
486	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487		sctp_clog.x.wake.sctpflags |= 4;
488	/* what about the sb */
489	if (stcb->sctp_socket) {
490		struct socket *so = stcb->sctp_socket;
491
492		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493	} else {
494		sctp_clog.x.wake.sbflags = 0xff;
495	}
496	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497	    SCTP_LOG_EVENT_WAKE,
498	    from,
499	    sctp_clog.x.misc.log1,
500	    sctp_clog.x.misc.log2,
501	    sctp_clog.x.misc.log3,
502	    sctp_clog.x.misc.log4);
503}
504
505void
506sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507{
508	struct sctp_cwnd_log sctp_clog;
509
510	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516	sctp_clog.x.blk.sndlen = sendlen;
517	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518	    SCTP_LOG_EVENT_BLOCK,
519	    from,
520	    sctp_clog.x.misc.log1,
521	    sctp_clog.x.misc.log2,
522	    sctp_clog.x.misc.log3,
523	    sctp_clog.x.misc.log4);
524}
525
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: nothing is copied out
 * and 0 (success) is always returned; the log is expected to be read via
 * the kernel trace facility instead.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532
533#ifdef SCTP_AUDITING_ENABLED
534uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535static int sctp_audit_indx = 0;
536
537static
538void
539sctp_print_audit_report(void)
540{
541	int i;
542	int cnt;
543
544	cnt = 0;
545	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546		if ((sctp_audit_data[i][0] == 0xe0) &&
547		    (sctp_audit_data[i][1] == 0x01)) {
548			cnt = 0;
549			SCTP_PRINTF("\n");
550		} else if (sctp_audit_data[i][0] == 0xf0) {
551			cnt = 0;
552			SCTP_PRINTF("\n");
553		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554		    (sctp_audit_data[i][1] == 0x01)) {
555			SCTP_PRINTF("\n");
556			cnt = 0;
557		}
558		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559		    (uint32_t) sctp_audit_data[i][1]);
560		cnt++;
561		if ((cnt % 14) == 0)
562			SCTP_PRINTF("\n");
563	}
564	for (i = 0; i < sctp_audit_indx; i++) {
565		if ((sctp_audit_data[i][0] == 0xe0) &&
566		    (sctp_audit_data[i][1] == 0x01)) {
567			cnt = 0;
568			SCTP_PRINTF("\n");
569		} else if (sctp_audit_data[i][0] == 0xf0) {
570			cnt = 0;
571			SCTP_PRINTF("\n");
572		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573		    (sctp_audit_data[i][1] == 0x01)) {
574			SCTP_PRINTF("\n");
575			cnt = 0;
576		}
577		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578		    (uint32_t) sctp_audit_data[i][1]);
579		cnt++;
580		if ((cnt % 14) == 0)
581			SCTP_PRINTF("\n");
582	}
583	SCTP_PRINTF("\n");
584}
585
/*
 * Consistency audit of an association's retransmission and flight-size
 * accounting. Records a trail of marker bytes in the circular audit
 * buffer, cross-checks the counters kept in stcb->asoc against the
 * actual contents of the sent queue and the per-net flight sizes, and
 * CORRECTS the counters in place when they disagree. If any mismatch
 * was found, the full audit buffer is printed.
 *
 * "from" tags the call site; inp and stcb may be NULL (an 0xAF marker
 * is logged and the audit is skipped). NOTE(review): the "net"
 * parameter is not used in this body.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marker: an audit pass started here. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no inp, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no stcb, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retransmit count going into the audit. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue counting chunks marked for resend and the
	 * bytes/chunks still counted as in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight bytes mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the per-net flight sizes sum to the asoc total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sums disagree; rebuild each from the sent queue. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
715
716void
717sctp_audit_log(uint8_t ev, uint8_t fd)
718{
719
720	sctp_audit_data[sctp_audit_indx][0] = ev;
721	sctp_audit_data[sctp_audit_indx][1] = fd;
722	sctp_audit_indx++;
723	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724		sctp_audit_indx = 0;
725	}
726}
727
728#endif
729
730/*
731 * sctp_stop_timers_for_shutdown() should be called
732 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733 * state to make sure that all timers are stopped.
734 */
735void
736sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737{
738	struct sctp_association *asoc;
739	struct sctp_nets *net;
740
741	asoc = &stcb->asoc;
742
743	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751	}
752}
753
754/*
755 * a list of sizes based on typical mtu's, used only if next hop size not
756 * returned.
757 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val. If val is
 * already at or below the smallest entry, val itself is returned.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	size_t idx;
	const size_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* advance while the entry is still below val */
	idx = 1;
	while ((idx < count) && (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest table MTU strictly larger than val. If val is at
 * or above the largest entry, val itself is returned.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	size_t idx;
	const size_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);

	for (idx = 0; idx < count; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
816
/*
 * Refill the endpoint's random_store by HMACing the endpoint's random
 * seed with its incrementing counter, and reset the consumption offset.
 * Consumed by sctp_select_initial_TSN().
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Restart consumption at the front of the refreshed store. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump so the next refill hashes different input */
	m->random_counter++;
}
835
/*
 * Hand out a 32-bit pseudo-random value from the endpoint's random
 * store, refilling the store when it wraps. With
 * initial_sequence_debug set, returns a simple incrementing sequence
 * instead (deterministic debug mode).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of 4 bytes from the store: advance store_at via
	 * CAS, retrying if another thread raced us to the same slot.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	/* NOTE(review): 4-byte read through a cast pointer - assumes
	 * random_store offsets are suitably aligned; confirm. */
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
873
874uint32_t
875sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876{
877	uint32_t x;
878	struct timeval now;
879
880	if (check) {
881		(void)SCTP_GETTIME_TIMEVAL(&now);
882	}
883	for (;;) {
884		x = sctp_select_initial_TSN(&inp->sctp_ep);
885		if (x == 0) {
886			/* we never use 0 */
887			continue;
888		}
889		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890			break;
891		}
892	}
893	return (x);
894}
895
896int32_t
897sctp_map_assoc_state(int kernel_state)
898{
899	int32_t user_state;
900
901	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902		user_state = SCTP_CLOSED;
903	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904		user_state = SCTP_SHUTDOWN_PENDING;
905	} else {
906		switch (kernel_state & SCTP_STATE_MASK) {
907		case SCTP_STATE_EMPTY:
908			user_state = SCTP_CLOSED;
909			break;
910		case SCTP_STATE_INUSE:
911			user_state = SCTP_CLOSED;
912			break;
913		case SCTP_STATE_COOKIE_WAIT:
914			user_state = SCTP_COOKIE_WAIT;
915			break;
916		case SCTP_STATE_COOKIE_ECHOED:
917			user_state = SCTP_COOKIE_ECHOED;
918			break;
919		case SCTP_STATE_OPEN:
920			user_state = SCTP_ESTABLISHED;
921			break;
922		case SCTP_STATE_SHUTDOWN_SENT:
923			user_state = SCTP_SHUTDOWN_SENT;
924			break;
925		case SCTP_STATE_SHUTDOWN_RECEIVED:
926			user_state = SCTP_SHUTDOWN_RECEIVED;
927			break;
928		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929			user_state = SCTP_SHUTDOWN_ACK_SENT;
930			break;
931		default:
932			user_state = SCTP_CLOSED;
933			break;
934		}
935	}
936	return (user_state);
937}
938
939int
940sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
942{
943	struct sctp_association *asoc;
944
945	/*
946	 * Anything set to zero is taken care of by the allocation routine's
947	 * bzero
948	 */
949
950	/*
951	 * Up front select what scoping to apply on addresses I tell my peer
952	 * Not sure what to do with these right now, we will need to come up
953	 * with a way to set them. We may need to pass them through from the
954	 * caller in the sctp_aloc_assoc() function.
955	 */
956	int i;
957
958#if defined(SCTP_DETAILED_STR_STATS)
959	int j;
960
961#endif
962
963	asoc = &stcb->asoc;
964	/* init all variables to a known value. */
965	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966	asoc->max_burst = inp->sctp_ep.max_burst;
967	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971	asoc->ecn_supported = inp->ecn_supported;
972	asoc->prsctp_supported = inp->prsctp_supported;
973	asoc->auth_supported = inp->auth_supported;
974	asoc->asconf_supported = inp->asconf_supported;
975	asoc->reconfig_supported = inp->reconfig_supported;
976	asoc->nrsack_supported = inp->nrsack_supported;
977	asoc->pktdrop_supported = inp->pktdrop_supported;
978	asoc->sctp_cmt_pf = (uint8_t) 0;
979	asoc->sctp_frag_point = inp->sctp_frag_point;
980	asoc->sctp_features = inp->sctp_features;
981	asoc->default_dscp = inp->sctp_ep.default_dscp;
982	asoc->max_cwnd = inp->max_cwnd;
983#ifdef INET6
984	if (inp->sctp_ep.default_flowlabel) {
985		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
986	} else {
987		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
988			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
989			asoc->default_flowlabel &= 0x000fffff;
990			asoc->default_flowlabel |= 0x80000000;
991		} else {
992			asoc->default_flowlabel = 0;
993		}
994	}
995#endif
996	asoc->sb_send_resv = 0;
997	if (override_tag) {
998		asoc->my_vtag = override_tag;
999	} else {
1000		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1001	}
1002	/* Get the nonce tags */
1003	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1004	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005	asoc->vrf_id = vrf_id;
1006
1007#ifdef SCTP_ASOCLOG_OF_TSNS
1008	asoc->tsn_in_at = 0;
1009	asoc->tsn_out_at = 0;
1010	asoc->tsn_in_wrapped = 0;
1011	asoc->tsn_out_wrapped = 0;
1012	asoc->cumack_log_at = 0;
1013	asoc->cumack_log_atsnt = 0;
1014#endif
1015#ifdef SCTP_FS_SPEC_LOG
1016	asoc->fs_index = 0;
1017#endif
1018	asoc->refcnt = 0;
1019	asoc->assoc_up_sent = 0;
1020	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1021	    sctp_select_initial_TSN(&inp->sctp_ep);
1022	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1023	/* we are optimisitic here */
1024	asoc->peer_supports_nat = 0;
1025	asoc->sent_queue_retran_cnt = 0;
1026
1027	/* for CMT */
1028	asoc->last_net_cmt_send_started = NULL;
1029
1030	/* This will need to be adjusted */
1031	asoc->last_acked_seq = asoc->init_seq_number - 1;
1032	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1033	asoc->asconf_seq_in = asoc->last_acked_seq;
1034
1035	/* here we are different, we hold the next one we expect */
1036	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1037
1038	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1039	asoc->initial_rto = inp->sctp_ep.initial_rto;
1040
1041	asoc->max_init_times = inp->sctp_ep.max_init_times;
1042	asoc->max_send_times = inp->sctp_ep.max_send_times;
1043	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1044	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1045	asoc->free_chunk_cnt = 0;
1046
1047	asoc->iam_blocking = 0;
1048	asoc->context = inp->sctp_context;
1049	asoc->local_strreset_support = inp->local_strreset_support;
1050	asoc->def_send = inp->def_send;
1051	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1052	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1053	asoc->pr_sctp_cnt = 0;
1054	asoc->total_output_queue_size = 0;
1055
1056	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1057		asoc->scope.ipv6_addr_legal = 1;
1058		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1059			asoc->scope.ipv4_addr_legal = 1;
1060		} else {
1061			asoc->scope.ipv4_addr_legal = 0;
1062		}
1063	} else {
1064		asoc->scope.ipv6_addr_legal = 0;
1065		asoc->scope.ipv4_addr_legal = 1;
1066	}
1067
1068	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1069	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1070
1071	asoc->smallest_mtu = inp->sctp_frag_point;
1072	asoc->minrto = inp->sctp_ep.sctp_minrto;
1073	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1074
1075	asoc->locked_on_sending = NULL;
1076	asoc->stream_locked_on = 0;
1077	asoc->ecn_echo_cnt_onq = 0;
1078	asoc->stream_locked = 0;
1079
1080	asoc->send_sack = 1;
1081
1082	LIST_INIT(&asoc->sctp_restricted_addrs);
1083
1084	TAILQ_INIT(&asoc->nets);
1085	TAILQ_INIT(&asoc->pending_reply_queue);
1086	TAILQ_INIT(&asoc->asconf_ack_sent);
1087	/* Setup to fill the hb random cache at first HB */
1088	asoc->hb_random_idx = 4;
1089
1090	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091
1092	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094
1095	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097
1098	/*
1099	 * Now the stream parameters, here we allocate space for all streams
1100	 * that we request by default.
1101	 */
1102	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103	    o_strms;
1104	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106	    SCTP_M_STRMO);
1107	if (asoc->strmout == NULL) {
1108		/* big trouble no memory */
1109		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110		return (ENOMEM);
1111	}
1112	for (i = 0; i < asoc->streamoutcnt; i++) {
1113		/*
1114		 * inbound side must be set to 0xffff, also NOTE when we get
1115		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116		 * count (streamoutcnt) but first check if we sent to any of
1117		 * the upper streams that were dropped (if some were). Those
1118		 * that were dropped must be notified to the upper layer as
1119		 * failed to send.
1120		 */
1121		asoc->strmout[i].next_sequence_send = 0x0;
1122		TAILQ_INIT(&asoc->strmout[i].outqueue);
1123		asoc->strmout[i].chunks_on_queues = 0;
1124#if defined(SCTP_DETAILED_STR_STATS)
1125		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1126			asoc->strmout[i].abandoned_sent[j] = 0;
1127			asoc->strmout[i].abandoned_unsent[j] = 0;
1128		}
1129#else
1130		asoc->strmout[i].abandoned_sent[0] = 0;
1131		asoc->strmout[i].abandoned_unsent[0] = 0;
1132#endif
1133		asoc->strmout[i].stream_no = i;
1134		asoc->strmout[i].last_msg_incomplete = 0;
1135		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1136		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1137	}
1138	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1139
1140	/* Now the mapping array */
1141	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143	    SCTP_M_MAP);
1144	if (asoc->mapping_array == NULL) {
1145		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147		return (ENOMEM);
1148	}
1149	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151	    SCTP_M_MAP);
1152	if (asoc->nr_mapping_array == NULL) {
1153		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156		return (ENOMEM);
1157	}
1158	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159
1160	/* Now the init of the other outqueues */
1161	TAILQ_INIT(&asoc->free_chunks);
1162	TAILQ_INIT(&asoc->control_send_queue);
1163	TAILQ_INIT(&asoc->asconf_send_queue);
1164	TAILQ_INIT(&asoc->send_queue);
1165	TAILQ_INIT(&asoc->sent_queue);
1166	TAILQ_INIT(&asoc->reasmqueue);
1167	TAILQ_INIT(&asoc->resetHead);
1168	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169	TAILQ_INIT(&asoc->asconf_queue);
1170	/* authentication fields */
1171	asoc->authinfo.random = NULL;
1172	asoc->authinfo.active_keyid = 0;
1173	asoc->authinfo.assoc_key = NULL;
1174	asoc->authinfo.assoc_keyid = 0;
1175	asoc->authinfo.recv_key = NULL;
1176	asoc->authinfo.recv_keyid = 0;
1177	LIST_INIT(&asoc->shared_keys);
1178	asoc->marked_retrans = 0;
1179	asoc->port = inp->sctp_ep.port;
1180	asoc->timoinit = 0;
1181	asoc->timodata = 0;
1182	asoc->timosack = 0;
1183	asoc->timoshutdown = 0;
1184	asoc->timoheartbeat = 0;
1185	asoc->timocookie = 0;
1186	asoc->timoshutdownack = 0;
1187	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188	asoc->discontinuity_time = asoc->start_time;
1189	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190		asoc->abandoned_unsent[i] = 0;
1191		asoc->abandoned_sent[i] = 0;
1192	}
1193	/*
1194	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195	 * freed later when the association is freed.
1196	 */
1197	return (0);
1198}
1199
1200void
1201sctp_print_mapping_array(struct sctp_association *asoc)
1202{
1203	unsigned int i, limit;
1204
1205	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206	    asoc->mapping_array_size,
1207	    asoc->mapping_array_base_tsn,
1208	    asoc->cumulative_tsn,
1209	    asoc->highest_tsn_inside_map,
1210	    asoc->highest_tsn_inside_nr_map);
1211	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212		if (asoc->mapping_array[limit - 1] != 0) {
1213			break;
1214		}
1215	}
1216	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217	for (i = 0; i < limit; i++) {
1218		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219	}
1220	if (limit % 16)
1221		SCTP_PRINTF("\n");
1222	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223		if (asoc->nr_mapping_array[limit - 1]) {
1224			break;
1225		}
1226	}
1227	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228	for (i = 0; i < limit; i++) {
1229		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230	}
1231	if (limit % 16)
1232		SCTP_PRINTF("\n");
1233}
1234
1235int
1236sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237{
1238	/* mapping array needs to grow */
1239	uint8_t *new_array1, *new_array2;
1240	uint32_t new_size;
1241
1242	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246		/* can't get more, forget it */
1247		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248		if (new_array1) {
1249			SCTP_FREE(new_array1, SCTP_M_MAP);
1250		}
1251		if (new_array2) {
1252			SCTP_FREE(new_array2, SCTP_M_MAP);
1253		}
1254		return (-1);
1255	}
1256	memset(new_array1, 0, new_size);
1257	memset(new_array2, 0, new_size);
1258	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262	asoc->mapping_array = new_array1;
1263	asoc->nr_mapping_array = new_array2;
1264	asoc->mapping_array_size = new_size;
1265	return (0);
1266}
1267
1268
/*
 * Core of the asynchronous PCB/association iterator.  Walks the global
 * endpoint list starting at it->inp, filtering endpoints by pcb_flags /
 * pcb_features and associations by asoc_state, and invokes the caller's
 * per-endpoint (function_inp), per-association (function_assoc) and
 * end-of-endpoint (function_inp_end) callbacks.  Periodically drops and
 * reacquires the INFO/ITERATOR locks so other threads can make progress,
 * honoring stop requests posted in sctp_it_ctl.iterator_flags.  Frees
 * the iterator itself (and calls function_atend) when the walk ends.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the hold taken when the iterator was scheduled. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked on the first pass. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the assoc and endpoint via refcounts while
			 * all locks are dropped, then reacquire and check
			 * whether someone asked us to stop in the interim.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1416
1417void
1418sctp_iterator_worker(void)
1419{
1420	struct sctp_iterator *it, *nit;
1421
1422	/* This function is called with the WQ lock in place */
1423
1424	sctp_it_ctl.iterator_running = 1;
1425	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1426		sctp_it_ctl.cur_it = it;
1427		/* now lets work on this one */
1428		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1429		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1430		CURVNET_SET(it->vn);
1431		sctp_iterator_work(it);
1432		sctp_it_ctl.cur_it = NULL;
1433		CURVNET_RESTORE();
1434		SCTP_IPI_ITERATOR_WQ_LOCK();
1435		/* sa_ignore FREED_MEMORY */
1436	}
1437	sctp_it_ctl.iterator_running = 0;
1438	return;
1439}
1440
1441
1442static void
1443sctp_handle_addr_wq(void)
1444{
1445	/* deal with the ADDR wq from the rtsock calls */
1446	struct sctp_laddr *wi, *nwi;
1447	struct sctp_asconf_iterator *asc;
1448
1449	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451	if (asc == NULL) {
1452		/* Try later, no memory */
1453		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454		    (struct sctp_inpcb *)NULL,
1455		    (struct sctp_tcb *)NULL,
1456		    (struct sctp_nets *)NULL);
1457		return;
1458	}
1459	LIST_INIT(&asc->list_of_work);
1460	asc->cnt = 0;
1461
1462	SCTP_WQ_ADDR_LOCK();
1463	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464		LIST_REMOVE(wi, sctp_nxt_addr);
1465		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466		asc->cnt++;
1467	}
1468	SCTP_WQ_ADDR_UNLOCK();
1469
1470	if (asc->cnt == 0) {
1471		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472	} else {
1473		int ret;
1474
1475		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1476		    sctp_asconf_iterator_stcb,
1477		    NULL,	/* No ep end for boundall */
1478		    SCTP_PCB_FLAGS_BOUNDALL,
1479		    SCTP_PCB_ANY_FEATURES,
1480		    SCTP_ASOC_ANY_STATE,
1481		    (void *)asc, 0,
1482		    sctp_asconf_iterator_end, NULL, 0);
1483		if (ret) {
1484			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1485			/*
1486			 * Freeing if we are stopping or put back on the
1487			 * addr_wq.
1488			 */
1489			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1490				sctp_asconf_iterator_end(asc, 0);
1491			} else {
1492				SCTP_WQ_ADDR_LOCK();
1493				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1494					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1495				}
1496				SCTP_WQ_ADDR_UNLOCK();
1497				SCTP_FREE(asc, SCTP_M_ASC_IT);
1498			}
1499		}
1500	}
1501}
1502
/*
 * Callout(9) handler shared by all SCTP timer types.  The sctp_timer
 * argument carries the timer type plus its context: endpoint (inp),
 * association (stcb) and destination address (net), any of which may
 * be NULL depending on the type.  The handler first validates that the
 * timer is still meaningful (not stale, type valid, endpoint/assoc
 * still alive), takes references and the TCB lock, then dispatches on
 * the timer type, and finally releases everything via the
 * get_out/out_decr/out_no_decr exit paths.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/*
	 * stopped_from records how far validation progressed
	 * (0xa001..0xa006) before dispatch or bail-out; debugging aid.
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		/* Only the ADDR_WQ timer may run without an endpoint. */
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* Socket is gone and this type requires one. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we validate and lock it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			/* Assoc is being torn down; only ASOCKILL may run. */
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped_from which timeout type occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard: if there are chunks on the sent queue
			 * but no send timers running, something is wrong,
			 * so start a timer on the first chunk of the sent
			 * queue toward whatever net it was sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Rearm unless heartbeats are disabled for this net. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO (T1-cookie) retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Final teardown of a dying association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1948
1949void
1950sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1951    struct sctp_nets *net)
1952{
1953	uint32_t to_ticks;
1954	struct sctp_timer *tmr;
1955
1956	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1957		return;
1958
1959	tmr = NULL;
1960	if (stcb) {
1961		SCTP_TCB_LOCK_ASSERT(stcb);
1962	}
1963	switch (t_type) {
1964	case SCTP_TIMER_TYPE_ZERO_COPY:
1965		tmr = &inp->sctp_ep.zero_copy_timer;
1966		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1967		break;
1968	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1969		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1970		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1971		break;
1972	case SCTP_TIMER_TYPE_ADDR_WQ:
1973		/* Only 1 tick away :-) */
1974		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1975		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1976		break;
1977	case SCTP_TIMER_TYPE_SEND:
1978		/* Here we use the RTO timer */
1979		{
1980			int rto_val;
1981
1982			if ((stcb == NULL) || (net == NULL)) {
1983				return;
1984			}
1985			tmr = &net->rxt_timer;
1986			if (net->RTO == 0) {
1987				rto_val = stcb->asoc.initial_rto;
1988			} else {
1989				rto_val = net->RTO;
1990			}
1991			to_ticks = MSEC_TO_TICKS(rto_val);
1992		}
1993		break;
1994	case SCTP_TIMER_TYPE_INIT:
1995		/*
1996		 * Here we use the INIT timer default usually about 1
1997		 * minute.
1998		 */
1999		if ((stcb == NULL) || (net == NULL)) {
2000			return;
2001		}
2002		tmr = &net->rxt_timer;
2003		if (net->RTO == 0) {
2004			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2005		} else {
2006			to_ticks = MSEC_TO_TICKS(net->RTO);
2007		}
2008		break;
2009	case SCTP_TIMER_TYPE_RECV:
2010		/*
2011		 * Here we use the Delayed-Ack timer value from the inp
2012		 * ususually about 200ms.
2013		 */
2014		if (stcb == NULL) {
2015			return;
2016		}
2017		tmr = &stcb->asoc.dack_timer;
2018		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2019		break;
2020	case SCTP_TIMER_TYPE_SHUTDOWN:
2021		/* Here we use the RTO of the destination. */
2022		if ((stcb == NULL) || (net == NULL)) {
2023			return;
2024		}
2025		if (net->RTO == 0) {
2026			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2027		} else {
2028			to_ticks = MSEC_TO_TICKS(net->RTO);
2029		}
2030		tmr = &net->rxt_timer;
2031		break;
2032	case SCTP_TIMER_TYPE_HEARTBEAT:
2033		/*
2034		 * the net is used here so that we can add in the RTO. Even
2035		 * though we use a different timer. We also add the HB timer
2036		 * PLUS a random jitter.
2037		 */
2038		if ((stcb == NULL) || (net == NULL)) {
2039			return;
2040		} else {
2041			uint32_t rndval;
2042			uint32_t jitter;
2043
2044			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2045			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2046				return;
2047			}
2048			if (net->RTO == 0) {
2049				to_ticks = stcb->asoc.initial_rto;
2050			} else {
2051				to_ticks = net->RTO;
2052			}
2053			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2054			jitter = rndval % to_ticks;
2055			if (jitter >= (to_ticks >> 1)) {
2056				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2057			} else {
2058				to_ticks = to_ticks - jitter;
2059			}
2060			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2061			    !(net->dest_state & SCTP_ADDR_PF)) {
2062				to_ticks += net->heart_beat_delay;
2063			}
2064			/*
2065			 * Now we must convert the to_ticks that are now in
2066			 * ms to ticks.
2067			 */
2068			to_ticks = MSEC_TO_TICKS(to_ticks);
2069			tmr = &net->hb_timer;
2070		}
2071		break;
2072	case SCTP_TIMER_TYPE_COOKIE:
2073		/*
2074		 * Here we can use the RTO timer from the network since one
2075		 * RTT was compelete. If a retran happened then we will be
2076		 * using the RTO initial value.
2077		 */
2078		if ((stcb == NULL) || (net == NULL)) {
2079			return;
2080		}
2081		if (net->RTO == 0) {
2082			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2083		} else {
2084			to_ticks = MSEC_TO_TICKS(net->RTO);
2085		}
2086		tmr = &net->rxt_timer;
2087		break;
2088	case SCTP_TIMER_TYPE_NEWCOOKIE:
2089		/*
2090		 * nothing needed but the endpoint here ususually about 60
2091		 * minutes.
2092		 */
2093		tmr = &inp->sctp_ep.signature_change;
2094		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2095		break;
2096	case SCTP_TIMER_TYPE_ASOCKILL:
2097		if (stcb == NULL) {
2098			return;
2099		}
2100		tmr = &stcb->asoc.strreset_timer;
2101		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2102		break;
2103	case SCTP_TIMER_TYPE_INPKILL:
2104		/*
2105		 * The inp is setup to die. We re-use the signature_chage
2106		 * timer since that has stopped and we are in the GONE
2107		 * state.
2108		 */
2109		tmr = &inp->sctp_ep.signature_change;
2110		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2111		break;
2112	case SCTP_TIMER_TYPE_PATHMTURAISE:
2113		/*
2114		 * Here we use the value found in the EP for PMTU ususually
2115		 * about 10 minutes.
2116		 */
2117		if ((stcb == NULL) || (net == NULL)) {
2118			return;
2119		}
2120		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2121			return;
2122		}
2123		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2124		tmr = &net->pmtu_timer;
2125		break;
2126	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2127		/* Here we use the RTO of the destination */
2128		if ((stcb == NULL) || (net == NULL)) {
2129			return;
2130		}
2131		if (net->RTO == 0) {
2132			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2133		} else {
2134			to_ticks = MSEC_TO_TICKS(net->RTO);
2135		}
2136		tmr = &net->rxt_timer;
2137		break;
2138	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2139		/*
2140		 * Here we use the endpoints shutdown guard timer usually
2141		 * about 3 minutes.
2142		 */
2143		if (stcb == NULL) {
2144			return;
2145		}
2146		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2147			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2148		} else {
2149			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2150		}
2151		tmr = &stcb->asoc.shut_guard_timer;
2152		break;
2153	case SCTP_TIMER_TYPE_STRRESET:
2154		/*
2155		 * Here the timer comes from the stcb but its value is from
2156		 * the net's RTO.
2157		 */
2158		if ((stcb == NULL) || (net == NULL)) {
2159			return;
2160		}
2161		if (net->RTO == 0) {
2162			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2163		} else {
2164			to_ticks = MSEC_TO_TICKS(net->RTO);
2165		}
2166		tmr = &stcb->asoc.strreset_timer;
2167		break;
2168	case SCTP_TIMER_TYPE_ASCONF:
2169		/*
2170		 * Here the timer comes from the stcb but its value is from
2171		 * the net's RTO.
2172		 */
2173		if ((stcb == NULL) || (net == NULL)) {
2174			return;
2175		}
2176		if (net->RTO == 0) {
2177			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2178		} else {
2179			to_ticks = MSEC_TO_TICKS(net->RTO);
2180		}
2181		tmr = &stcb->asoc.asconf_timer;
2182		break;
2183	case SCTP_TIMER_TYPE_PRIM_DELETED:
2184		if ((stcb == NULL) || (net != NULL)) {
2185			return;
2186		}
2187		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2188		tmr = &stcb->asoc.delete_prim_timer;
2189		break;
2190	case SCTP_TIMER_TYPE_AUTOCLOSE:
2191		if (stcb == NULL) {
2192			return;
2193		}
2194		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2195			/*
2196			 * Really an error since stcb is NOT set to
2197			 * autoclose
2198			 */
2199			return;
2200		}
2201		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2202		tmr = &stcb->asoc.autoclose_timer;
2203		break;
2204	default:
2205		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2206		    __func__, t_type);
2207		return;
2208		break;
2209	}
2210	if ((to_ticks <= 0) || (tmr == NULL)) {
2211		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2212		    __func__, t_type, to_ticks, (void *)tmr);
2213		return;
2214	}
2215	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2216		/*
2217		 * we do NOT allow you to have it already running. if it is
2218		 * we leave the current one up unchanged
2219		 */
2220		return;
2221	}
2222	/* At this point we can proceed */
2223	if (t_type == SCTP_TIMER_TYPE_SEND) {
2224		stcb->asoc.num_send_timers_up++;
2225	}
2226	tmr->stopped_from = 0;
2227	tmr->type = t_type;
2228	tmr->ep = (void *)inp;
2229	tmr->tcb = (void *)stcb;
2230	tmr->net = (void *)net;
2231	tmr->self = (void *)tmr;
2232	tmr->vnet = (void *)curvnet;
2233	tmr->ticks = sctp_get_tick_count();
2234	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2235	return;
2236}
2237
/*
 * Stop a running SCTP timer of type t_type for the given endpoint (inp),
 * association (stcb) and/or destination (net).  'from' is a caller-location
 * code recorded in tmr->stopped_from for debugging.  Counterpart of
 * sctp_timer_start(); silently does nothing when the timer structure is
 * currently armed for a different (joint-use) timer type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ is anchored in an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/*
	 * Map the timer type to the sctp_timer structure that holds it,
	 * returning early when the objects owning that timer are missing.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the pending SEND-timer count in sync; clamp at 0. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clear self-reference before stopping so the handler can detect it. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2394
2395uint32_t
2396sctp_calculate_len(struct mbuf *m)
2397{
2398	uint32_t tlen = 0;
2399	struct mbuf *at;
2400
2401	at = m;
2402	while (at) {
2403		tlen += SCTP_BUF_LEN(at);
2404		at = SCTP_BUF_NEXT(at);
2405	}
2406	return (tlen);
2407}
2408
2409void
2410sctp_mtu_size_reset(struct sctp_inpcb *inp,
2411    struct sctp_association *asoc, uint32_t mtu)
2412{
2413	/*
2414	 * Reset the P-MTU size on this association, this involves changing
2415	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2416	 * allow the DF flag to be cleared.
2417	 */
2418	struct sctp_tmit_chunk *chk;
2419	unsigned int eff_mtu, ovh;
2420
2421	asoc->smallest_mtu = mtu;
2422	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2423		ovh = SCTP_MIN_OVERHEAD;
2424	} else {
2425		ovh = SCTP_MIN_V4_OVERHEAD;
2426	}
2427	eff_mtu = mtu - ovh;
2428	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2429		if (chk->send_size > eff_mtu) {
2430			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2431		}
2432	}
2433	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2434		if (chk->send_size > eff_mtu) {
2435			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2436		}
2437	}
2438}
2439
2440
2441/*
2442 * given an association and starting time of the current RTT period return
2443 * RTO in number of msecs net should point to the current network
2444 */
2445
/*
 * Complete an RTT measurement for destination 'net' (period started at
 * *told), update the smoothed RTT state (lastsa/lastsv) and return the
 * new RTO in milliseconds, bounded by the association's minrto/maxrto.
 * 'safe' tells whether *told must be copied before use (alignment-unsafe
 * callers); 'rtt_from_sack' distinguishes data-derived measurements from
 * HB/INIT-derived ones.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* elapsed = now - start-of-period */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* EWMA update; 'rtt' is reused as the error term below. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Variance must never collapse to zero (RFC 4960 C3, G1). */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Latch out of satellite mode once a lower RTO is seen. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2561
2562/*
2563 * return a pointer to a contiguous piece of data from the given mbuf chain
2564 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2565 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2566 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2567 */
2568caddr_t
2569sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2570{
2571	uint32_t count;
2572	uint8_t *ptr;
2573
2574	ptr = in_ptr;
2575	if ((off < 0) || (len <= 0))
2576		return (NULL);
2577
2578	/* find the desired start location */
2579	while ((m != NULL) && (off > 0)) {
2580		if (off < SCTP_BUF_LEN(m))
2581			break;
2582		off -= SCTP_BUF_LEN(m);
2583		m = SCTP_BUF_NEXT(m);
2584	}
2585	if (m == NULL)
2586		return (NULL);
2587
2588	/* is the current mbuf large enough (eg. contiguous)? */
2589	if ((SCTP_BUF_LEN(m) - off) >= len) {
2590		return (mtod(m, caddr_t)+off);
2591	} else {
2592		/* else, it spans more than one mbuf, so save a temp copy... */
2593		while ((m != NULL) && (len > 0)) {
2594			count = min(SCTP_BUF_LEN(m) - off, len);
2595			bcopy(mtod(m, caddr_t)+off, ptr, count);
2596			len -= count;
2597			ptr += count;
2598			off = 0;
2599			m = SCTP_BUF_NEXT(m);
2600		}
2601		if ((m == NULL) && (len > 0))
2602			return (NULL);
2603		else
2604			return ((caddr_t)in_ptr);
2605	}
2606}
2607
2608
2609
2610struct sctp_paramhdr *
2611sctp_get_next_param(struct mbuf *m,
2612    int offset,
2613    struct sctp_paramhdr *pull,
2614    int pull_limit)
2615{
2616	/* This just provides a typed signature to Peter's Pull routine */
2617	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2618	    (uint8_t *) pull));
2619}
2620
2621
2622struct mbuf *
2623sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2624{
2625	struct mbuf *m_last;
2626	caddr_t dp;
2627
2628	if (padlen > 3) {
2629		return (NULL);
2630	}
2631	if (padlen <= M_TRAILINGSPACE(m)) {
2632		/*
2633		 * The easy way. We hope the majority of the time we hit
2634		 * here :)
2635		 */
2636		m_last = m;
2637	} else {
2638		/* Hard way we must grow the mbuf chain */
2639		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2640		if (m_last == NULL) {
2641			return (NULL);
2642		}
2643		SCTP_BUF_LEN(m_last) = 0;
2644		SCTP_BUF_NEXT(m_last) = NULL;
2645		SCTP_BUF_NEXT(m) = m_last;
2646	}
2647	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2648	SCTP_BUF_LEN(m_last) += padlen;
2649	memset(dp, 0, padlen);
2650	return (m_last);
2651}
2652
2653struct mbuf *
2654sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2655{
2656	/* find the last mbuf in chain and pad it */
2657	struct mbuf *m_at;
2658
2659	if (last_mbuf != NULL) {
2660		return (sctp_add_pad_tombuf(last_mbuf, padval));
2661	} else {
2662		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2663			if (SCTP_BUF_NEXT(m_at) == NULL) {
2664				return (sctp_add_pad_tombuf(m_at, padval));
2665			}
2666		}
2667	}
2668	return (NULL);
2669}
2670
/*
 * Queue an SCTP_ASSOC_CHANGE notification for the association (if the
 * user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT), then, for 1-to-1 style
 * sockets whose association was lost, set so_error and wake any
 * sleepers.  'abort' optionally carries the ABORT chunk to embed in the
 * notification; 'from_peer' selects the errno reported to the socket.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* COMM_UP/RESTART carry feature flags; LOST carries the ABORT. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			/* Only filled in when the big allocation succeeded. */
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			/* readq entry allocation failed; drop the notification */
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated: refused during setup, reset after. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Locally initiated: timed out during setup, aborted after. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock without holding the TCB lock; take a
	 * refcount so the stcb cannot go away while unlocked.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2821
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state/error, provided the user enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT.  Handles v4-mapped-v6 presentation and
 * scope-id recovery for link-local IPv6 addresses.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* Present the v4 address as a v4-mapped IPv6 one. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2913
2914
/*
 * Queue a send-failure notification for chunk 'chk', using either the
 * new SCTP_SEND_FAILED_EVENT or the old SCTP_SEND_FAILED format
 * depending on which feature the user enabled.  Steals chk->data (after
 * trimming its DATA chunk header) and appends it to the notification so
 * the user gets the failed payload back.  'sent' selects
 * SCTP_DATA_SENT vs SCTP_DATA_UNSENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the notification format the user subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/* Reported length covers header plus payload minus chunk hdr. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Old-style (deprecated) sctp_send_failed. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3028
3029
/*
 * Queue a send-failed notification for a user message that was still
 * sitting on a stream output queue (never fragmented into chunks).
 * Depending on which event the application subscribed to, this builds
 * either the newer SCTP_SEND_FAILED_EVENT or the older SCTP_SEND_FAILED
 * layout.  The original message data (sp->data) is stolen from the
 * pending entry and chained behind the notification header so the user
 * gets the unsent payload back.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	/* Nothing to do unless one of the two send-failed events is enabled. */
	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the header size for whichever event format we will emit. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style SCTP_SEND_FAILED_EVENT notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* ssfe_length covers the header plus the unsent user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message already went out as chunks. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Legacy SCTP_SEND_FAILED notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		/* ssf_length covers the header plus the unsent user data. */
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	/* Chain the original payload behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3130
3131
3132
3133static void
3134sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3135{
3136	struct mbuf *m_notify;
3137	struct sctp_adaptation_event *sai;
3138	struct sctp_queued_to_read *control;
3139
3140	if ((stcb == NULL) ||
3141	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3142		/* event not enabled */
3143		return;
3144	}
3145	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3146	if (m_notify == NULL)
3147		/* no space left */
3148		return;
3149	SCTP_BUF_LEN(m_notify) = 0;
3150	sai = mtod(m_notify, struct sctp_adaptation_event *);
3151	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3152	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3153	sai->sai_flags = 0;
3154	sai->sai_length = sizeof(struct sctp_adaptation_event);
3155	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3156	sai->sai_assoc_id = sctp_get_associd(stcb);
3157
3158	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3159	SCTP_BUF_NEXT(m_notify) = NULL;
3160
3161	/* append to socket */
3162	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3163	    0, 0, stcb->asoc.context, 0, 0, 0,
3164	    m_notify);
3165	if (control == NULL) {
3166		/* no memory */
3167		sctp_m_freem(m_notify);
3168		return;
3169	}
3170	control->length = SCTP_BUF_LEN(m_notify);
3171	control->spec_flags = M_NOTIFICATION;
3172	/* not that we need this */
3173	control->tail_mbuf = m_notify;
3174	sctp_add_to_readq(stcb->sctp_ep, stcb,
3175	    control,
3176	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3177}
3178
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Deliver an SCTP_PARTIAL_DELIVERY_EVENT.  The high 16 bits of 'val'
 * carry the stream id, the low 16 bits the stream sequence number.
 * Unlike the other notification helpers this one appends to the
 * endpoint read queue by hand (the caller already holds the read-queue
 * lock), placing the event right after the in-progress partial-delivery
 * control block when one exists.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* The socket can no longer be read from; don't queue. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs (stream << 16) | seq. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/* Reset length to 0; the atomic_add below recomputes it. */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Account the mbuf against the receive socket buffer ourselves. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Queue right behind the partial-delivery message it refers to. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock while taking the socket lock to
			 * respect lock ordering; hold a refcount so the
			 * association cannot be freed in between.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3274
/*
 * Handle a completed SHUTDOWN: for 1-to-1 style (or connected 1-to-many)
 * sockets, mark the socket as unable to send and wake any writer; then,
 * if the application subscribed, deliver an SCTP_SHUTDOWN_EVENT.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock before taking the
		 * socket lock, holding a refcount so the association
		 * survives the gap.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3343
3344static void
3345sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3346    int so_locked
3347#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3348    SCTP_UNUSED
3349#endif
3350)
3351{
3352	struct mbuf *m_notify;
3353	struct sctp_sender_dry_event *event;
3354	struct sctp_queued_to_read *control;
3355
3356	if ((stcb == NULL) ||
3357	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3358		/* event not enabled */
3359		return;
3360	}
3361	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3362	if (m_notify == NULL) {
3363		/* no space left */
3364		return;
3365	}
3366	SCTP_BUF_LEN(m_notify) = 0;
3367	event = mtod(m_notify, struct sctp_sender_dry_event *);
3368	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3369	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3370	event->sender_dry_flags = 0;
3371	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3372	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3373
3374	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3375	SCTP_BUF_NEXT(m_notify) = NULL;
3376
3377	/* append to socket */
3378	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3379	    0, 0, stcb->asoc.context, 0, 0, 0,
3380	    m_notify);
3381	if (control == NULL) {
3382		/* no memory */
3383		sctp_m_freem(m_notify);
3384		return;
3385	}
3386	control->length = SCTP_BUF_LEN(m_notify);
3387	control->spec_flags = M_NOTIFICATION;
3388	/* not that we need this */
3389	control->tail_mbuf = m_notify;
3390	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3391	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3392}
3393
3394
3395void
3396sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3397{
3398	struct mbuf *m_notify;
3399	struct sctp_queued_to_read *control;
3400	struct sctp_stream_change_event *stradd;
3401
3402	if ((stcb == NULL) ||
3403	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3404		/* event not enabled */
3405		return;
3406	}
3407	if ((stcb->asoc.peer_req_out) && flag) {
3408		/* Peer made the request, don't tell the local user */
3409		stcb->asoc.peer_req_out = 0;
3410		return;
3411	}
3412	stcb->asoc.peer_req_out = 0;
3413	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3414	if (m_notify == NULL)
3415		/* no space left */
3416		return;
3417	SCTP_BUF_LEN(m_notify) = 0;
3418	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3419	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3420	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3421	stradd->strchange_flags = flag;
3422	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3423	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3424	stradd->strchange_instrms = numberin;
3425	stradd->strchange_outstrms = numberout;
3426	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3427	SCTP_BUF_NEXT(m_notify) = NULL;
3428	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3429		/* no space */
3430		sctp_m_freem(m_notify);
3431		return;
3432	}
3433	/* append to socket */
3434	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3435	    0, 0, stcb->asoc.context, 0, 0, 0,
3436	    m_notify);
3437	if (control == NULL) {
3438		/* no memory */
3439		sctp_m_freem(m_notify);
3440		return;
3441	}
3442	control->spec_flags = M_NOTIFICATION;
3443	control->length = SCTP_BUF_LEN(m_notify);
3444	/* not that we need this */
3445	control->tail_mbuf = m_notify;
3446	sctp_add_to_readq(stcb->sctp_ep, stcb,
3447	    control,
3448	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3449}
3450
3451void
3452sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3453{
3454	struct mbuf *m_notify;
3455	struct sctp_queued_to_read *control;
3456	struct sctp_assoc_reset_event *strasoc;
3457
3458	if ((stcb == NULL) ||
3459	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3460		/* event not enabled */
3461		return;
3462	}
3463	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3464	if (m_notify == NULL)
3465		/* no space left */
3466		return;
3467	SCTP_BUF_LEN(m_notify) = 0;
3468	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3469	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3470	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3471	strasoc->assocreset_flags = flag;
3472	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3473	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3474	strasoc->assocreset_local_tsn = sending_tsn;
3475	strasoc->assocreset_remote_tsn = recv_tsn;
3476	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3477	SCTP_BUF_NEXT(m_notify) = NULL;
3478	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3479		/* no space */
3480		sctp_m_freem(m_notify);
3481		return;
3482	}
3483	/* append to socket */
3484	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3485	    0, 0, stcb->asoc.context, 0, 0, 0,
3486	    m_notify);
3487	if (control == NULL) {
3488		/* no memory */
3489		sctp_m_freem(m_notify);
3490		return;
3491	}
3492	control->spec_flags = M_NOTIFICATION;
3493	control->length = SCTP_BUF_LEN(m_notify);
3494	/* not that we need this */
3495	control->tail_mbuf = m_notify;
3496	sctp_add_to_readq(stcb->sctp_ep, stcb,
3497	    control,
3498	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3499}
3500
3501
3502
3503static void
3504sctp_notify_stream_reset(struct sctp_tcb *stcb,
3505    int number_entries, uint16_t * list, int flag)
3506{
3507	struct mbuf *m_notify;
3508	struct sctp_queued_to_read *control;
3509	struct sctp_stream_reset_event *strreset;
3510	int len;
3511
3512	if ((stcb == NULL) ||
3513	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3514		/* event not enabled */
3515		return;
3516	}
3517	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3518	if (m_notify == NULL)
3519		/* no space left */
3520		return;
3521	SCTP_BUF_LEN(m_notify) = 0;
3522	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3523	if (len > M_TRAILINGSPACE(m_notify)) {
3524		/* never enough room */
3525		sctp_m_freem(m_notify);
3526		return;
3527	}
3528	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3529	memset(strreset, 0, len);
3530	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3531	strreset->strreset_flags = flag;
3532	strreset->strreset_length = len;
3533	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3534	if (number_entries) {
3535		int i;
3536
3537		for (i = 0; i < number_entries; i++) {
3538			strreset->strreset_stream_list[i] = ntohs(list[i]);
3539		}
3540	}
3541	SCTP_BUF_LEN(m_notify) = len;
3542	SCTP_BUF_NEXT(m_notify) = NULL;
3543	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3544		/* no space */
3545		sctp_m_freem(m_notify);
3546		return;
3547	}
3548	/* append to socket */
3549	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3550	    0, 0, stcb->asoc.context, 0, 0, 0,
3551	    m_notify);
3552	if (control == NULL) {
3553		/* no memory */
3554		sctp_m_freem(m_notify);
3555		return;
3556	}
3557	control->spec_flags = M_NOTIFICATION;
3558	control->length = SCTP_BUF_LEN(m_notify);
3559	/* not that we need this */
3560	control->tail_mbuf = m_notify;
3561	sctp_add_to_readq(stcb->sctp_ep, stcb,
3562	    control,
3563	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3564}
3565
3566
3567static void
3568sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3569{
3570	struct mbuf *m_notify;
3571	struct sctp_remote_error *sre;
3572	struct sctp_queued_to_read *control;
3573	size_t notif_len, chunk_len;
3574
3575	if ((stcb == NULL) ||
3576	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3577		return;
3578	}
3579	if (chunk != NULL) {
3580		chunk_len = ntohs(chunk->ch.chunk_length);
3581	} else {
3582		chunk_len = 0;
3583	}
3584	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3585	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3586	if (m_notify == NULL) {
3587		/* Retry with smaller value. */
3588		notif_len = sizeof(struct sctp_remote_error);
3589		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3590		if (m_notify == NULL) {
3591			return;
3592		}
3593	}
3594	SCTP_BUF_NEXT(m_notify) = NULL;
3595	sre = mtod(m_notify, struct sctp_remote_error *);
3596	memset(sre, 0, notif_len);
3597	sre->sre_type = SCTP_REMOTE_ERROR;
3598	sre->sre_flags = 0;
3599	sre->sre_length = sizeof(struct sctp_remote_error);
3600	sre->sre_error = error;
3601	sre->sre_assoc_id = sctp_get_associd(stcb);
3602	if (notif_len > sizeof(struct sctp_remote_error)) {
3603		memcpy(sre->sre_data, chunk, chunk_len);
3604		sre->sre_length += chunk_len;
3605	}
3606	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3607	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3608	    0, 0, stcb->asoc.context, 0, 0, 0,
3609	    m_notify);
3610	if (control != NULL) {
3611		control->length = SCTP_BUF_LEN(m_notify);
3612		/* not that we need this */
3613		control->tail_mbuf = m_notify;
3614		control->spec_flags = M_NOTIFICATION;
3615		sctp_add_to_readq(stcb->sctp_ep, stcb,
3616		    control,
3617		    &stcb->sctp_socket->so_rcv, 1,
3618		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3619	} else {
3620		sctp_m_freem(m_notify);
3621	}
3622}
3623
3624
/*
 * Central ULP-notification dispatcher: translate an internal
 * SCTP_NOTIFY_* code into the appropriate socket-level event for the
 * application.  'error' and 'data' are overloaded per notification type
 * (e.g. 'error' carries a stream count for the stream-reset cases, and
 * 'data' may be a net, chunk, pending-send, address, or key id).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Reader side shut down; no point queueing events. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* Only announce COMM_UP once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* Peer does not support AUTH; tell the user. */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			/* 'data' is the affected sctp_nets. */
			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is a message still on the stream output queue. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* 'data' points at (stream << 16 | seq). */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In front states report a failed setup, not a lost link. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
		/*
		 * For the stream-reset cases 'error' carries the number of
		 * entries in the uint16_t stream list passed via 'data'.
		 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
		/* For the AUTH cases 'data' smuggles a key id as an integer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3804
/*
 * Flush every pending outbound message (sent queue, send queue, and all
 * per-stream output queues) and report each one to the application as a
 * failed send.  Used when the association is being torn down.
 * 'holds_lock' indicates whether the caller already holds the TCB send
 * lock; 'error' is the cause propagated into each notification.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* No socket left to report anything to. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* Keep the per-stream queued-chunk accounting right. */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* These chunks were already sent at least once. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* These chunks never made it onto the wire. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* Never chunked; report via the SP path. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/*
				 * The notify may have stolen sp->data;
				 * re-check before freeing.
				 */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3916
3917void
3918sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3919    struct sctp_abort_chunk *abort, int so_locked
3920#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3921    SCTP_UNUSED
3922#endif
3923)
3924{
3925	if (stcb == NULL) {
3926		return;
3927	}
3928	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3929	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3930	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3931		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3932	}
3933	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3934	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3935	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3936		return;
3937	}
3938	/* Tell them we lost the asoc */
3939	sctp_report_all_outbound(stcb, error, 1, so_locked);
3940	if (from_peer) {
3941		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3942	} else {
3943		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3944	}
3945}
3946
/*
 * Abort an association: notify the application (if a TCB exists), send
 * an ABORT chunk back toward the peer using the supplied packet
 * context, then free the association.  'op_err' (may be NULL) is the
 * operational-error cause chain included in the ABORT.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* With no TCB, vtag stays 0 (out-of-the-blue style ABORT). */
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock before freeing,
		 * holding a refcount across the TCB unlock/relock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established-ish states count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3996
3997#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper (compiled only under SCTP_ASOCLOG_OF_TSNS): dump the
 * per-association inbound and outbound TSN ring-buffer logs.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	/*
	 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS";
	 * as written, the body below is compiled out unless the misspelled
	 * macro is defined.  Confirm intent before renaming, since defining
	 * the corrected macro would enable this output.
	 */
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* If the ring wrapped, first print the older tail of the buffer. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer entries from the start of the buffer. */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4058
4059#endif
4060
/*
 * Abort an existing association from the local side: notify the ULP
 * (unless the socket is gone), send an ABORT with op_err to the peer,
 * and free the TCB.  With stcb == NULL the endpoint is freed instead if
 * its socket is gone and it has no remaining associations.  On return a
 * non-NULL stcb has been freed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established assoc leaves the "currently established" gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock before the TCB lock (required ordering);
	 * hold a refcount across the gap so the TCB cannot be freed.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4122
/*
 * Handle an "out of the blue" packet (one for which no association
 * exists).  Walk the chunks: some chunk types forbid any response
 * (PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE), SHUTDOWN-ACK gets a
 * SHUTDOWN-COMPLETE, and otherwise an ABORT is sent back, subject to
 * the sctp_blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Endpoint is going away; free it once it has no assocs. */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole==1 suppresses the ABORT only for packets carrying an INIT */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4188
4189/*
4190 * check the inbound datagram to make sure there is not an abort inside it,
4191 * if there is return 1, else return 0.
4192 */
4193int
4194sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4195{
4196	struct sctp_chunkhdr *ch;
4197	struct sctp_init_chunk *init_chk, chunk_buf;
4198	int offset;
4199	unsigned int chk_length;
4200
4201	offset = iphlen + sizeof(struct sctphdr);
4202	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4203	    (uint8_t *) & chunk_buf);
4204	while (ch != NULL) {
4205		chk_length = ntohs(ch->chunk_length);
4206		if (chk_length < sizeof(*ch)) {
4207			/* packet is probably corrupt */
4208			break;
4209		}
4210		/* we seem to be ok, is it an abort? */
4211		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4212			/* yep, tell them */
4213			return (1);
4214		}
4215		if (ch->chunk_type == SCTP_INITIATION) {
4216			/* need to update the Vtag */
4217			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4218			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4219			if (init_chk != NULL) {
4220				*vtagfill = ntohl(init_chk->init.initiate_tag);
4221			}
4222		}
4223		/* Nope, move to the next chunk */
4224		offset += SCTP_SIZE32(chk_length);
4225		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4226		    sizeof(*ch), (uint8_t *) & chunk_buf);
4227	}
4228	return (0);
4229}
4230
4231/*
4232 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4233 * set (i.e. it's 0) so, create this function to compare link local scopes
4234 */
4235#ifdef INET6
4236uint32_t
4237sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4238{
4239	struct sockaddr_in6 a, b;
4240
4241	/* save copies */
4242	a = *addr1;
4243	b = *addr2;
4244
4245	if (a.sin6_scope_id == 0)
4246		if (sa6_recoverscope(&a)) {
4247			/* can't get scope, so can't match */
4248			return (0);
4249		}
4250	if (b.sin6_scope_id == 0)
4251		if (sa6_recoverscope(&b)) {
4252			/* can't get scope, so can't match */
4253			return (0);
4254		}
4255	if (a.sin6_scope_id != b.sin6_scope_id)
4256		return (0);
4257
4258	return (1);
4259}
4260
4261/*
4262 * returns a sockaddr_in6 with embedded scope recovered and removed
4263 */
4264struct sockaddr_in6 *
4265sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4266{
4267	/* check and strip embedded scope junk */
4268	if (addr->sin6_family == AF_INET6) {
4269		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4270			if (addr->sin6_scope_id == 0) {
4271				*store = *addr;
4272				if (!sa6_recoverscope(store)) {
4273					/* use the recovered scope */
4274					addr = store;
4275				}
4276			} else {
4277				/* else, return the original "to" addr */
4278				in6_clearscope(&addr->sin6_addr);
4279			}
4280		}
4281	}
4282	return (addr);
4283}
4284
4285#endif
4286
4287/*
4288 * are the two addresses the same?  currently a "scopeless" check returns: 1
4289 * if same, 0 if not
4290 */
/*
 * Scopeless address equality check.  Returns 1 when sa1 and sa2 are the
 * same address, 0 when they differ, are of different (or unsupported)
 * families, or either pointer is NULL.
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* Both pointers must be valid and of the same family. */
	if ((sa1 == NULL) || (sa2 == NULL) ||
	    (sa1->sa_family != sa2->sa_family)) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6: delegate to the scopeless comparison macro. */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4: compare the 32-bit addresses directly. */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4332
4333void
4334sctp_print_address(struct sockaddr *sa)
4335{
4336#ifdef INET6
4337	char ip6buf[INET6_ADDRSTRLEN];
4338
4339#endif
4340
4341	switch (sa->sa_family) {
4342#ifdef INET6
4343	case AF_INET6:
4344		{
4345			struct sockaddr_in6 *sin6;
4346
4347			sin6 = (struct sockaddr_in6 *)sa;
4348			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4349			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4350			    ntohs(sin6->sin6_port),
4351			    sin6->sin6_scope_id);
4352			break;
4353		}
4354#endif
4355#ifdef INET
4356	case AF_INET:
4357		{
4358			struct sockaddr_in *sin;
4359			unsigned char *p;
4360
4361			sin = (struct sockaddr_in *)sa;
4362			p = (unsigned char *)&sin->sin_addr;
4363			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4364			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4365			break;
4366		}
4367#endif
4368	default:
4369		SCTP_PRINTF("?\n");
4370		break;
4371	}
4372}
4373
/*
 * Move all read-queue control structures belonging to stcb from the old
 * endpoint's socket to the new one (used e.g. by peeloff).  Adjusts the
 * receive socket-buffer accounting on both sockets as the mbuf chains
 * migrate.  If the old socket's receive buffer cannot be locked, the
 * data is left where it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Credit the old socket's rcv buffer for each mbuf. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge the new socket's rcv buffer for each mbuf. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4449
/*
 * Append a control structure (and its mbuf chain) to the endpoint's
 * read queue, charging the socket buffer so select()/read() see the
 * data, then wake up any reader.  Zero-length mbufs are pruned from the
 * chain; if everything collapses away the control is freed instead of
 * queued.  "end" marks the message as complete.  The control is
 * consumed in all paths.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: drop the data and free the control. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, charge the sb for the rest. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Wake any reader blocked on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Socket lock before TCB lock: drop/re-take the TCB
			 * lock with a refcount held so it cannot vanish.
			 */
			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4575
4576
/*
 * Append mbuf chain m to an existing read-queue control (partial
 * delivery or reassembly).  Empty mbufs are pruned, the socket buffer
 * (if sb != NULL) is charged, and control->length grows by the bytes
 * added.  "end" completes the message; ctls_cumack records the highest
 * TSN represented by this data.  Returns 0 on success, -1 when the
 * control is missing/complete or m is NULL, and wakes the reader.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Prune empty mbufs and charge the sb for the rest. */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake any reader blocked on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/* Socket lock before TCB lock; hold a refcount. */
			so = SCTP_INP_SO(inp);
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4726
4727
4728
4729/*************HOLD THIS COMMENT FOR PATCH FILE OF
4730 *************ALTERNATE ROUTING CODE
4731 */
4732
4733/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4734 *************ALTERNATE ROUTING CODE
4735 */
4736
4737struct mbuf *
4738sctp_generate_cause(uint16_t code, char *info)
4739{
4740	struct mbuf *m;
4741	struct sctp_gen_error_cause *cause;
4742	size_t info_len, len;
4743
4744	if ((code == 0) || (info == NULL)) {
4745		return (NULL);
4746	}
4747	info_len = strlen(info);
4748	len = sizeof(struct sctp_paramhdr) + info_len;
4749	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4750	if (m != NULL) {
4751		SCTP_BUF_LEN(m) = len;
4752		cause = mtod(m, struct sctp_gen_error_cause *);
4753		cause->code = htons(code);
4754		cause->length = htons((uint16_t) len);
4755		memcpy(cause->info, info, info_len);
4756	}
4757	return (m);
4758}
4759
4760struct mbuf *
4761sctp_generate_no_user_data_cause(uint32_t tsn)
4762{
4763	struct mbuf *m;
4764	struct sctp_error_no_user_data *no_user_data_cause;
4765	size_t len;
4766
4767	len = sizeof(struct sctp_error_no_user_data);
4768	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4769	if (m != NULL) {
4770		SCTP_BUF_LEN(m) = len;
4771		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4772		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4773		no_user_data_cause->cause.length = htons((uint16_t) len);
4774		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4775	}
4776	return (m);
4777}
4778
4779#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by chunk tp1 (compiled only
 * under SCTP_MBCNT_LOGGING): decrement the association's chunk count
 * and total output queue size, and credit the socket's send buffer for
 * 1-to-1 style sockets.  Both decrements clamp at zero rather than
 * underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero to avoid wrapping the unsigned counter. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Only 1-to-1 style sockets account data in the socket send buffer. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4811
4812#endif
4813
/*
 * Abandon a PR-SCTP message starting at chunk tp1: release every
 * fragment of the message across the sent queue, the send queue, and
 * (if the end of the message was never queued) the stream-out queue,
 * notifying the ULP of each failed chunk.  Abandoned chunks are marked
 * SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can cover them.  Returns the
 * total book size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* Book the abandonment under "sent" or "unsent" statistics. */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* Phase 1: walk fragments from tp1 until the end of the message. */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* Give the peer's rwnd credit back for the dropped data. */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	/* Phase 2: the message may continue on the (untransmitted) send queue. */
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	/* Phase 3: remainder still sits on the stream-out queue. */
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[stream];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				chk->rec.data.stream_seq = strq->next_sequence_send;
				chk->rec.data.stream_number = sp->stream;
				chk->rec.data.payloadtype = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			} else {
				/* reuse the saved chunk as the message's LAST. */
				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			}
			strq->next_sequence_send++;
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Socket lock before TCB lock; hold a refcount across the gap. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5028
5029/*
5030 * checks to see if the given address, sa, is one that is currently known by
5031 * the kernel note: can't distinguish the same address on multiple interfaces
5032 * and doesn't handle multiple addresses with different zone/scope id's note:
5033 * ifa_ifwithaddr() compares the entire sockaddr struct
5034 */
5035struct sctp_ifa *
5036sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5037    int holds_lock)
5038{
5039	struct sctp_laddr *laddr;
5040
5041	if (holds_lock == 0) {
5042		SCTP_INP_RLOCK(inp);
5043	}
5044	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5045		if (laddr->ifa == NULL)
5046			continue;
5047		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5048			continue;
5049#ifdef INET
5050		if (addr->sa_family == AF_INET) {
5051			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5052			    laddr->ifa->address.sin.sin_addr.s_addr) {
5053				/* found him. */
5054				if (holds_lock == 0) {
5055					SCTP_INP_RUNLOCK(inp);
5056				}
5057				return (laddr->ifa);
5058				break;
5059			}
5060		}
5061#endif
5062#ifdef INET6
5063		if (addr->sa_family == AF_INET6) {
5064			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5065			    &laddr->ifa->address.sin6)) {
5066				/* found him. */
5067				if (holds_lock == 0) {
5068					SCTP_INP_RUNLOCK(inp);
5069				}
5070				return (laddr->ifa);
5071				break;
5072			}
5073		}
5074#endif
5075	}
5076	if (holds_lock == 0) {
5077		SCTP_INP_RUNLOCK(inp);
5078	}
5079	return (NULL);
5080}
5081
/*
 * Compute the hash value used to pick the ifa hash bucket for 'addr'.
 * IPv4 folds the 32-bit address against its upper half; IPv6 sums the
 * four 32-bit words and folds the same way.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
#ifdef INET
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)addr;
		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
	}
#endif
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;
		uint32_t hash_of_addr;

		sin6 = (struct sockaddr_in6 *)addr;
		hash_of_addr = sin6->sin6_addr.s6_addr32[0] +
		    sin6->sin6_addr.s6_addr32[1] +
		    sin6->sin6_addr.s6_addr32[2] +
		    sin6->sin6_addr.s6_addr32[3];
		return (hash_of_addr ^ (hash_of_addr >> 16));
	}
#endif
	return (0);
}
5115
5116struct sctp_ifa *
5117sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5118{
5119	struct sctp_ifa *sctp_ifap;
5120	struct sctp_vrf *vrf;
5121	struct sctp_ifalist *hash_head;
5122	uint32_t hash_of_addr;
5123
5124	if (holds_lock == 0)
5125		SCTP_IPI_ADDR_RLOCK();
5126
5127	vrf = sctp_find_vrf(vrf_id);
5128	if (vrf == NULL) {
5129		if (holds_lock == 0)
5130			SCTP_IPI_ADDR_RUNLOCK();
5131		return (NULL);
5132	}
5133	hash_of_addr = sctp_get_ifa_hash_val(addr);
5134
5135	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5136	if (hash_head == NULL) {
5137		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5138		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5139		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5140		sctp_print_address(addr);
5141		SCTP_PRINTF("No such bucket for address\n");
5142		if (holds_lock == 0)
5143			SCTP_IPI_ADDR_RUNLOCK();
5144
5145		return (NULL);
5146	}
5147	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5148		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5149			continue;
5150#ifdef INET
5151		if (addr->sa_family == AF_INET) {
5152			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5153			    sctp_ifap->address.sin.sin_addr.s_addr) {
5154				/* found him. */
5155				if (holds_lock == 0)
5156					SCTP_IPI_ADDR_RUNLOCK();
5157				return (sctp_ifap);
5158				break;
5159			}
5160		}
5161#endif
5162#ifdef INET6
5163		if (addr->sa_family == AF_INET6) {
5164			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5165			    &sctp_ifap->address.sin6)) {
5166				/* found him. */
5167				if (holds_lock == 0)
5168					SCTP_IPI_ADDR_RUNLOCK();
5169				return (sctp_ifap);
5170				break;
5171			}
5172		}
5173#endif
5174	}
5175	if (holds_lock == 0)
5176		SCTP_IPI_ADDR_RUNLOCK();
5177	return (NULL);
5178}
5179
/*
 * Called after the user has consumed data from the receive buffer.
 * Decides whether enough receive-window space has been freed (at least
 * rwnd_req beyond the last value reported to the peer) to justify
 * sending a window-update SACK immediately; otherwise it just
 * accumulates the freed count for a later call.  'hold_rlock' says the
 * caller holds the INP read lock, which must be dropped before taking
 * the TCB lock and reacquired before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is going away; nothing to report. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing new to advertise. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Enough space opened up to be worth a window-update SACK.
		 * Drop the INP read lock (if held) before taking the TCB
		 * lock to respect the lock ordering.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may be dying. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the caller's INP read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5262
5263int
5264sctp_sorecvmsg(struct socket *so,
5265    struct uio *uio,
5266    struct mbuf **mp,
5267    struct sockaddr *from,
5268    int fromlen,
5269    int *msg_flags,
5270    struct sctp_sndrcvinfo *sinfo,
5271    int filling_sinfo)
5272{
5273	/*
5274	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5275	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5276	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5277	 * On the way out we may send out any combination of:
5278	 * MSG_NOTIFICATION MSG_EOR
5279	 *
5280	 */
5281	struct sctp_inpcb *inp = NULL;
5282	int my_len = 0;
5283	int cp_len = 0, error = 0;
5284	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5285	struct mbuf *m = NULL;
5286	struct sctp_tcb *stcb = NULL;
5287	int wakeup_read_socket = 0;
5288	int freecnt_applied = 0;
5289	int out_flags = 0, in_flags = 0;
5290	int block_allowed = 1;
5291	uint32_t freed_so_far = 0;
5292	uint32_t copied_so_far = 0;
5293	int in_eeor_mode = 0;
5294	int no_rcv_needed = 0;
5295	uint32_t rwnd_req = 0;
5296	int hold_sblock = 0;
5297	int hold_rlock = 0;
5298	int slen = 0;
5299	uint32_t held_length = 0;
5300	int sockbuf_lock = 0;
5301
5302	if (uio == NULL) {
5303		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5304		return (EINVAL);
5305	}
5306	if (msg_flags) {
5307		in_flags = *msg_flags;
5308		if (in_flags & MSG_PEEK)
5309			SCTP_STAT_INCR(sctps_read_peeks);
5310	} else {
5311		in_flags = 0;
5312	}
5313	slen = uio->uio_resid;
5314
5315	/* Pull in and set up our int flags */
5316	if (in_flags & MSG_OOB) {
5317		/* Out of band's NOT supported */
5318		return (EOPNOTSUPP);
5319	}
5320	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5321		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5322		return (EINVAL);
5323	}
5324	if ((in_flags & (MSG_DONTWAIT
5325	    | MSG_NBIO
5326	    )) ||
5327	    SCTP_SO_IS_NBIO(so)) {
5328		block_allowed = 0;
5329	}
5330	/* setup the endpoint */
5331	inp = (struct sctp_inpcb *)so->so_pcb;
5332	if (inp == NULL) {
5333		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5334		return (EFAULT);
5335	}
5336	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5337	/* Must be at least a MTU's worth */
5338	if (rwnd_req < SCTP_MIN_RWND)
5339		rwnd_req = SCTP_MIN_RWND;
5340	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5341	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5342		sctp_misc_ints(SCTP_SORECV_ENTER,
5343		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5344	}
5345	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5346		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5347		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5348	}
5349	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5350	if (error) {
5351		goto release_unlocked;
5352	}
5353	sockbuf_lock = 1;
5354restart:
5355
5356
5357restart_nosblocks:
5358	if (hold_sblock == 0) {
5359		SOCKBUF_LOCK(&so->so_rcv);
5360		hold_sblock = 1;
5361	}
5362	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5363	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5364		goto out;
5365	}
5366	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5367		if (so->so_error) {
5368			error = so->so_error;
5369			if ((in_flags & MSG_PEEK) == 0)
5370				so->so_error = 0;
5371			goto out;
5372		} else {
5373			if (so->so_rcv.sb_cc == 0) {
5374				/* indicate EOF */
5375				error = 0;
5376				goto out;
5377			}
5378		}
5379	}
5380	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5381		/* we need to wait for data */
5382		if ((so->so_rcv.sb_cc == 0) &&
5383		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5384		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5385			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5386				/*
5387				 * For active open side clear flags for
5388				 * re-use passive open is blocked by
5389				 * connect.
5390				 */
5391				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5392					/*
5393					 * You were aborted, passive side
5394					 * always hits here
5395					 */
5396					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5397					error = ECONNRESET;
5398				}
5399				so->so_state &= ~(SS_ISCONNECTING |
5400				    SS_ISDISCONNECTING |
5401				    SS_ISCONFIRMING |
5402				    SS_ISCONNECTED);
5403				if (error == 0) {
5404					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5405						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5406						error = ENOTCONN;
5407					}
5408				}
5409				goto out;
5410			}
5411		}
5412		error = sbwait(&so->so_rcv);
5413		if (error) {
5414			goto out;
5415		}
5416		held_length = 0;
5417		goto restart_nosblocks;
5418	} else if (so->so_rcv.sb_cc == 0) {
5419		if (so->so_error) {
5420			error = so->so_error;
5421			if ((in_flags & MSG_PEEK) == 0)
5422				so->so_error = 0;
5423		} else {
5424			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5425			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5426				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5427					/*
5428					 * For active open side clear flags
5429					 * for re-use passive open is
5430					 * blocked by connect.
5431					 */
5432					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5433						/*
5434						 * You were aborted, passive
5435						 * side always hits here
5436						 */
5437						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5438						error = ECONNRESET;
5439					}
5440					so->so_state &= ~(SS_ISCONNECTING |
5441					    SS_ISDISCONNECTING |
5442					    SS_ISCONFIRMING |
5443					    SS_ISCONNECTED);
5444					if (error == 0) {
5445						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5446							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5447							error = ENOTCONN;
5448						}
5449					}
5450					goto out;
5451				}
5452			}
5453			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5454			error = EWOULDBLOCK;
5455		}
5456		goto out;
5457	}
5458	if (hold_sblock == 1) {
5459		SOCKBUF_UNLOCK(&so->so_rcv);
5460		hold_sblock = 0;
5461	}
5462	/* we possibly have data we can read */
5463	/* sa_ignore FREED_MEMORY */
5464	control = TAILQ_FIRST(&inp->read_queue);
5465	if (control == NULL) {
5466		/*
5467		 * This could be happening since the appender did the
5468		 * increment but as not yet did the tailq insert onto the
5469		 * read_queue
5470		 */
5471		if (hold_rlock == 0) {
5472			SCTP_INP_READ_LOCK(inp);
5473		}
5474		control = TAILQ_FIRST(&inp->read_queue);
5475		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5476#ifdef INVARIANTS
5477			panic("Huh, its non zero and nothing on control?");
5478#endif
5479			so->so_rcv.sb_cc = 0;
5480		}
5481		SCTP_INP_READ_UNLOCK(inp);
5482		hold_rlock = 0;
5483		goto restart;
5484	}
5485	if ((control->length == 0) &&
5486	    (control->do_not_ref_stcb)) {
5487		/*
5488		 * Clean up code for freeing assoc that left behind a
5489		 * pdapi.. maybe a peer in EEOR that just closed after
5490		 * sending and never indicated a EOR.
5491		 */
5492		if (hold_rlock == 0) {
5493			hold_rlock = 1;
5494			SCTP_INP_READ_LOCK(inp);
5495		}
5496		control->held_length = 0;
5497		if (control->data) {
5498			/* Hmm there is data here .. fix */
5499			struct mbuf *m_tmp;
5500			int cnt = 0;
5501
5502			m_tmp = control->data;
5503			while (m_tmp) {
5504				cnt += SCTP_BUF_LEN(m_tmp);
5505				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5506					control->tail_mbuf = m_tmp;
5507					control->end_added = 1;
5508				}
5509				m_tmp = SCTP_BUF_NEXT(m_tmp);
5510			}
5511			control->length = cnt;
5512		} else {
5513			/* remove it */
5514			TAILQ_REMOVE(&inp->read_queue, control, next);
5515			/* Add back any hiddend data */
5516			sctp_free_remote_addr(control->whoFrom);
5517			sctp_free_a_readq(stcb, control);
5518		}
5519		if (hold_rlock) {
5520			hold_rlock = 0;
5521			SCTP_INP_READ_UNLOCK(inp);
5522		}
5523		goto restart;
5524	}
5525	if ((control->length == 0) &&
5526	    (control->end_added == 1)) {
5527		/*
5528		 * Do we also need to check for (control->pdapi_aborted ==
5529		 * 1)?
5530		 */
5531		if (hold_rlock == 0) {
5532			hold_rlock = 1;
5533			SCTP_INP_READ_LOCK(inp);
5534		}
5535		TAILQ_REMOVE(&inp->read_queue, control, next);
5536		if (control->data) {
5537#ifdef INVARIANTS
5538			panic("control->data not null but control->length == 0");
5539#else
5540			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5541			sctp_m_freem(control->data);
5542			control->data = NULL;
5543#endif
5544		}
5545		if (control->aux_data) {
5546			sctp_m_free(control->aux_data);
5547			control->aux_data = NULL;
5548		}
5549		sctp_free_remote_addr(control->whoFrom);
5550		sctp_free_a_readq(stcb, control);
5551		if (hold_rlock) {
5552			hold_rlock = 0;
5553			SCTP_INP_READ_UNLOCK(inp);
5554		}
5555		goto restart;
5556	}
5557	if (control->length == 0) {
5558		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5559		    (filling_sinfo)) {
5560			/* find a more suitable one then this */
5561			ctl = TAILQ_NEXT(control, next);
5562			while (ctl) {
5563				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5564				    (ctl->some_taken ||
5565				    (ctl->spec_flags & M_NOTIFICATION) ||
5566				    ((ctl->do_not_ref_stcb == 0) &&
5567				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5568				    ) {
5569					/*-
5570					 * If we have a different TCB next, and there is data
5571					 * present. If we have already taken some (pdapi), OR we can
5572					 * ref the tcb and no delivery as started on this stream, we
5573					 * take it. Note we allow a notification on a different
5574					 * assoc to be delivered..
5575					 */
5576					control = ctl;
5577					goto found_one;
5578				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5579					    (ctl->length) &&
5580					    ((ctl->some_taken) ||
5581					    ((ctl->do_not_ref_stcb == 0) &&
5582					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5583				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5584					/*-
5585					 * If we have the same tcb, and there is data present, and we
5586					 * have the strm interleave feature present. Then if we have
5587					 * taken some (pdapi) or we can refer to tht tcb AND we have
5588					 * not started a delivery for this stream, we can take it.
5589					 * Note we do NOT allow a notificaiton on the same assoc to
5590					 * be delivered.
5591					 */
5592					control = ctl;
5593					goto found_one;
5594				}
5595				ctl = TAILQ_NEXT(ctl, next);
5596			}
5597		}
5598		/*
5599		 * if we reach here, not suitable replacement is available
5600		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5601		 * into the our held count, and its time to sleep again.
5602		 */
5603		held_length = so->so_rcv.sb_cc;
5604		control->held_length = so->so_rcv.sb_cc;
5605		goto restart;
5606	}
5607	/* Clear the held length since there is something to read */
5608	control->held_length = 0;
5609	if (hold_rlock) {
5610		SCTP_INP_READ_UNLOCK(inp);
5611		hold_rlock = 0;
5612	}
5613found_one:
5614	/*
5615	 * If we reach here, control has a some data for us to read off.
5616	 * Note that stcb COULD be NULL.
5617	 */
5618	control->some_taken++;
5619	if (hold_sblock) {
5620		SOCKBUF_UNLOCK(&so->so_rcv);
5621		hold_sblock = 0;
5622	}
5623	stcb = control->stcb;
5624	if (stcb) {
5625		if ((control->do_not_ref_stcb == 0) &&
5626		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5627			if (freecnt_applied == 0)
5628				stcb = NULL;
5629		} else if (control->do_not_ref_stcb == 0) {
5630			/* you can't free it on me please */
5631			/*
5632			 * The lock on the socket buffer protects us so the
5633			 * free code will stop. But since we used the
5634			 * socketbuf lock and the sender uses the tcb_lock
5635			 * to increment, we need to use the atomic add to
5636			 * the refcnt
5637			 */
5638			if (freecnt_applied) {
5639#ifdef INVARIANTS
5640				panic("refcnt already incremented");
5641#else
5642				SCTP_PRINTF("refcnt already incremented?\n");
5643#endif
5644			} else {
5645				atomic_add_int(&stcb->asoc.refcnt, 1);
5646				freecnt_applied = 1;
5647			}
5648			/*
5649			 * Setup to remember how much we have not yet told
5650			 * the peer our rwnd has opened up. Note we grab the
5651			 * value from the tcb from last time. Note too that
5652			 * sack sending clears this when a sack is sent,
5653			 * which is fine. Once we hit the rwnd_req, we then
5654			 * will go to the sctp_user_rcvd() that will not
5655			 * lock until it KNOWs it MUST send a WUP-SACK.
5656			 */
5657			freed_so_far = stcb->freed_by_sorcv_sincelast;
5658			stcb->freed_by_sorcv_sincelast = 0;
5659		}
5660	}
5661	if (stcb &&
5662	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5663	    control->do_not_ref_stcb == 0) {
5664		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5665	}
5666	/* First lets get off the sinfo and sockaddr info */
5667	if ((sinfo) && filling_sinfo) {
5668		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5669		nxt = TAILQ_NEXT(control, next);
5670		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5671		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5672			struct sctp_extrcvinfo *s_extra;
5673
5674			s_extra = (struct sctp_extrcvinfo *)sinfo;
5675			if ((nxt) &&
5676			    (nxt->length)) {
5677				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5678				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5679					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5680				}
5681				if (nxt->spec_flags & M_NOTIFICATION) {
5682					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5683				}
5684				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5685				s_extra->serinfo_next_length = nxt->length;
5686				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5687				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5688				if (nxt->tail_mbuf != NULL) {
5689					if (nxt->end_added) {
5690						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5691					}
5692				}
5693			} else {
5694				/*
5695				 * we explicitly 0 this, since the memcpy
5696				 * got some other things beyond the older
5697				 * sinfo_ that is on the control's structure
5698				 * :-D
5699				 */
5700				nxt = NULL;
5701				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5702				s_extra->serinfo_next_aid = 0;
5703				s_extra->serinfo_next_length = 0;
5704				s_extra->serinfo_next_ppid = 0;
5705				s_extra->serinfo_next_stream = 0;
5706			}
5707		}
5708		/*
5709		 * update off the real current cum-ack, if we have an stcb.
5710		 */
5711		if ((control->do_not_ref_stcb == 0) && stcb)
5712			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5713		/*
5714		 * mask off the high bits, we keep the actual chunk bits in
5715		 * there.
5716		 */
5717		sinfo->sinfo_flags &= 0x00ff;
5718		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5719			sinfo->sinfo_flags |= SCTP_UNORDERED;
5720		}
5721	}
5722#ifdef SCTP_ASOCLOG_OF_TSNS
5723	{
5724		int index, newindex;
5725		struct sctp_pcbtsn_rlog *entry;
5726
5727		do {
5728			index = inp->readlog_index;
5729			newindex = index + 1;
5730			if (newindex >= SCTP_READ_LOG_SIZE) {
5731				newindex = 0;
5732			}
5733		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5734		entry = &inp->readlog[index];
5735		entry->vtag = control->sinfo_assoc_id;
5736		entry->strm = control->sinfo_stream;
5737		entry->seq = control->sinfo_ssn;
5738		entry->sz = control->length;
5739		entry->flgs = control->sinfo_flags;
5740	}
5741#endif
5742	if ((fromlen > 0) && (from != NULL)) {
5743		union sctp_sockstore store;
5744		size_t len;
5745
5746		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5747#ifdef INET6
5748		case AF_INET6:
5749			len = sizeof(struct sockaddr_in6);
5750			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5751			store.sin6.sin6_port = control->port_from;
5752			break;
5753#endif
5754#ifdef INET
5755		case AF_INET:
5756#ifdef INET6
5757			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5758				len = sizeof(struct sockaddr_in6);
5759				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5760				    &store.sin6);
5761				store.sin6.sin6_port = control->port_from;
5762			} else {
5763				len = sizeof(struct sockaddr_in);
5764				store.sin = control->whoFrom->ro._l_addr.sin;
5765				store.sin.sin_port = control->port_from;
5766			}
5767#else
5768			len = sizeof(struct sockaddr_in);
5769			store.sin = control->whoFrom->ro._l_addr.sin;
5770			store.sin.sin_port = control->port_from;
5771#endif
5772			break;
5773#endif
5774		default:
5775			len = 0;
5776			break;
5777		}
5778		memcpy(from, &store, min((size_t)fromlen, len));
5779#ifdef INET6
5780		{
5781			struct sockaddr_in6 lsa6, *from6;
5782
5783			from6 = (struct sockaddr_in6 *)from;
5784			sctp_recover_scope_mac(from6, (&lsa6));
5785		}
5786#endif
5787	}
5788	/* now copy out what data we can */
5789	if (mp == NULL) {
5790		/* copy out each mbuf in the chain up to length */
5791get_more_data:
5792		m = control->data;
5793		while (m) {
5794			/* Move out all we can */
5795			cp_len = (int)uio->uio_resid;
5796			my_len = (int)SCTP_BUF_LEN(m);
5797			if (cp_len > my_len) {
5798				/* not enough in this buf */
5799				cp_len = my_len;
5800			}
5801			if (hold_rlock) {
5802				SCTP_INP_READ_UNLOCK(inp);
5803				hold_rlock = 0;
5804			}
5805			if (cp_len > 0)
5806				error = uiomove(mtod(m, char *), cp_len, uio);
5807			/* re-read */
5808			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5809				goto release;
5810			}
5811			if ((control->do_not_ref_stcb == 0) && stcb &&
5812			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5813				no_rcv_needed = 1;
5814			}
5815			if (error) {
5816				/* error we are out of here */
5817				goto release;
5818			}
5819			if ((SCTP_BUF_NEXT(m) == NULL) &&
5820			    (cp_len >= SCTP_BUF_LEN(m)) &&
5821			    ((control->end_added == 0) ||
5822			    (control->end_added &&
5823			    (TAILQ_NEXT(control, next) == NULL)))
5824			    ) {
5825				SCTP_INP_READ_LOCK(inp);
5826				hold_rlock = 1;
5827			}
5828			if (cp_len == SCTP_BUF_LEN(m)) {
5829				if ((SCTP_BUF_NEXT(m) == NULL) &&
5830				    (control->end_added)) {
5831					out_flags |= MSG_EOR;
5832					if ((control->do_not_ref_stcb == 0) &&
5833					    (control->stcb != NULL) &&
5834					    ((control->spec_flags & M_NOTIFICATION) == 0))
5835						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5836				}
5837				if (control->spec_flags & M_NOTIFICATION) {
5838					out_flags |= MSG_NOTIFICATION;
5839				}
5840				/* we ate up the mbuf */
5841				if (in_flags & MSG_PEEK) {
5842					/* just looking */
5843					m = SCTP_BUF_NEXT(m);
5844					copied_so_far += cp_len;
5845				} else {
5846					/* dispose of the mbuf */
5847					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5848						sctp_sblog(&so->so_rcv,
5849						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5850					}
5851					sctp_sbfree(control, stcb, &so->so_rcv, m);
5852					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5853						sctp_sblog(&so->so_rcv,
5854						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5855					}
5856					copied_so_far += cp_len;
5857					freed_so_far += cp_len;
5858					freed_so_far += MSIZE;
5859					atomic_subtract_int(&control->length, cp_len);
5860					control->data = sctp_m_free(m);
5861					m = control->data;
5862					/*
5863					 * been through it all, must hold sb
5864					 * lock ok to null tail
5865					 */
5866					if (control->data == NULL) {
5867#ifdef INVARIANTS
5868						if ((control->end_added == 0) ||
5869						    (TAILQ_NEXT(control, next) == NULL)) {
5870							/*
5871							 * If the end is not
5872							 * added, OR the
5873							 * next is NOT null
5874							 * we MUST have the
5875							 * lock.
5876							 */
5877							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5878								panic("Hmm we don't own the lock?");
5879							}
5880						}
5881#endif
5882						control->tail_mbuf = NULL;
5883#ifdef INVARIANTS
5884						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5885							panic("end_added, nothing left and no MSG_EOR");
5886						}
5887#endif
5888					}
5889				}
5890			} else {
5891				/* Do we need to trim the mbuf? */
5892				if (control->spec_flags & M_NOTIFICATION) {
5893					out_flags |= MSG_NOTIFICATION;
5894				}
5895				if ((in_flags & MSG_PEEK) == 0) {
5896					SCTP_BUF_RESV_UF(m, cp_len);
5897					SCTP_BUF_LEN(m) -= cp_len;
5898					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5899						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5900					}
5901					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5902					if ((control->do_not_ref_stcb == 0) &&
5903					    stcb) {
5904						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5905					}
5906					copied_so_far += cp_len;
5907					freed_so_far += cp_len;
5908					freed_so_far += MSIZE;
5909					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5910						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5911						    SCTP_LOG_SBRESULT, 0);
5912					}
5913					atomic_subtract_int(&control->length, cp_len);
5914				} else {
5915					copied_so_far += cp_len;
5916				}
5917			}
5918			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5919				break;
5920			}
5921			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5922			    (control->do_not_ref_stcb == 0) &&
5923			    (freed_so_far >= rwnd_req)) {
5924				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5925			}
5926		}		/* end while(m) */
5927		/*
5928		 * At this point we have looked at it all and we either have
5929		 * a MSG_EOR/or read all the user wants... <OR>
5930		 * control->length == 0.
5931		 */
5932		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5933			/* we are done with this control */
5934			if (control->length == 0) {
5935				if (control->data) {
5936#ifdef INVARIANTS
5937					panic("control->data not null at read eor?");
5938#else
5939					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5940					sctp_m_freem(control->data);
5941					control->data = NULL;
5942#endif
5943				}
5944		done_with_control:
5945				if (TAILQ_NEXT(control, next) == NULL) {
5946					/*
5947					 * If we don't have a next we need a
5948					 * lock, if there is a next
5949					 * interrupt is filling ahead of us
5950					 * and we don't need a lock to
5951					 * remove this guy (which is the
5952					 * head of the queue).
5953					 */
5954					if (hold_rlock == 0) {
5955						SCTP_INP_READ_LOCK(inp);
5956						hold_rlock = 1;
5957					}
5958				}
5959				TAILQ_REMOVE(&inp->read_queue, control, next);
5960				/* Add back any hiddend data */
5961				if (control->held_length) {
5962					held_length = 0;
5963					control->held_length = 0;
5964					wakeup_read_socket = 1;
5965				}
5966				if (control->aux_data) {
5967					sctp_m_free(control->aux_data);
5968					control->aux_data = NULL;
5969				}
5970				no_rcv_needed = control->do_not_ref_stcb;
5971				sctp_free_remote_addr(control->whoFrom);
5972				control->data = NULL;
5973				sctp_free_a_readq(stcb, control);
5974				control = NULL;
5975				if ((freed_so_far >= rwnd_req) &&
5976				    (no_rcv_needed == 0))
5977					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5978
5979			} else {
5980				/*
5981				 * The user did not read all of this
5982				 * message, turn off the returned MSG_EOR
5983				 * since we are leaving more behind on the
5984				 * control to read.
5985				 */
5986#ifdef INVARIANTS
5987				if (control->end_added &&
5988				    (control->data == NULL) &&
5989				    (control->tail_mbuf == NULL)) {
5990					panic("Gak, control->length is corrupt?");
5991				}
5992#endif
5993				no_rcv_needed = control->do_not_ref_stcb;
5994				out_flags &= ~MSG_EOR;
5995			}
5996		}
5997		if (out_flags & MSG_EOR) {
5998			goto release;
5999		}
6000		if ((uio->uio_resid == 0) ||
6001		    ((in_eeor_mode) &&
6002		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
6003			goto release;
6004		}
6005		/*
6006		 * If I hit here the receiver wants more and this message is
6007		 * NOT done (pd-api). So two questions. Can we block? if not
6008		 * we are done. Did the user NOT set MSG_WAITALL?
6009		 */
6010		if (block_allowed == 0) {
6011			goto release;
6012		}
6013		/*
6014		 * We need to wait for more data a few things: - We don't
6015		 * sbunlock() so we don't get someone else reading. - We
6016		 * must be sure to account for the case where what is added
6017		 * is NOT to our control when we wakeup.
6018		 */
6019
6020		/*
6021		 * Do we need to tell the transport a rwnd update might be
6022		 * needed before we go to sleep?
6023		 */
6024		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6025		    ((freed_so_far >= rwnd_req) &&
6026		    (control->do_not_ref_stcb == 0) &&
6027		    (no_rcv_needed == 0))) {
6028			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6029		}
6030wait_some_more:
6031		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6032			goto release;
6033		}
6034		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6035			goto release;
6036
6037		if (hold_rlock == 1) {
6038			SCTP_INP_READ_UNLOCK(inp);
6039			hold_rlock = 0;
6040		}
6041		if (hold_sblock == 0) {
6042			SOCKBUF_LOCK(&so->so_rcv);
6043			hold_sblock = 1;
6044		}
6045		if ((copied_so_far) && (control->length == 0) &&
6046		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6047			goto release;
6048		}
6049		if (so->so_rcv.sb_cc <= control->held_length) {
6050			error = sbwait(&so->so_rcv);
6051			if (error) {
6052				goto release;
6053			}
6054			control->held_length = 0;
6055		}
6056		if (hold_sblock) {
6057			SOCKBUF_UNLOCK(&so->so_rcv);
6058			hold_sblock = 0;
6059		}
6060		if (control->length == 0) {
6061			/* still nothing here */
6062			if (control->end_added == 1) {
				/* he aborted, or is done, i.e., did a shutdown */
6064				out_flags |= MSG_EOR;
6065				if (control->pdapi_aborted) {
6066					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6067						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6068
6069					out_flags |= MSG_TRUNC;
6070				} else {
6071					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6072						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6073				}
6074				goto done_with_control;
6075			}
6076			if (so->so_rcv.sb_cc > held_length) {
6077				control->held_length = so->so_rcv.sb_cc;
6078				held_length = 0;
6079			}
6080			goto wait_some_more;
6081		} else if (control->data == NULL) {
6082			/*
6083			 * we must re-sync since data is probably being
6084			 * added
6085			 */
6086			SCTP_INP_READ_LOCK(inp);
6087			if ((control->length > 0) && (control->data == NULL)) {
6088				/*
6089				 * big trouble.. we have the lock and its
6090				 * corrupt?
6091				 */
6092#ifdef INVARIANTS
6093				panic("Impossible data==NULL length !=0");
6094#endif
6095				out_flags |= MSG_EOR;
6096				out_flags |= MSG_TRUNC;
6097				control->length = 0;
6098				SCTP_INP_READ_UNLOCK(inp);
6099				goto done_with_control;
6100			}
6101			SCTP_INP_READ_UNLOCK(inp);
6102			/* We will fall around to get more data */
6103		}
6104		goto get_more_data;
6105	} else {
6106		/*-
6107		 * Give caller back the mbuf chain,
6108		 * store in uio_resid the length
6109		 */
6110		wakeup_read_socket = 0;
6111		if ((control->end_added == 0) ||
6112		    (TAILQ_NEXT(control, next) == NULL)) {
6113			/* Need to get rlock */
6114			if (hold_rlock == 0) {
6115				SCTP_INP_READ_LOCK(inp);
6116				hold_rlock = 1;
6117			}
6118		}
6119		if (control->end_added) {
6120			out_flags |= MSG_EOR;
6121			if ((control->do_not_ref_stcb == 0) &&
6122			    (control->stcb != NULL) &&
6123			    ((control->spec_flags & M_NOTIFICATION) == 0))
6124				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6125		}
6126		if (control->spec_flags & M_NOTIFICATION) {
6127			out_flags |= MSG_NOTIFICATION;
6128		}
6129		uio->uio_resid = control->length;
6130		*mp = control->data;
6131		m = control->data;
6132		while (m) {
6133			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6134				sctp_sblog(&so->so_rcv,
6135				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6136			}
6137			sctp_sbfree(control, stcb, &so->so_rcv, m);
6138			freed_so_far += SCTP_BUF_LEN(m);
6139			freed_so_far += MSIZE;
6140			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6141				sctp_sblog(&so->so_rcv,
6142				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6143			}
6144			m = SCTP_BUF_NEXT(m);
6145		}
6146		control->data = control->tail_mbuf = NULL;
6147		control->length = 0;
6148		if (out_flags & MSG_EOR) {
6149			/* Done with this control */
6150			goto done_with_control;
6151		}
6152	}
6153release:
6154	if (hold_rlock == 1) {
6155		SCTP_INP_READ_UNLOCK(inp);
6156		hold_rlock = 0;
6157	}
6158	if (hold_sblock == 1) {
6159		SOCKBUF_UNLOCK(&so->so_rcv);
6160		hold_sblock = 0;
6161	}
6162	sbunlock(&so->so_rcv);
6163	sockbuf_lock = 0;
6164
6165release_unlocked:
6166	if (hold_sblock) {
6167		SOCKBUF_UNLOCK(&so->so_rcv);
6168		hold_sblock = 0;
6169	}
6170	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6171		if ((freed_so_far >= rwnd_req) &&
6172		    (control && (control->do_not_ref_stcb == 0)) &&
6173		    (no_rcv_needed == 0))
6174			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6175	}
6176out:
6177	if (msg_flags) {
6178		*msg_flags = out_flags;
6179	}
6180	if (((out_flags & MSG_EOR) == 0) &&
6181	    ((in_flags & MSG_PEEK) == 0) &&
6182	    (sinfo) &&
6183	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6184	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6185		struct sctp_extrcvinfo *s_extra;
6186
6187		s_extra = (struct sctp_extrcvinfo *)sinfo;
6188		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6189	}
6190	if (hold_rlock == 1) {
6191		SCTP_INP_READ_UNLOCK(inp);
6192	}
6193	if (hold_sblock) {
6194		SOCKBUF_UNLOCK(&so->so_rcv);
6195	}
6196	if (sockbuf_lock) {
6197		sbunlock(&so->so_rcv);
6198	}
6199	if (freecnt_applied) {
6200		/*
6201		 * The lock on the socket buffer protects us so the free
6202		 * code will stop. But since we used the socketbuf lock and
6203		 * the sender uses the tcb_lock to increment, we need to use
6204		 * the atomic add to the refcnt.
6205		 */
6206		if (stcb == NULL) {
6207#ifdef INVARIANTS
6208			panic("stcb for refcnt has gone NULL?");
6209			goto stage_left;
6210#else
6211			goto stage_left;
6212#endif
6213		}
6214		atomic_add_int(&stcb->asoc.refcnt, -1);
6215		/* Save the value back for next time */
6216		stcb->freed_by_sorcv_sincelast = freed_so_far;
6217	}
6218	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6219		if (stcb) {
6220			sctp_misc_ints(SCTP_SORECV_DONE,
6221			    freed_so_far,
6222			    ((uio) ? (slen - uio->uio_resid) : slen),
6223			    stcb->asoc.my_rwnd,
6224			    so->so_rcv.sb_cc);
6225		} else {
6226			sctp_misc_ints(SCTP_SORECV_DONE,
6227			    freed_so_far,
6228			    ((uio) ? (slen - uio->uio_resid) : slen),
6229			    0,
6230			    so->so_rcv.sb_cc);
6231		}
6232	}
6233stage_left:
6234	if (wakeup_read_socket) {
6235		sctp_sorwakeup(inp, so);
6236	}
6237	return (error);
6238}
6239
6240
6241#ifdef SCTP_MBUF_LOGGING
6242struct mbuf *
6243sctp_m_free(struct mbuf *m)
6244{
6245	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6246		sctp_log_mb(m, SCTP_MBUF_IFREE);
6247	}
6248	return (m_free(m));
6249}
6250
6251void
6252sctp_m_freem(struct mbuf *mb)
6253{
6254	while (mb != NULL)
6255		mb = sctp_m_free(mb);
6256}
6257
6258#endif
6259
6260int
6261sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6262{
6263	/*
6264	 * Given a local address. For all associations that holds the
6265	 * address, request a peer-set-primary.
6266	 */
6267	struct sctp_ifa *ifa;
6268	struct sctp_laddr *wi;
6269
6270	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6271	if (ifa == NULL) {
6272		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6273		return (EADDRNOTAVAIL);
6274	}
6275	/*
6276	 * Now that we have the ifa we must awaken the iterator with this
6277	 * message.
6278	 */
6279	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6280	if (wi == NULL) {
6281		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6282		return (ENOMEM);
6283	}
6284	/* Now incr the count and int wi structure */
6285	SCTP_INCR_LADDR_COUNT();
6286	bzero(wi, sizeof(*wi));
6287	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6288	wi->ifa = ifa;
6289	wi->action = SCTP_SET_PRIM_ADDR;
6290	atomic_add_int(&ifa->refcount, 1);
6291
6292	/* Now add it to the work queue */
6293	SCTP_WQ_ADDR_LOCK();
6294	/*
6295	 * Should this really be a tailq? As it is we will process the
6296	 * newest first :-0
6297	 */
6298	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6299	SCTP_WQ_ADDR_UNLOCK();
6300	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6301	    (struct sctp_inpcb *)NULL,
6302	    (struct sctp_tcb *)NULL,
6303	    (struct sctp_nets *)NULL);
6304	return (0);
6305}
6306
6307
6308int
6309sctp_soreceive(struct socket *so,
6310    struct sockaddr **psa,
6311    struct uio *uio,
6312    struct mbuf **mp0,
6313    struct mbuf **controlp,
6314    int *flagsp)
6315{
6316	int error, fromlen;
6317	uint8_t sockbuf[256];
6318	struct sockaddr *from;
6319	struct sctp_extrcvinfo sinfo;
6320	int filling_sinfo = 1;
6321	struct sctp_inpcb *inp;
6322
6323	inp = (struct sctp_inpcb *)so->so_pcb;
6324	/* pickup the assoc we are reading from */
6325	if (inp == NULL) {
6326		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6327		return (EINVAL);
6328	}
6329	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6330	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6331	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6332	    (controlp == NULL)) {
6333		/* user does not want the sndrcv ctl */
6334		filling_sinfo = 0;
6335	}
6336	if (psa) {
6337		from = (struct sockaddr *)sockbuf;
6338		fromlen = sizeof(sockbuf);
6339		from->sa_len = 0;
6340	} else {
6341		from = NULL;
6342		fromlen = 0;
6343	}
6344
6345	if (filling_sinfo) {
6346		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6347	}
6348	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6349	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6350	if (controlp != NULL) {
6351		/* copy back the sinfo in a CMSG format */
6352		if (filling_sinfo)
6353			*controlp = sctp_build_ctl_nchunk(inp,
6354			    (struct sctp_sndrcvinfo *)&sinfo);
6355		else
6356			*controlp = NULL;
6357	}
6358	if (psa) {
6359		/* copy back the address info */
6360		if (from && from->sa_len) {
6361			*psa = sodupsockaddr(from, M_NOWAIT);
6362		} else {
6363			*psa = NULL;
6364		}
6365	}
6366	return (error);
6367}
6368
6369
6370
6371
6372
/*
 * sctp_connectx() helper: walk the packed array of 'totaddr'
 * sockaddrs at 'addr' (sockaddr_in/sockaddr_in6 entries laid out
 * back to back) and add each one to the association as a confirmed
 * remote address.  Returns the number of addresses added.  On any
 * failure *error is set (EINVAL or ENOBUFS) and the association has
 * already been freed via sctp_free_assoc(), so the caller must not
 * dereference stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unknown family 'incr' keeps
			 * its previous value (0 on the first entry), so the
			 * walk would revisit the same bytes.  Callers are
			 * expected to have validated the list via
			 * sctp_connectx_helper_find() first - confirm.
			 */
			break;
		}
		/* Step to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6452
6453struct sctp_tcb *
6454sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6455    int *totaddr, int *num_v4, int *num_v6, int *error,
6456    int limit, int *bad_addr)
6457{
6458	struct sockaddr *sa;
6459	struct sctp_tcb *stcb = NULL;
6460	size_t incr, at, i;
6461
6462	at = incr = 0;
6463	sa = addr;
6464
6465	*error = *num_v6 = *num_v4 = 0;
6466	/* account and validate addresses */
6467	for (i = 0; i < (size_t)*totaddr; i++) {
6468		switch (sa->sa_family) {
6469#ifdef INET
6470		case AF_INET:
6471			(*num_v4) += 1;
6472			incr = sizeof(struct sockaddr_in);
6473			if (sa->sa_len != incr) {
6474				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6475				*error = EINVAL;
6476				*bad_addr = 1;
6477				return (NULL);
6478			}
6479			break;
6480#endif
6481#ifdef INET6
6482		case AF_INET6:
6483			{
6484				struct sockaddr_in6 *sin6;
6485
6486				sin6 = (struct sockaddr_in6 *)sa;
6487				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6488					/* Must be non-mapped for connectx */
6489					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6490					*error = EINVAL;
6491					*bad_addr = 1;
6492					return (NULL);
6493				}
6494				(*num_v6) += 1;
6495				incr = sizeof(struct sockaddr_in6);
6496				if (sa->sa_len != incr) {
6497					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6498					*error = EINVAL;
6499					*bad_addr = 1;
6500					return (NULL);
6501				}
6502				break;
6503			}
6504#endif
6505		default:
6506			*totaddr = i;
6507			/* we are done */
6508			break;
6509		}
6510		if (i == (size_t)*totaddr) {
6511			break;
6512		}
6513		SCTP_INP_INCR_REF(inp);
6514		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6515		if (stcb != NULL) {
6516			/* Already have or am bring up an association */
6517			return (stcb);
6518		} else {
6519			SCTP_INP_DECR_REF(inp);
6520		}
6521		if ((at + incr) > (size_t)limit) {
6522			*totaddr = i;
6523			break;
6524		}
6525		sa = (struct sockaddr *)((caddr_t)sa + incr);
6526	}
6527	return ((struct sctp_tcb *)NULL);
6528}
6529
6530/*
6531 * sctp_bindx(ADD) for one address.
6532 * assumes all arguments are valid/checked by caller.
6533 */
6534void
6535sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6536    struct sockaddr *sa, sctp_assoc_t assoc_id,
6537    uint32_t vrf_id, int *error, void *p)
6538{
6539	struct sockaddr *addr_touse;
6540
6541#if defined(INET) && defined(INET6)
6542	struct sockaddr_in sin;
6543
6544#endif
6545
6546	/* see if we're bound all already! */
6547	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6548		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6549		*error = EINVAL;
6550		return;
6551	}
6552	addr_touse = sa;
6553#ifdef INET6
6554	if (sa->sa_family == AF_INET6) {
6555#ifdef INET
6556		struct sockaddr_in6 *sin6;
6557
6558#endif
6559		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6560			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6561			*error = EINVAL;
6562			return;
6563		}
6564		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6565			/* can only bind v6 on PF_INET6 sockets */
6566			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6567			*error = EINVAL;
6568			return;
6569		}
6570#ifdef INET
6571		sin6 = (struct sockaddr_in6 *)addr_touse;
6572		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6573			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6574			    SCTP_IPV6_V6ONLY(inp)) {
6575				/* can't bind v4-mapped on PF_INET sockets */
6576				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577				*error = EINVAL;
6578				return;
6579			}
6580			in6_sin6_2_sin(&sin, sin6);
6581			addr_touse = (struct sockaddr *)&sin;
6582		}
6583#endif
6584	}
6585#endif
6586#ifdef INET
6587	if (sa->sa_family == AF_INET) {
6588		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6589			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590			*error = EINVAL;
6591			return;
6592		}
6593		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6594		    SCTP_IPV6_V6ONLY(inp)) {
6595			/* can't bind v4 on PF_INET sockets */
6596			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597			*error = EINVAL;
6598			return;
6599		}
6600	}
6601#endif
6602	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6603		if (p == NULL) {
6604			/* Can't get proc for Net/Open BSD */
6605			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606			*error = EINVAL;
6607			return;
6608		}
6609		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6610		return;
6611	}
6612	/*
6613	 * No locks required here since bind and mgmt_ep_sa all do their own
6614	 * locking. If we do something for the FIX: below we may need to
6615	 * lock in that case.
6616	 */
6617	if (assoc_id == 0) {
6618		/* add the address */
6619		struct sctp_inpcb *lep;
6620		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6621
6622		/* validate the incoming port */
6623		if ((lsin->sin_port != 0) &&
6624		    (lsin->sin_port != inp->sctp_lport)) {
6625			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626			*error = EINVAL;
6627			return;
6628		} else {
6629			/* user specified 0 port, set it to existing port */
6630			lsin->sin_port = inp->sctp_lport;
6631		}
6632
6633		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6634		if (lep != NULL) {
6635			/*
6636			 * We must decrement the refcount since we have the
6637			 * ep already and are binding. No remove going on
6638			 * here.
6639			 */
6640			SCTP_INP_DECR_REF(lep);
6641		}
6642		if (lep == inp) {
6643			/* already bound to it.. ok */
6644			return;
6645		} else if (lep == NULL) {
6646			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6647			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6648			    SCTP_ADD_IP_ADDRESS,
6649			    vrf_id, NULL);
6650		} else {
6651			*error = EADDRINUSE;
6652		}
6653		if (*error)
6654			return;
6655	} else {
6656		/*
6657		 * FIX: decide whether we allow assoc based bindx
6658		 */
6659	}
6660}
6661
6662/*
6663 * sctp_bindx(DELETE) for one address.
6664 * assumes all arguments are valid/checked by caller.
6665 */
6666void
6667sctp_bindx_delete_address(struct sctp_inpcb *inp,
6668    struct sockaddr *sa, sctp_assoc_t assoc_id,
6669    uint32_t vrf_id, int *error)
6670{
6671	struct sockaddr *addr_touse;
6672
6673#if defined(INET) && defined(INET6)
6674	struct sockaddr_in sin;
6675
6676#endif
6677
6678	/* see if we're bound all already! */
6679	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6680		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6681		*error = EINVAL;
6682		return;
6683	}
6684	addr_touse = sa;
6685#ifdef INET6
6686	if (sa->sa_family == AF_INET6) {
6687#ifdef INET
6688		struct sockaddr_in6 *sin6;
6689
6690#endif
6691
6692		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6693			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6694			*error = EINVAL;
6695			return;
6696		}
6697		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6698			/* can only bind v6 on PF_INET6 sockets */
6699			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6700			*error = EINVAL;
6701			return;
6702		}
6703#ifdef INET
6704		sin6 = (struct sockaddr_in6 *)addr_touse;
6705		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6706			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6707			    SCTP_IPV6_V6ONLY(inp)) {
6708				/* can't bind mapped-v4 on PF_INET sockets */
6709				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6710				*error = EINVAL;
6711				return;
6712			}
6713			in6_sin6_2_sin(&sin, sin6);
6714			addr_touse = (struct sockaddr *)&sin;
6715		}
6716#endif
6717	}
6718#endif
6719#ifdef INET
6720	if (sa->sa_family == AF_INET) {
6721		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6722			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6723			*error = EINVAL;
6724			return;
6725		}
6726		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6727		    SCTP_IPV6_V6ONLY(inp)) {
6728			/* can't bind v4 on PF_INET sockets */
6729			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6730			*error = EINVAL;
6731			return;
6732		}
6733	}
6734#endif
6735	/*
6736	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6737	 * below is ever changed we may need to lock before calling
6738	 * association level binding.
6739	 */
6740	if (assoc_id == 0) {
6741		/* delete the address */
6742		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6743		    SCTP_DEL_IP_ADDRESS,
6744		    vrf_id, NULL);
6745	} else {
6746		/*
6747		 * FIX: decide whether we allow assoc based bindx
6748		 */
6749	}
6750}
6751
6752/*
6753 * returns the valid local address count for an assoc, taking into account
6754 * all scoping rules
6755 */
6756int
6757sctp_local_addr_count(struct sctp_tcb *stcb)
6758{
6759	int loopback_scope;
6760
6761#if defined(INET)
6762	int ipv4_local_scope, ipv4_addr_legal;
6763
6764#endif
6765#if defined (INET6)
6766	int local_scope, site_scope, ipv6_addr_legal;
6767
6768#endif
6769	struct sctp_vrf *vrf;
6770	struct sctp_ifn *sctp_ifn;
6771	struct sctp_ifa *sctp_ifa;
6772	int count = 0;
6773
6774	/* Turn on all the appropriate scopes */
6775	loopback_scope = stcb->asoc.scope.loopback_scope;
6776#if defined(INET)
6777	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6778	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6779#endif
6780#if defined(INET6)
6781	local_scope = stcb->asoc.scope.local_scope;
6782	site_scope = stcb->asoc.scope.site_scope;
6783	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6784#endif
6785	SCTP_IPI_ADDR_RLOCK();
6786	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6787	if (vrf == NULL) {
6788		/* no vrf, no addresses */
6789		SCTP_IPI_ADDR_RUNLOCK();
6790		return (0);
6791	}
6792	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6793		/*
6794		 * bound all case: go through all ifns on the vrf
6795		 */
6796		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6797			if ((loopback_scope == 0) &&
6798			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6799				continue;
6800			}
6801			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6802				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6803					continue;
6804				switch (sctp_ifa->address.sa.sa_family) {
6805#ifdef INET
6806				case AF_INET:
6807					if (ipv4_addr_legal) {
6808						struct sockaddr_in *sin;
6809
6810						sin = &sctp_ifa->address.sin;
6811						if (sin->sin_addr.s_addr == 0) {
6812							/*
6813							 * skip unspecified
6814							 * addrs
6815							 */
6816							continue;
6817						}
6818						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6819						    &sin->sin_addr) != 0) {
6820							continue;
6821						}
6822						if ((ipv4_local_scope == 0) &&
6823						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6824							continue;
6825						}
6826						/* count this one */
6827						count++;
6828					} else {
6829						continue;
6830					}
6831					break;
6832#endif
6833#ifdef INET6
6834				case AF_INET6:
6835					if (ipv6_addr_legal) {
6836						struct sockaddr_in6 *sin6;
6837
6838						sin6 = &sctp_ifa->address.sin6;
6839						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6840							continue;
6841						}
6842						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6843						    &sin6->sin6_addr) != 0) {
6844							continue;
6845						}
6846						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6847							if (local_scope == 0)
6848								continue;
6849							if (sin6->sin6_scope_id == 0) {
6850								if (sa6_recoverscope(sin6) != 0)
6851									/*
6852									 *
6853									 * bad
6854									 *
6855									 * li
6856									 * nk
6857									 *
6858									 * loc
6859									 * al
6860									 *
6861									 * add
6862									 * re
6863									 * ss
6864									 * */
6865									continue;
6866							}
6867						}
6868						if ((site_scope == 0) &&
6869						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6870							continue;
6871						}
6872						/* count this one */
6873						count++;
6874					}
6875					break;
6876#endif
6877				default:
6878					/* TSNH */
6879					break;
6880				}
6881			}
6882		}
6883	} else {
6884		/*
6885		 * subset bound case
6886		 */
6887		struct sctp_laddr *laddr;
6888
6889		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6890		    sctp_nxt_addr) {
6891			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6892				continue;
6893			}
6894			/* count this one */
6895			count++;
6896		}
6897	}
6898	SCTP_IPI_ADDR_RUNLOCK();
6899	return (count);
6900}
6901
6902#if defined(SCTP_LOCAL_TRACE_BUF)
6903
6904void
6905sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6906{
6907	uint32_t saveindex, newindex;
6908
6909	do {
6910		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6911		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6912			newindex = 1;
6913		} else {
6914			newindex = saveindex + 1;
6915		}
6916	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6917	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6918		saveindex = 0;
6919	}
6920	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6921	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6922	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6923	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6924	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6925	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6926	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6927	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6928}
6929
6930#endif
/*
 * Input hook for SCTP-over-UDP tunneling: called by the UDP stack for
 * each datagram arriving on a tunneling socket (registered via
 * udp_set_kernel_tunneling() in sctp_over_udp_start()).  Strips the
 * UDP header, records the UDP source port, and feeds the inner SCTP
 * packet into the normal SCTP input path.  Consumes the mbuf chain
 * in all cases.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	/*
	 * NOTE(review): assumes the UDP header at 'off' is contiguous in
	 * the first mbuf - presumably guaranteed by the UDP tunneling
	 * delivery path; confirm.
	 */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	/* Dispatch on the IP version of the outer header. */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the recorded length by the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
7015
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down SCTP-over-UDP tunneling.  This function assumes the
	 * sysctl caller holds sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	/* Close and forget the IPv4 tunneling socket, if any. */
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	/* Close and forget the IPv6 tunneling socket, if any. */
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7036
/*
 * Start SCTP-over-UDP tunneling: create kernel UDP sockets (IPv4
 * and/or IPv6), register sctp_recv_udp_tunneled_packet() as their
 * tunneling input hook, and bind them to the sysctl-configured
 * tunneling port.  Returns 0 on success or an errno value; any
 * partially created state is torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Create the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7122