1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_crc32.h>
50#include <netinet/udp.h>
51#include <sys/smp.h>
52
53
54
55static void
56sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
57{
58	struct sctp_nets *net;
59
60	/*
61	 * This now not only stops all cookie timers it also stops any INIT
62	 * timers as well. This will make sure that the timers are stopped
63	 * in all collision cases.
64	 */
65	SCTP_TCB_LOCK_ASSERT(stcb);
66	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
67		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
68			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
69			    stcb->sctp_ep,
70			    stcb,
71			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
72		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
73			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
74			    stcb->sctp_ep,
75			    stcb,
76			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
77		}
78	}
79}
80
81/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	/*
	 * Handle an incoming INIT chunk: validate its length and the
	 * mandatory parameters (initiate_tag, a_rwnd, stream counts, AUTH
	 * parameters), reject the INIT when no listening socket can accept
	 * a new association, and otherwise reply with an INIT-ACK (or a
	 * SHUTDOWN-ACK if an existing association is already in
	 * SHUTDOWN-ACK-SENT).  When an abort is sent for an existing stcb,
	 * *abort_no_unlock is set so the caller knows the TCB lock has
	 * already been released by the abort path.
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    (void *)stcb);
	if (stcb == NULL) {
		/* No association yet: protect the inp while we answer. */
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/*
	 * We are only accepting if we have a socket with positive
	 * so_qlimit.
	 */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_socket == NULL) ||
	    (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* Not blackholed: tell the peer there is no listener. */
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
			    use_mflowid, mflowid,
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Already shutting down: re-send SHUTDOWN-ACK, not INIT-ACK. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, src, dst,
		    sh, cp,
		    use_mflowid, mflowid,
		    vrf_id, port,
		    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
	}
outnow:
	if (stcb == NULL) {
		/* Drop the read lock taken at entry for the no-stcb case. */
		SCTP_INP_RUNLOCK(inp);
	}
}
210
211/*
212 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
213 */
214
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int unsent_data = 0;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.
	 *
	 * NOTE(review): the loop breaks at the first stream with real
	 * unsent data, so in practice the return value is 0 or 1 — callers
	 * appear to use it only as a boolean.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing deferred cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Diagnostic: message fully sent but never marked ended. */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				/* Release the fully-sent entry: net ref, data mbufs, then the entry. */
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
			} else {
				/* Real unsent data found; no need to scan further. */
				unsent_data++;
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
279
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	/*
	 * Absorb the peer's INIT (or INIT-ACK) parameters into the
	 * association: peer vtag and rwnd, initial TSN bookkeeping,
	 * outbound stream count negotiation (abandoning chunks queued on
	 * streams the peer does not accept), and (re)allocation of the
	 * inbound stream array.  Returns 0 on success, -1 if the inbound
	 * stream array cannot be allocated.
	 */
	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		/*
		 * The peer accepts fewer inbound streams than we opened:
		 * drop everything queued on the streams being cut.
		 */
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-chunked data on the send queue. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
				}
				if (chk->data != NULL) {
					/* Notify the ULP that this datagram will never be sent. */
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Then purge not-yet-chunked pending messages on the cut streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count: min(our limit, peer's outbound count). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		/* 0xffff so the first delivered SSN (0) is seen as in order. */
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
426
427/*
428 * INIT-ACK message processing/consumption returns value < 0 on error
429 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/*
	 * Consume a peer INIT-ACK: reject illegal parameters, absorb the
	 * peer's INIT parameters, load its addresses, negotiate the HMAC,
	 * stop the INIT timer, update the RTO, and queue a COOKIE-ECHO
	 * built from the received state cookie.  Returns 0 on success,
	 * < 0 on error; on an abort, *abort_no_unlock is set for the
	 * caller.
	 */
	/* First verify that we have no illegal param's */
	abort_flag = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL))) {
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	/* Pick the best HMAC both sides support. */
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK clears the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err = sctp_generate_cause(SCTP_CAUSE_MISSING_PARAM, "");
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    src, dst, sh, op_err,
			    use_mflowid, mflowid,
			    vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
558
559static void
560sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
561    struct sctp_tcb *stcb, struct sctp_nets *net)
562{
563	struct sockaddr_storage store;
564	struct sctp_nets *r_net, *f_net;
565	struct timeval tv;
566	int req_prim = 0;
567	uint16_t old_error_counter;
568
569#ifdef INET
570	struct sockaddr_in *sin;
571
572#endif
573#ifdef INET6
574	struct sockaddr_in6 *sin6;
575
576#endif
577
578	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
579		/* Invalid length */
580		return;
581	}
582	memset(&store, 0, sizeof(store));
583	switch (cp->heartbeat.hb_info.addr_family) {
584#ifdef INET
585	case AF_INET:
586		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
587			sin = (struct sockaddr_in *)&store;
588			sin->sin_family = cp->heartbeat.hb_info.addr_family;
589			sin->sin_len = cp->heartbeat.hb_info.addr_len;
590			sin->sin_port = stcb->rport;
591			memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
592			    sizeof(sin->sin_addr));
593		} else {
594			return;
595		}
596		break;
597#endif
598#ifdef INET6
599	case AF_INET6:
600		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
601			sin6 = (struct sockaddr_in6 *)&store;
602			sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
603			sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
604			sin6->sin6_port = stcb->rport;
605			memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
606			    sizeof(sin6->sin6_addr));
607		} else {
608			return;
609		}
610		break;
611#endif
612	default:
613		return;
614	}
615	r_net = sctp_findnet(stcb, (struct sockaddr *)&store);
616	if (r_net == NULL) {
617		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
618		return;
619	}
620	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
621	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
622	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
623		/*
624		 * If the its a HB and it's random value is correct when can
625		 * confirm the destination.
626		 */
627		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
628		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
629			stcb->asoc.primary_destination = r_net;
630			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
631			f_net = TAILQ_FIRST(&stcb->asoc.nets);
632			if (f_net != r_net) {
633				/*
634				 * first one on the list is NOT the primary
635				 * sctp_cmpaddr() is much more efficent if
636				 * the primary is the first on the list,
637				 * make it so.
638				 */
639				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
640				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
641			}
642			req_prim = 1;
643		}
644		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
645		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
646		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
647		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
648	}
649	old_error_counter = r_net->error_count;
650	r_net->error_count = 0;
651	r_net->hb_responded = 1;
652	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
653	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
654	/* Now lets do a RTO with this */
655	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
656	    SCTP_RTT_FROM_NON_DATA);
657	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
658		r_net->dest_state |= SCTP_ADDR_REACHABLE;
659		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
660		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
661	}
662	if (r_net->dest_state & SCTP_ADDR_PF) {
663		r_net->dest_state &= ~SCTP_ADDR_PF;
664		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
665	}
666	if (old_error_counter > 0) {
667		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
668		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
669	}
670	if (r_net == stcb->asoc.primary_destination) {
671		if (stcb->asoc.alternate) {
672			/* release the alternate, primary is good */
673			sctp_free_remote_addr(stcb->asoc.alternate);
674			stcb->asoc.alternate = NULL;
675		}
676	}
677	/* Mobility adaptation */
678	if (req_prim) {
679		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
680		    SCTP_MOBILITY_BASE) ||
681		    sctp_is_mobility_feature_on(stcb->sctp_ep,
682		    SCTP_MOBILITY_FASTHANDOFF)) &&
683		    sctp_is_mobility_feature_on(stcb->sctp_ep,
684		    SCTP_MOBILITY_PRIM_DELETED)) {
685
686			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
687			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
688			    SCTP_MOBILITY_FASTHANDOFF)) {
689				sctp_assoc_immediate_retrans(stcb,
690				    stcb->asoc.primary_destination);
691			}
692			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
693			    SCTP_MOBILITY_BASE)) {
694				sctp_move_chunks_from_net(stcb,
695				    stcb->asoc.deleted_primary);
696			}
697			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
698			    stcb->asoc.deleted_primary);
699		}
700	}
701}
702
703static int
704sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
705{
706	/*
707	 * return 0 means we want you to proceed with the abort non-zero
708	 * means no abort processing
709	 */
710	struct sctpasochead *head;
711
712	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
713		/* generate a new vtag and send init */
714		LIST_REMOVE(stcb, sctp_asocs);
715		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
716		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
717		/*
718		 * put it in the bucket in the vtag hash of assoc's for the
719		 * system
720		 */
721		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
722		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
723		return (1);
724	}
725	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
726		/*
727		 * treat like a case where the cookie expired i.e.: - dump
728		 * current cookie. - generate a new vtag. - resend init.
729		 */
730		/* generate a new vtag and send init */
731		LIST_REMOVE(stcb, sctp_asocs);
732		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
733		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
734		sctp_stop_all_cookie_timers(stcb);
735		sctp_toss_old_cookies(stcb, &stcb->asoc);
736		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
737		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
738		/*
739		 * put it in the bucket in the vtag hash of assoc's for the
740		 * system
741		 */
742		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
743		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
744		return (1);
745	}
746	return (0);
747}
748
749static int
750sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
751    struct sctp_nets *net)
752{
753	/*
754	 * return 0 means we want you to proceed with the abort non-zero
755	 * means no abort processing
756	 */
757	if (stcb->asoc.peer_supports_auth == 0) {
758		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
759		return (0);
760	}
761	sctp_asconf_send_nat_state_update(stcb, net);
762	return (1);
763}
764
765
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;
	uint16_t error;

	/*
	 * Handle an incoming ABORT chunk: special-case the two NAT cause
	 * codes (which may restart the handshake instead of killing the
	 * association), then notify the ULP and free the TCB.
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_missing_nat_state *natc;

		natc = (struct sctp_missing_nat_state *)(abort + 1);
		error = ntohs(natc->cause);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/* Non-zero return: handshake restarted, keep the assoc. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/* Non-zero return: ASCONF update sent, keep the assoc. */
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so hold a refcount while the TCB lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used for the timer stop above —
	 * presumably a distinct location code was intended here; verify
	 * against the SCTP_LOC_* definitions.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
836
837static void
838sctp_start_net_timers(struct sctp_tcb *stcb)
839{
840	uint32_t cnt_hb_sent;
841	struct sctp_nets *net;
842
843	cnt_hb_sent = 0;
844	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
845		/*
846		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
847		 * If the dest in unconfirmed send a hb as well if under
848		 * max_hb_burst have been sent.
849		 */
850		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
851		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
852		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
853		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
854			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
855			cnt_hb_sent++;
856		}
857	}
858	if (cnt_hb_sent) {
859		sctp_chunk_output(stcb->sctp_ep, stcb,
860		    SCTP_OUTPUT_FROM_COOKIE_ACK,
861		    SCTP_SO_NOT_LOCKED);
862	}
863}
864
865
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * Process an inbound SHUTDOWN chunk: ack the carried cumulative
	 * TSN, notify the ULP, and if nothing is left to send, respond
	 * with SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.  *abort_flag is
	 * set by sctp_update_acked() if the association gets aborted.
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN before the association is established is ignored. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* SHUTDOWN carries a cumulative TSN ack; process it first. */
		sctp_update_acked(stcb, cp, abort_flag);
		if (*abort_flag) {
			/* association was aborted inside sctp_update_acked() */
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 * Terminate any partial-delivery in progress so the reader
		 * is not left waiting for data that will never arrive.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken without
		 * holding the TCB lock, so hold a refcount, drop the TCB
		 * lock, take the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_send_shutdown_ack(stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
971
972static void
973sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
974    struct sctp_tcb *stcb,
975    struct sctp_nets *net)
976{
977	struct sctp_association *asoc;
978
979#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
980	struct socket *so;
981
982	so = SCTP_INP_SO(stcb->sctp_ep);
983#endif
984	SCTPDBG(SCTP_DEBUG_INPUT2,
985	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
986	if (stcb == NULL)
987		return;
988
989	asoc = &stcb->asoc;
990	/* process according to association state */
991	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
992	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
993		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
994		sctp_send_shutdown_complete(stcb, net, 1);
995		SCTP_TCB_UNLOCK(stcb);
996		return;
997	}
998	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
999	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1000		/* unexpected SHUTDOWN-ACK... so ignore... */
1001		SCTP_TCB_UNLOCK(stcb);
1002		return;
1003	}
1004	if (asoc->control_pdapi) {
1005		/*
1006		 * With a normal shutdown we assume the end of last record.
1007		 */
1008		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1009		asoc->control_pdapi->end_added = 1;
1010		asoc->control_pdapi->pdapi_aborted = 1;
1011		asoc->control_pdapi = NULL;
1012		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1013#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1014		atomic_add_int(&stcb->asoc.refcnt, 1);
1015		SCTP_TCB_UNLOCK(stcb);
1016		SCTP_SOCKET_LOCK(so, 1);
1017		SCTP_TCB_LOCK(stcb);
1018		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1019		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1020			/* assoc was freed while we were unlocked */
1021			SCTP_SOCKET_UNLOCK(so, 1);
1022			return;
1023		}
1024#endif
1025		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1026#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1027		SCTP_SOCKET_UNLOCK(so, 1);
1028#endif
1029	}
1030#ifdef INVARIANTS
1031	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1032	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1033	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
1034		panic("Queues are not empty when handling SHUTDOWN-ACK");
1035	}
1036#endif
1037	/* stop the timer */
1038	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
1039	/* send SHUTDOWN-COMPLETE */
1040	sctp_send_shutdown_complete(stcb, net, 0);
1041	/* notify upper layer protocol */
1042	if (stcb->sctp_socket) {
1043		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1044		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1045			stcb->sctp_socket->so_snd.sb_cc = 0;
1046		}
1047		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1048	}
1049	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1050	/* free the TCB but first save off the ep */
1051#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1052	atomic_add_int(&stcb->asoc.refcnt, 1);
1053	SCTP_TCB_UNLOCK(stcb);
1054	SCTP_SOCKET_LOCK(so, 1);
1055	SCTP_TCB_LOCK(stcb);
1056	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1057#endif
1058	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1059	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1060#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1061	SCTP_SOCKET_UNLOCK(so, 1);
1062#endif
1063}
1064
1065/*
1066 * Skip past the param header and then we will find the chunk that caused the
1067 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
1068 * our peer must be broken.
1069 */
1070static void
1071sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1072    struct sctp_nets *net)
1073{
1074	struct sctp_chunkhdr *chk;
1075
1076	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1077	switch (chk->chunk_type) {
1078	case SCTP_ASCONF_ACK:
1079	case SCTP_ASCONF:
1080		sctp_asconf_cleanup(stcb, net);
1081		break;
1082	case SCTP_FORWARD_CUM_TSN:
1083		stcb->asoc.peer_supports_prsctp = 0;
1084		break;
1085	default:
1086		SCTPDBG(SCTP_DEBUG_INPUT2,
1087		    "Peer does not support chunk type %d(%x)??\n",
1088		    chk->chunk_type, (uint32_t) chk->chunk_type);
1089		break;
1090	}
1091}
1092
1093/*
1094 * Skip past the param header and then we will find the param that caused the
1095 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1096 * these will turn of specific features.
1097 */
1098static void
1099sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1100{
1101	struct sctp_paramhdr *pbad;
1102
1103	pbad = phdr + 1;
1104	switch (ntohs(pbad->param_type)) {
1105		/* pr-sctp draft */
1106	case SCTP_PRSCTP_SUPPORTED:
1107		stcb->asoc.peer_supports_prsctp = 0;
1108		break;
1109	case SCTP_SUPPORTED_CHUNK_EXT:
1110		break;
1111		/* draft-ietf-tsvwg-addip-sctp */
1112	case SCTP_HAS_NAT_SUPPORT:
1113		stcb->asoc.peer_supports_nat = 0;
1114		break;
1115	case SCTP_ADD_IP_ADDRESS:
1116	case SCTP_DEL_IP_ADDRESS:
1117	case SCTP_SET_PRIM_ADDR:
1118		stcb->asoc.peer_supports_asconf = 0;
1119		break;
1120	case SCTP_SUCCESS_REPORT:
1121	case SCTP_ERROR_CAUSE_IND:
1122		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1123		SCTPDBG(SCTP_DEBUG_INPUT2,
1124		    "Turning off ASCONF to this strange peer\n");
1125		stcb->asoc.peer_supports_asconf = 0;
1126		break;
1127	default:
1128		SCTPDBG(SCTP_DEBUG_INPUT2,
1129		    "Peer does not support param type %d(%x)??\n",
1130		    pbad->param_type, (uint32_t) pbad->param_type);
1131		break;
1132	}
1133}
1134
1135static int
1136sctp_handle_error(struct sctp_chunkhdr *ch,
1137    struct sctp_tcb *stcb, struct sctp_nets *net)
1138{
1139	int chklen;
1140	struct sctp_paramhdr *phdr;
1141	uint16_t error, error_type;
1142	uint16_t error_len;
1143	struct sctp_association *asoc;
1144	int adjust;
1145
1146#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1147	struct socket *so;
1148
1149#endif
1150
1151	/* parse through all of the errors and process */
1152	asoc = &stcb->asoc;
1153	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1154	    sizeof(struct sctp_chunkhdr));
1155	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1156	error = 0;
1157	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1158		/* Process an Error Cause */
1159		error_type = ntohs(phdr->param_type);
1160		error_len = ntohs(phdr->param_length);
1161		if ((error_len > chklen) || (error_len == 0)) {
1162			/* invalid param length for this param */
1163			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1164			    chklen, error_len);
1165			return (0);
1166		}
1167		if (error == 0) {
1168			/* report the first error cause */
1169			error = error_type;
1170		}
1171		switch (error_type) {
1172		case SCTP_CAUSE_INVALID_STREAM:
1173		case SCTP_CAUSE_MISSING_PARAM:
1174		case SCTP_CAUSE_INVALID_PARAM:
1175		case SCTP_CAUSE_NO_USER_DATA:
1176			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1177			    error_type);
1178			break;
1179		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1180			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1181			    ch->chunk_flags);
1182			if (sctp_handle_nat_colliding_state(stcb)) {
1183				return (0);
1184			}
1185			break;
1186		case SCTP_CAUSE_NAT_MISSING_STATE:
1187			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1188			    ch->chunk_flags);
1189			if (sctp_handle_nat_missing_state(stcb, net)) {
1190				return (0);
1191			}
1192			break;
1193		case SCTP_CAUSE_STALE_COOKIE:
1194			/*
1195			 * We only act if we have echoed a cookie and are
1196			 * waiting.
1197			 */
1198			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1199				int *p;
1200
1201				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1202				/* Save the time doubled */
1203				asoc->cookie_preserve_req = ntohl(*p) << 1;
1204				asoc->stale_cookie_count++;
1205				if (asoc->stale_cookie_count >
1206				    asoc->max_init_times) {
1207					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
1208					/* now free the asoc */
1209#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1210					so = SCTP_INP_SO(stcb->sctp_ep);
1211					atomic_add_int(&stcb->asoc.refcnt, 1);
1212					SCTP_TCB_UNLOCK(stcb);
1213					SCTP_SOCKET_LOCK(so, 1);
1214					SCTP_TCB_LOCK(stcb);
1215					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1216#endif
1217					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1218					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1219#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1220					SCTP_SOCKET_UNLOCK(so, 1);
1221#endif
1222					return (-1);
1223				}
1224				/* blast back to INIT state */
1225				sctp_toss_old_cookies(stcb, &stcb->asoc);
1226				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1227				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1228				sctp_stop_all_cookie_timers(stcb);
1229				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1230			}
1231			break;
1232		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1233			/*
1234			 * Nothing we can do here, we don't do hostname
1235			 * addresses so if the peer does not like my IPv6
1236			 * (or IPv4 for that matter) it does not matter. If
1237			 * they don't support that type of address, they can
1238			 * NOT possibly get that packet type... i.e. with no
1239			 * IPv6 you can't recieve a IPv6 packet. so we can
1240			 * safely ignore this one. If we ever added support
1241			 * for HOSTNAME Addresses, then we would need to do
1242			 * something here.
1243			 */
1244			break;
1245		case SCTP_CAUSE_UNRECOG_CHUNK:
1246			sctp_process_unrecog_chunk(stcb, phdr, net);
1247			break;
1248		case SCTP_CAUSE_UNRECOG_PARAM:
1249			sctp_process_unrecog_param(stcb, phdr);
1250			break;
1251		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1252			/*
1253			 * We ignore this since the timer will drive out a
1254			 * new cookie anyway and there timer will drive us
1255			 * to send a SHUTDOWN_COMPLETE. We can't send one
1256			 * here since we don't have their tag.
1257			 */
1258			break;
1259		case SCTP_CAUSE_DELETING_LAST_ADDR:
1260		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1261		case SCTP_CAUSE_DELETING_SRC_ADDR:
1262			/*
1263			 * We should NOT get these here, but in a
1264			 * ASCONF-ACK.
1265			 */
1266			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1267			    error_type);
1268			break;
1269		case SCTP_CAUSE_OUT_OF_RESC:
1270			/*
1271			 * And what, pray tell do we do with the fact that
1272			 * the peer is out of resources? Not really sure we
1273			 * could do anything but abort. I suspect this
1274			 * should have came WITH an abort instead of in a
1275			 * OP-ERROR.
1276			 */
1277			break;
1278		default:
1279			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1280			    error_type);
1281			break;
1282		}
1283		adjust = SCTP_SIZE32(error_len);
1284		chklen -= adjust;
1285		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1286	}
1287	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
1288	return (0);
1289}
1290
1291static int
1292sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1293    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1294    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1295    struct sctp_nets *net, int *abort_no_unlock,
1296    uint8_t use_mflowid, uint32_t mflowid,
1297    uint32_t vrf_id)
1298{
1299	struct sctp_init_ack *init_ack;
1300	struct mbuf *op_err;
1301
1302	SCTPDBG(SCTP_DEBUG_INPUT2,
1303	    "sctp_handle_init_ack: handling INIT-ACK\n");
1304
1305	if (stcb == NULL) {
1306		SCTPDBG(SCTP_DEBUG_INPUT2,
1307		    "sctp_handle_init_ack: TCB is null\n");
1308		return (-1);
1309	}
1310	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1311		/* Invalid length */
1312		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1313		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1314		    src, dst, sh, op_err,
1315		    use_mflowid, mflowid,
1316		    vrf_id, net->port);
1317		*abort_no_unlock = 1;
1318		return (-1);
1319	}
1320	init_ack = &cp->init;
1321	/* validate parameters */
1322	if (init_ack->initiate_tag == 0) {
1323		/* protocol error... send an abort */
1324		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1325		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1326		    src, dst, sh, op_err,
1327		    use_mflowid, mflowid,
1328		    vrf_id, net->port);
1329		*abort_no_unlock = 1;
1330		return (-1);
1331	}
1332	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1333		/* protocol error... send an abort */
1334		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1335		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1336		    src, dst, sh, op_err,
1337		    use_mflowid, mflowid,
1338		    vrf_id, net->port);
1339		*abort_no_unlock = 1;
1340		return (-1);
1341	}
1342	if (init_ack->num_inbound_streams == 0) {
1343		/* protocol error... send an abort */
1344		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1345		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1346		    src, dst, sh, op_err,
1347		    use_mflowid, mflowid,
1348		    vrf_id, net->port);
1349		*abort_no_unlock = 1;
1350		return (-1);
1351	}
1352	if (init_ack->num_outbound_streams == 0) {
1353		/* protocol error... send an abort */
1354		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1355		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1356		    src, dst, sh, op_err,
1357		    use_mflowid, mflowid,
1358		    vrf_id, net->port);
1359		*abort_no_unlock = 1;
1360		return (-1);
1361	}
1362	/* process according to association state... */
1363	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1364	case SCTP_STATE_COOKIE_WAIT:
1365		/* this is the expected state for this chunk */
1366		/* process the INIT-ACK parameters */
1367		if (stcb->asoc.primary_destination->dest_state &
1368		    SCTP_ADDR_UNCONFIRMED) {
1369			/*
1370			 * The primary is where we sent the INIT, we can
1371			 * always consider it confirmed when the INIT-ACK is
1372			 * returned. Do this before we load addresses
1373			 * though.
1374			 */
1375			stcb->asoc.primary_destination->dest_state &=
1376			    ~SCTP_ADDR_UNCONFIRMED;
1377			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1378			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1379		}
1380		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1381		    net, abort_no_unlock,
1382		    use_mflowid, mflowid,
1383		    vrf_id) < 0) {
1384			/* error in parsing parameters */
1385			return (-1);
1386		}
1387		/* update our state */
1388		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1389		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1390
1391		/* reset the RTO calc */
1392		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1393			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1394			    stcb->asoc.overall_error_count,
1395			    0,
1396			    SCTP_FROM_SCTP_INPUT,
1397			    __LINE__);
1398		}
1399		stcb->asoc.overall_error_count = 0;
1400		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1401		/*
1402		 * collapse the init timer back in case of a exponential
1403		 * backoff
1404		 */
1405		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1406		    stcb, net);
1407		/*
1408		 * the send at the end of the inbound data processing will
1409		 * cause the cookie to be sent
1410		 */
1411		break;
1412	case SCTP_STATE_SHUTDOWN_SENT:
1413		/* incorrect state... discard */
1414		break;
1415	case SCTP_STATE_COOKIE_ECHOED:
1416		/* incorrect state... discard */
1417		break;
1418	case SCTP_STATE_OPEN:
1419		/* incorrect state... discard */
1420		break;
1421	case SCTP_STATE_EMPTY:
1422	case SCTP_STATE_INUSE:
1423	default:
1424		/* incorrect state... discard */
1425		return (-1);
1426		break;
1427	}
1428	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1429	return (0);
1430}
1431
1432static struct sctp_tcb *
1433sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1434    struct sockaddr *src, struct sockaddr *dst,
1435    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1436    struct sctp_inpcb *inp, struct sctp_nets **netp,
1437    struct sockaddr *init_src, int *notification,
1438    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1439    uint8_t use_mflowid, uint32_t mflowid,
1440    uint32_t vrf_id, uint16_t port);
1441
1442
1443/*
1444 * handle a state cookie for an existing association m: input packet mbuf
1445 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1446 * "split" mbuf and the cookie signature does not exist offset: offset into
1447 * mbuf to the cookie-echo chunk
1448 */
1449static struct sctp_tcb *
1450sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1451    struct sockaddr *src, struct sockaddr *dst,
1452    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1453    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1454    struct sockaddr *init_src, int *notification,
1455    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1456    uint8_t use_mflowid, uint32_t mflowid,
1457    uint32_t vrf_id, uint16_t port)
1458{
1459	struct sctp_association *asoc;
1460	struct sctp_init_chunk *init_cp, init_buf;
1461	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1462	struct sctp_nets *net;
1463	struct mbuf *op_err;
1464	int init_offset, initack_offset, i;
1465	int retval;
1466	int spec_flag = 0;
1467	uint32_t how_indx;
1468
1469	net = *netp;
1470	/* I know that the TCB is non-NULL from the caller */
1471	asoc = &stcb->asoc;
1472	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1473		if (asoc->cookie_how[how_indx] == 0)
1474			break;
1475	}
1476	if (how_indx < sizeof(asoc->cookie_how)) {
1477		asoc->cookie_how[how_indx] = 1;
1478	}
1479	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1480		/* SHUTDOWN came in after sending INIT-ACK */
1481		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1482		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1483		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1484		    use_mflowid, mflowid,
1485		    vrf_id, net->port);
1486		if (how_indx < sizeof(asoc->cookie_how))
1487			asoc->cookie_how[how_indx] = 2;
1488		return (NULL);
1489	}
1490	/*
1491	 * find and validate the INIT chunk in the cookie (peer's info) the
1492	 * INIT should start after the cookie-echo header struct (chunk
1493	 * header, state cookie header struct)
1494	 */
1495	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1496
1497	init_cp = (struct sctp_init_chunk *)
1498	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1499	    (uint8_t *) & init_buf);
1500	if (init_cp == NULL) {
1501		/* could not pull a INIT chunk in cookie */
1502		return (NULL);
1503	}
1504	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1505		return (NULL);
1506	}
1507	/*
1508	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1509	 * INIT-ACK follows the INIT chunk
1510	 */
1511	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1512	initack_cp = (struct sctp_init_ack_chunk *)
1513	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1514	    (uint8_t *) & initack_buf);
1515	if (initack_cp == NULL) {
1516		/* could not pull INIT-ACK chunk in cookie */
1517		return (NULL);
1518	}
1519	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1520		return (NULL);
1521	}
1522	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1523	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1524		/*
1525		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1526		 * to get into the OPEN state
1527		 */
1528		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1529			/*-
1530			 * Opps, this means that we somehow generated two vtag's
1531			 * the same. I.e. we did:
1532			 *  Us               Peer
1533			 *   <---INIT(tag=a)------
1534			 *   ----INIT-ACK(tag=t)-->
1535			 *   ----INIT(tag=t)------> *1
1536			 *   <---INIT-ACK(tag=a)---
1537                         *   <----CE(tag=t)------------- *2
1538			 *
1539			 * At point *1 we should be generating a different
1540			 * tag t'. Which means we would throw away the CE and send
1541			 * ours instead. Basically this is case C (throw away side).
1542			 */
1543			if (how_indx < sizeof(asoc->cookie_how))
1544				asoc->cookie_how[how_indx] = 17;
1545			return (NULL);
1546
1547		}
1548		switch (SCTP_GET_STATE(asoc)) {
1549		case SCTP_STATE_COOKIE_WAIT:
1550		case SCTP_STATE_COOKIE_ECHOED:
1551			/*
1552			 * INIT was sent but got a COOKIE_ECHO with the
1553			 * correct tags... just accept it...but we must
1554			 * process the init so that we can make sure we have
1555			 * the right seq no's.
1556			 */
1557			/* First we must process the INIT !! */
1558			retval = sctp_process_init(init_cp, stcb);
1559			if (retval < 0) {
1560				if (how_indx < sizeof(asoc->cookie_how))
1561					asoc->cookie_how[how_indx] = 3;
1562				return (NULL);
1563			}
1564			/* we have already processed the INIT so no problem */
1565			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1566			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1567			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1568			/* update current state */
1569			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1570				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1571			else
1572				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1573
1574			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1575			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1576				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1577				    stcb->sctp_ep, stcb, asoc->primary_destination);
1578			}
1579			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1580			sctp_stop_all_cookie_timers(stcb);
1581			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1582			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1583			    (inp->sctp_socket->so_qlimit == 0)
1584			    ) {
1585#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1586				struct socket *so;
1587
1588#endif
1589				/*
1590				 * Here is where collision would go if we
1591				 * did a connect() and instead got a
1592				 * init/init-ack/cookie done before the
1593				 * init-ack came back..
1594				 */
1595				stcb->sctp_ep->sctp_flags |=
1596				    SCTP_PCB_FLAGS_CONNECTED;
1597#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1598				so = SCTP_INP_SO(stcb->sctp_ep);
1599				atomic_add_int(&stcb->asoc.refcnt, 1);
1600				SCTP_TCB_UNLOCK(stcb);
1601				SCTP_SOCKET_LOCK(so, 1);
1602				SCTP_TCB_LOCK(stcb);
1603				atomic_add_int(&stcb->asoc.refcnt, -1);
1604				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1605					SCTP_SOCKET_UNLOCK(so, 1);
1606					return (NULL);
1607				}
1608#endif
1609				soisconnected(stcb->sctp_socket);
1610#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1611				SCTP_SOCKET_UNLOCK(so, 1);
1612#endif
1613			}
1614			/* notify upper layer */
1615			*notification = SCTP_NOTIFY_ASSOC_UP;
1616			/*
1617			 * since we did not send a HB make sure we don't
1618			 * double things
1619			 */
1620			net->hb_responded = 1;
1621			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1622			    &cookie->time_entered,
1623			    sctp_align_unsafe_makecopy,
1624			    SCTP_RTT_FROM_NON_DATA);
1625
1626			if (stcb->asoc.sctp_autoclose_ticks &&
1627			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1628				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1629				    inp, stcb, NULL);
1630			}
1631			break;
1632		default:
1633			/*
1634			 * we're in the OPEN state (or beyond), so peer must
1635			 * have simply lost the COOKIE-ACK
1636			 */
1637			break;
1638		}		/* end switch */
1639		sctp_stop_all_cookie_timers(stcb);
1640		/*
1641		 * We ignore the return code here.. not sure if we should
1642		 * somehow abort.. but we do have an existing asoc. This
1643		 * really should not fail.
1644		 */
1645		if (sctp_load_addresses_from_init(stcb, m,
1646		    init_offset + sizeof(struct sctp_init_chunk),
1647		    initack_offset, src, dst, init_src)) {
1648			if (how_indx < sizeof(asoc->cookie_how))
1649				asoc->cookie_how[how_indx] = 4;
1650			return (NULL);
1651		}
1652		/* respond with a COOKIE-ACK */
1653		sctp_toss_old_cookies(stcb, asoc);
1654		sctp_send_cookie_ack(stcb);
1655		if (how_indx < sizeof(asoc->cookie_how))
1656			asoc->cookie_how[how_indx] = 5;
1657		return (stcb);
1658	}
1659	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1660	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1661	    cookie->tie_tag_my_vtag == 0 &&
1662	    cookie->tie_tag_peer_vtag == 0) {
1663		/*
1664		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1665		 */
1666		if (how_indx < sizeof(asoc->cookie_how))
1667			asoc->cookie_how[how_indx] = 6;
1668		return (NULL);
1669	}
1670	/*
1671	 * If nat support, and the below and stcb is established, send back
1672	 * a ABORT(colliding state) if we are established.
1673	 */
1674	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1675	    (asoc->peer_supports_nat) &&
1676	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1677	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1678	    (asoc->peer_vtag == 0)))) {
1679		/*
1680		 * Special case - Peer's support nat. We may have two init's
1681		 * that we gave out the same tag on since one was not
1682		 * established.. i.e. we get INIT from host-1 behind the nat
1683		 * and we respond tag-a, we get a INIT from host-2 behind
1684		 * the nat and we get tag-a again. Then we bring up host-1
1685		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1686		 * Now we have colliding state. We must send an abort here
1687		 * with colliding state indication.
1688		 */
1689		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1690		sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1691		    use_mflowid, mflowid,
1692		    vrf_id, port);
1693		return (NULL);
1694	}
1695	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1696	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1697	    (asoc->peer_vtag == 0))) {
1698		/*
1699		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1700		 * should be ok, re-accept peer info
1701		 */
1702		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1703			/*
1704			 * Extension of case C. If we hit this, then the
1705			 * random number generator returned the same vtag
1706			 * when we first sent our INIT-ACK and when we later
1707			 * sent our INIT. The side with the seq numbers that
1708			 * are different will be the one that normnally
1709			 * would have hit case C. This in effect "extends"
1710			 * our vtags in this collision case to be 64 bits.
1711			 * The same collision could occur aka you get both
1712			 * vtag and seq number the same twice in a row.. but
1713			 * is much less likely. If it did happen then we
1714			 * would proceed through and bring up the assoc.. we
1715			 * may end up with the wrong stream setup however..
1716			 * which would be bad.. but there is no way to
1717			 * tell.. until we send on a stream that does not
1718			 * exist :-)
1719			 */
1720			if (how_indx < sizeof(asoc->cookie_how))
1721				asoc->cookie_how[how_indx] = 7;
1722
1723			return (NULL);
1724		}
1725		if (how_indx < sizeof(asoc->cookie_how))
1726			asoc->cookie_how[how_indx] = 8;
1727		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1728		sctp_stop_all_cookie_timers(stcb);
1729		/*
1730		 * since we did not send a HB make sure we don't double
1731		 * things
1732		 */
1733		net->hb_responded = 1;
1734		if (stcb->asoc.sctp_autoclose_ticks &&
1735		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1736			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1737			    NULL);
1738		}
1739		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1740		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1741
1742		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1743			/*
1744			 * Ok the peer probably discarded our data (if we
1745			 * echoed a cookie+data). So anything on the
1746			 * sent_queue should be marked for retransmit, we
1747			 * may not get something to kick us so it COULD
1748			 * still take a timeout to move these.. but it can't
1749			 * hurt to mark them.
1750			 */
1751			struct sctp_tmit_chunk *chk;
1752
1753			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1754				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1755					chk->sent = SCTP_DATAGRAM_RESEND;
1756					sctp_flight_size_decrease(chk);
1757					sctp_total_flight_decrease(stcb, chk);
1758					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1759					spec_flag++;
1760				}
1761			}
1762
1763		}
1764		/* process the INIT info (peer's info) */
1765		retval = sctp_process_init(init_cp, stcb);
1766		if (retval < 0) {
1767			if (how_indx < sizeof(asoc->cookie_how))
1768				asoc->cookie_how[how_indx] = 9;
1769			return (NULL);
1770		}
1771		if (sctp_load_addresses_from_init(stcb, m,
1772		    init_offset + sizeof(struct sctp_init_chunk),
1773		    initack_offset, src, dst, init_src)) {
1774			if (how_indx < sizeof(asoc->cookie_how))
1775				asoc->cookie_how[how_indx] = 10;
1776			return (NULL);
1777		}
1778		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1779		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1780			*notification = SCTP_NOTIFY_ASSOC_UP;
1781
1782			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1783			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1784			    (inp->sctp_socket->so_qlimit == 0)) {
1785#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1786				struct socket *so;
1787
1788#endif
1789				stcb->sctp_ep->sctp_flags |=
1790				    SCTP_PCB_FLAGS_CONNECTED;
1791#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1792				so = SCTP_INP_SO(stcb->sctp_ep);
1793				atomic_add_int(&stcb->asoc.refcnt, 1);
1794				SCTP_TCB_UNLOCK(stcb);
1795				SCTP_SOCKET_LOCK(so, 1);
1796				SCTP_TCB_LOCK(stcb);
1797				atomic_add_int(&stcb->asoc.refcnt, -1);
1798				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1799					SCTP_SOCKET_UNLOCK(so, 1);
1800					return (NULL);
1801				}
1802#endif
1803				soisconnected(stcb->sctp_socket);
1804#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1805				SCTP_SOCKET_UNLOCK(so, 1);
1806#endif
1807			}
1808			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1809				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1810			else
1811				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1812			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1813		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1814			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1815		} else {
1816			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1817		}
1818		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1819		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1820			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1821			    stcb->sctp_ep, stcb, asoc->primary_destination);
1822		}
1823		sctp_stop_all_cookie_timers(stcb);
1824		sctp_toss_old_cookies(stcb, asoc);
1825		sctp_send_cookie_ack(stcb);
1826		if (spec_flag) {
1827			/*
1828			 * only if we have retrans set do we do this. What
1829			 * this call does is get only the COOKIE-ACK out and
1830			 * then when we return the normal call to
1831			 * sctp_chunk_output will get the retrans out behind
1832			 * this.
1833			 */
1834			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1835		}
1836		if (how_indx < sizeof(asoc->cookie_how))
1837			asoc->cookie_how[how_indx] = 11;
1838
1839		return (stcb);
1840	}
1841	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1842	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1843	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1844	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1845	    cookie->tie_tag_peer_vtag != 0) {
1846		struct sctpasochead *head;
1847
1848#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1849		struct socket *so;
1850
1851#endif
1852
1853		if (asoc->peer_supports_nat) {
1854			/*
1855			 * This is a gross gross hack. Just call the
1856			 * cookie_new code since we are allowing a duplicate
1857			 * association. I hope this works...
1858			 */
1859			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1860			    sh, cookie, cookie_len,
1861			    inp, netp, init_src, notification,
1862			    auth_skipped, auth_offset, auth_len,
1863			    use_mflowid, mflowid,
1864			    vrf_id, port));
1865		}
1866		/*
1867		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1868		 */
1869		/* temp code */
1870		if (how_indx < sizeof(asoc->cookie_how))
1871			asoc->cookie_how[how_indx] = 12;
1872		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1873		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1874
1875		/* notify upper layer */
1876		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1877		atomic_add_int(&stcb->asoc.refcnt, 1);
1878		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1879		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1880		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1881			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1882		}
1883		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1884			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1885		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1886			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1887		}
1888		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1889			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1890			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1891			    stcb->sctp_ep, stcb, asoc->primary_destination);
1892
1893		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1894			/* move to OPEN state, if not in SHUTDOWN_SENT */
1895			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1896		}
1897		asoc->pre_open_streams =
1898		    ntohs(initack_cp->init.num_outbound_streams);
1899		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1900		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1901		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1902
1903		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1904
1905		asoc->str_reset_seq_in = asoc->init_seq_number;
1906
1907		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1908		if (asoc->mapping_array) {
1909			memset(asoc->mapping_array, 0,
1910			    asoc->mapping_array_size);
1911		}
1912		if (asoc->nr_mapping_array) {
1913			memset(asoc->nr_mapping_array, 0,
1914			    asoc->mapping_array_size);
1915		}
1916		SCTP_TCB_UNLOCK(stcb);
1917#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1918		so = SCTP_INP_SO(stcb->sctp_ep);
1919		SCTP_SOCKET_LOCK(so, 1);
1920#endif
1921		SCTP_INP_INFO_WLOCK();
1922		SCTP_INP_WLOCK(stcb->sctp_ep);
1923		SCTP_TCB_LOCK(stcb);
1924		atomic_add_int(&stcb->asoc.refcnt, -1);
1925		/* send up all the data */
1926		SCTP_TCB_SEND_LOCK(stcb);
1927
1928		sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
1929		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1930			stcb->asoc.strmout[i].chunks_on_queues = 0;
1931			stcb->asoc.strmout[i].stream_no = i;
1932			stcb->asoc.strmout[i].next_sequence_send = 0;
1933			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1934		}
1935		/* process the INIT-ACK info (my info) */
1936		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1937		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1938
1939		/* pull from vtag hash */
1940		LIST_REMOVE(stcb, sctp_asocs);
1941		/* re-insert to new vtag position */
1942		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1943		    SCTP_BASE_INFO(hashasocmark))];
1944		/*
1945		 * put it in the bucket in the vtag hash of assoc's for the
1946		 * system
1947		 */
1948		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1949
1950		SCTP_TCB_SEND_UNLOCK(stcb);
1951		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1952		SCTP_INP_INFO_WUNLOCK();
1953#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1954		SCTP_SOCKET_UNLOCK(so, 1);
1955#endif
1956		asoc->total_flight = 0;
1957		asoc->total_flight_count = 0;
1958		/* process the INIT info (peer's info) */
1959		retval = sctp_process_init(init_cp, stcb);
1960		if (retval < 0) {
1961			if (how_indx < sizeof(asoc->cookie_how))
1962				asoc->cookie_how[how_indx] = 13;
1963
1964			return (NULL);
1965		}
1966		/*
1967		 * since we did not send a HB make sure we don't double
1968		 * things
1969		 */
1970		net->hb_responded = 1;
1971
1972		if (sctp_load_addresses_from_init(stcb, m,
1973		    init_offset + sizeof(struct sctp_init_chunk),
1974		    initack_offset, src, dst, init_src)) {
1975			if (how_indx < sizeof(asoc->cookie_how))
1976				asoc->cookie_how[how_indx] = 14;
1977
1978			return (NULL);
1979		}
1980		/* respond with a COOKIE-ACK */
1981		sctp_stop_all_cookie_timers(stcb);
1982		sctp_toss_old_cookies(stcb, asoc);
1983		sctp_send_cookie_ack(stcb);
1984		if (how_indx < sizeof(asoc->cookie_how))
1985			asoc->cookie_how[how_indx] = 15;
1986
1987		return (stcb);
1988	}
1989	if (how_indx < sizeof(asoc->cookie_how))
1990		asoc->cookie_how[how_indx] = 16;
1991	/* all other cases... */
1992	return (NULL);
1993}
1994
1995
1996/*
1997 * handle a state cookie for a new association m: input packet mbuf chain--
1998 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1999 * and the cookie signature does not exist offset: offset into mbuf to the
2000 * cookie-echo chunk length: length of the cookie chunk to: where the init
2001 * was from returns a new TCB
2002 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	/* The INIT-ACK is packed directly after the (32-bit padded) INIT. */
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		/* not an INIT-ACK where one was expected: bad cookie */
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/*
	 * get the correct sctp_nets -- netp may be NULL when the caller
	 * does not need the net back, so every use below is guarded
	 */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * Take a reference before dropping the TCB lock for the
		 * socket-lock/teardown dance below, so the assoc memory
		 * cannot be freed out from under us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/*
	 * process the INIT info (peer's info) -- only done when the caller
	 * supplied a nets pointer; otherwise treated as success.
	 * NOTE(review): skipping sctp_process_init() when netp == NULL
	 * looks intentional but is undocumented -- confirm against callers.
	 */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		/* INIT processing failed: tear down the half-built assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		/*
		 * NOTE(review): SCTP_LOC_16 is also used by the
		 * scope-mismatch abort above and by the restart path in
		 * sctp_process_cookie_existing(); a distinct location code
		 * here would make the debug trace unambiguous.
		 */
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
		break;
#endif
	default:
		/* unknown local address type in the cookie: tear down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * ref/unlock/lock dance: socket lock must be taken without
		 * holding the TCB lock; the reference keeps the assoc alive
		 * across the gap.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we had the lock dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		/* cookie round-trip gives the initial RTO estimate */
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2358
2359/*
2360 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2361 * we NEED to make sure we are not already using the vtag. If so we
2362 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2363	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2364							    SCTP_BASE_INFO(hashasocmark))];
2365	LIST_FOREACH(stcb, head, sctp_asocs) {
2366	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2367		       -- SEND ABORT - TRY AGAIN --
2368		}
2369	}
2370*/
2371
2372/*
2373 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2374 * existing (non-NULL) TCB
2375 */
2376static struct mbuf *
2377sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2378    struct sockaddr *src, struct sockaddr *dst,
2379    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2380    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2381    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2382    struct sctp_tcb **locked_tcb,
2383    uint8_t use_mflowid, uint32_t mflowid,
2384    uint32_t vrf_id, uint16_t port)
2385{
2386	struct sctp_state_cookie *cookie;
2387	struct sctp_tcb *l_stcb = *stcb;
2388	struct sctp_inpcb *l_inp;
2389	struct sockaddr *to;
2390	struct sctp_pcb *ep;
2391	struct mbuf *m_sig;
2392	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2393	uint8_t *sig;
2394	uint8_t cookie_ok = 0;
2395	unsigned int sig_offset, cookie_offset;
2396	unsigned int cookie_len;
2397	struct timeval now;
2398	struct timeval time_expires;
2399	int notification = 0;
2400	struct sctp_nets *netl;
2401	int had_a_existing_tcb = 0;
2402	int send_int_conf = 0;
2403
2404#ifdef INET
2405	struct sockaddr_in sin;
2406
2407#endif
2408#ifdef INET6
2409	struct sockaddr_in6 sin6;
2410
2411#endif
2412
2413	SCTPDBG(SCTP_DEBUG_INPUT2,
2414	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2415
2416	if (inp_p == NULL) {
2417		return (NULL);
2418	}
2419	cookie = &cp->cookie;
2420	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2421	cookie_len = ntohs(cp->ch.chunk_length);
2422
2423	if ((cookie->peerport != sh->src_port) &&
2424	    (cookie->myport != sh->dest_port) &&
2425	    (cookie->my_vtag != sh->v_tag)) {
2426		/*
2427		 * invalid ports or bad tag.  Note that we always leave the
2428		 * v_tag in the header in network order and when we stored
2429		 * it in the my_vtag slot we also left it in network order.
2430		 * This maintains the match even though it may be in the
2431		 * opposite byte order of the machine :->
2432		 */
2433		return (NULL);
2434	}
2435	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2436	    sizeof(struct sctp_init_chunk) +
2437	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2438		/* cookie too small */
2439		return (NULL);
2440	}
2441	/*
2442	 * split off the signature into its own mbuf (since it should not be
2443	 * calculated in the sctp_hmac_m() call).
2444	 */
2445	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2446	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2447	if (m_sig == NULL) {
2448		/* out of memory or ?? */
2449		return (NULL);
2450	}
2451#ifdef SCTP_MBUF_LOGGING
2452	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2453		struct mbuf *mat;
2454
2455		for (mat = m_sig; mat; mat = SCTP_BUF_NEXT(mat)) {
2456			if (SCTP_BUF_IS_EXTENDED(mat)) {
2457				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2458			}
2459		}
2460	}
2461#endif
2462
2463	/*
2464	 * compute the signature/digest for the cookie
2465	 */
2466	ep = &(*inp_p)->sctp_ep;
2467	l_inp = *inp_p;
2468	if (l_stcb) {
2469		SCTP_TCB_UNLOCK(l_stcb);
2470	}
2471	SCTP_INP_RLOCK(l_inp);
2472	if (l_stcb) {
2473		SCTP_TCB_LOCK(l_stcb);
2474	}
2475	/* which cookie is it? */
2476	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2477	    (ep->current_secret_number != ep->last_secret_number)) {
2478		/* it's the old cookie */
2479		(void)sctp_hmac_m(SCTP_HMAC,
2480		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2481		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2482	} else {
2483		/* it's the current cookie */
2484		(void)sctp_hmac_m(SCTP_HMAC,
2485		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2486		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2487	}
2488	/* get the signature */
2489	SCTP_INP_RUNLOCK(l_inp);
2490	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2491	if (sig == NULL) {
2492		/* couldn't find signature */
2493		sctp_m_freem(m_sig);
2494		return (NULL);
2495	}
2496	/* compare the received digest with the computed digest */
2497	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2498		/* try the old cookie? */
2499		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2500		    (ep->current_secret_number != ep->last_secret_number)) {
2501			/* compute digest with old */
2502			(void)sctp_hmac_m(SCTP_HMAC,
2503			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2504			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2505			/* compare */
2506			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2507				cookie_ok = 1;
2508		}
2509	} else {
2510		cookie_ok = 1;
2511	}
2512
2513	/*
2514	 * Now before we continue we must reconstruct our mbuf so that
2515	 * normal processing of any other chunks will work.
2516	 */
2517	{
2518		struct mbuf *m_at;
2519
2520		m_at = m;
2521		while (SCTP_BUF_NEXT(m_at) != NULL) {
2522			m_at = SCTP_BUF_NEXT(m_at);
2523		}
2524		SCTP_BUF_NEXT(m_at) = m_sig;
2525	}
2526
2527	if (cookie_ok == 0) {
2528		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2529		SCTPDBG(SCTP_DEBUG_INPUT2,
2530		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2531		    (uint32_t) offset, cookie_offset, sig_offset);
2532		return (NULL);
2533	}
2534	/*
2535	 * check the cookie timestamps to be sure it's not stale
2536	 */
2537	(void)SCTP_GETTIME_TIMEVAL(&now);
2538	/* Expire time is in Ticks, so we convert to seconds */
2539	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2540	time_expires.tv_usec = cookie->time_entered.tv_usec;
2541	/*
2542	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2543	 * is undefined.
2544	 */
2545	if (timevalcmp(&now, &time_expires, >)) {
2546		/* cookie is stale! */
2547		struct mbuf *op_err;
2548		struct sctp_stale_cookie_msg *scm;
2549		uint32_t tim;
2550
2551		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2552		    0, M_DONTWAIT, 1, MT_DATA);
2553		if (op_err == NULL) {
2554			/* FOOBAR */
2555			return (NULL);
2556		}
2557		/* Set the len */
2558		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2559		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2560		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2561		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2562		    (sizeof(uint32_t))));
2563		/* seconds to usec */
2564		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2565		/* add in usec */
2566		if (tim == 0)
2567			tim = now.tv_usec - cookie->time_entered.tv_usec;
2568		scm->time_usec = htonl(tim);
2569		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2570		    use_mflowid, mflowid,
2571		    vrf_id, port);
2572		return (NULL);
2573	}
2574	/*
2575	 * Now we must see with the lookup address if we have an existing
2576	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2577	 * and a INIT collided with us and somewhere the peer sent the
2578	 * cookie on another address besides the single address our assoc
2579	 * had for him. In this case we will have one of the tie-tags set at
2580	 * least AND the address field in the cookie can be used to look it
2581	 * up.
2582	 */
2583	to = NULL;
2584	switch (cookie->addr_type) {
2585#ifdef INET6
2586	case SCTP_IPV6_ADDRESS:
2587		memset(&sin6, 0, sizeof(sin6));
2588		sin6.sin6_family = AF_INET6;
2589		sin6.sin6_len = sizeof(sin6);
2590		sin6.sin6_port = sh->src_port;
2591		sin6.sin6_scope_id = cookie->scope_id;
2592		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2593		    sizeof(sin6.sin6_addr.s6_addr));
2594		to = (struct sockaddr *)&sin6;
2595		break;
2596#endif
2597#ifdef INET
2598	case SCTP_IPV4_ADDRESS:
2599		memset(&sin, 0, sizeof(sin));
2600		sin.sin_family = AF_INET;
2601		sin.sin_len = sizeof(sin);
2602		sin.sin_port = sh->src_port;
2603		sin.sin_addr.s_addr = cookie->address[0];
2604		to = (struct sockaddr *)&sin;
2605		break;
2606#endif
2607	default:
2608		/* This should not happen */
2609		return (NULL);
2610	}
2611	if ((*stcb == NULL) && to) {
2612		/* Yep, lets check */
2613		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2614		if (*stcb == NULL) {
2615			/*
2616			 * We should have only got back the same inp. If we
2617			 * got back a different ep we have a problem. The
2618			 * original findep got back l_inp and now
2619			 */
2620			if (l_inp != *inp_p) {
2621				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2622			}
2623		} else {
2624			if (*locked_tcb == NULL) {
2625				/*
2626				 * In this case we found the assoc only
2627				 * after we locked the create lock. This
2628				 * means we are in a colliding case and we
2629				 * must make sure that we unlock the tcb if
2630				 * its one of the cases where we throw away
2631				 * the incoming packets.
2632				 */
2633				*locked_tcb = *stcb;
2634
2635				/*
2636				 * We must also increment the inp ref count
2637				 * since the ref_count flags was set when we
2638				 * did not find the TCB, now we found it
2639				 * which reduces the refcount.. we must
2640				 * raise it back out to balance it all :-)
2641				 */
2642				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2643				if ((*stcb)->sctp_ep != l_inp) {
2644					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2645					    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2646				}
2647			}
2648		}
2649	}
2650	if (to == NULL) {
2651		return (NULL);
2652	}
2653	cookie_len -= SCTP_SIGNATURE_SIZE;
2654	if (*stcb == NULL) {
2655		/* this is the "normal" case... get a new TCB */
2656		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2657		    cookie, cookie_len, *inp_p,
2658		    netp, to, &notification,
2659		    auth_skipped, auth_offset, auth_len,
2660		    use_mflowid, mflowid,
2661		    vrf_id, port);
2662	} else {
2663		/* this is abnormal... cookie-echo on existing TCB */
2664		had_a_existing_tcb = 1;
2665		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2666		    src, dst, sh,
2667		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2668		    &notification, auth_skipped, auth_offset, auth_len,
2669		    use_mflowid, mflowid,
2670		    vrf_id, port);
2671	}
2672
2673	if (*stcb == NULL) {
2674		/* still no TCB... must be bad cookie-echo */
2675		return (NULL);
2676	}
2677	if ((*netp != NULL) && (use_mflowid != 0)) {
2678		(*netp)->flowid = mflowid;
2679#ifdef INVARIANTS
2680		(*netp)->flowidset = 1;
2681#endif
2682	}
2683	/*
2684	 * Ok, we built an association so confirm the address we sent the
2685	 * INIT-ACK to.
2686	 */
2687	netl = sctp_findnet(*stcb, to);
2688	/*
2689	 * This code should in theory NOT run but
2690	 */
2691	if (netl == NULL) {
2692		/* TSNH! Huh, why do I need to add this address here? */
2693		if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2694			return (NULL);
2695		}
2696		netl = sctp_findnet(*stcb, to);
2697	}
2698	if (netl) {
2699		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2700			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2701			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2702			    netl);
2703			send_int_conf = 1;
2704		}
2705	}
2706	sctp_start_net_timers(*stcb);
2707	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2708		if (!had_a_existing_tcb ||
2709		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2710			/*
2711			 * If we have a NEW cookie or the connect never
2712			 * reached the connected state during collision we
2713			 * must do the TCP accept thing.
2714			 */
2715			struct socket *so, *oso;
2716			struct sctp_inpcb *inp;
2717
2718			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2719				/*
2720				 * For a restart we will keep the same
2721				 * socket, no need to do anything. I THINK!!
2722				 */
2723				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2724				if (send_int_conf) {
2725					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2726					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2727				}
2728				return (m);
2729			}
2730			oso = (*inp_p)->sctp_socket;
2731			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2732			SCTP_TCB_UNLOCK((*stcb));
2733			CURVNET_SET(oso->so_vnet);
2734			so = sonewconn(oso, 0
2735			    );
2736			CURVNET_RESTORE();
2737			SCTP_TCB_LOCK((*stcb));
2738			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2739
2740			if (so == NULL) {
2741				struct mbuf *op_err;
2742
2743#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2744				struct socket *pcb_so;
2745
2746#endif
2747				/* Too many sockets */
2748				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2749				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2750				sctp_abort_association(*inp_p, NULL, m, iphlen,
2751				    src, dst, sh, op_err,
2752				    use_mflowid, mflowid,
2753				    vrf_id, port);
2754#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2755				pcb_so = SCTP_INP_SO(*inp_p);
2756				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2757				SCTP_TCB_UNLOCK((*stcb));
2758				SCTP_SOCKET_LOCK(pcb_so, 1);
2759				SCTP_TCB_LOCK((*stcb));
2760				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2761#endif
2762				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2763#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2764				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2765#endif
2766				return (NULL);
2767			}
2768			inp = (struct sctp_inpcb *)so->so_pcb;
2769			SCTP_INP_INCR_REF(inp);
2770			/*
2771			 * We add the unbound flag here so that if we get an
2772			 * soabort() before we get the move_pcb done, we
2773			 * will properly cleanup.
2774			 */
2775			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2776			    SCTP_PCB_FLAGS_CONNECTED |
2777			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2778			    SCTP_PCB_FLAGS_UNBOUND |
2779			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2780			    SCTP_PCB_FLAGS_DONT_WAKE);
2781			inp->sctp_features = (*inp_p)->sctp_features;
2782			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2783			inp->sctp_socket = so;
2784			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2785			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2786			inp->sctp_ecn_enable = (*inp_p)->sctp_ecn_enable;
2787			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2788			inp->sctp_context = (*inp_p)->sctp_context;
2789			inp->local_strreset_support = (*inp_p)->local_strreset_support;
2790			inp->inp_starting_point_for_iterator = NULL;
2791			/*
2792			 * copy in the authentication parameters from the
2793			 * original endpoint
2794			 */
2795			if (inp->sctp_ep.local_hmacs)
2796				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2797			inp->sctp_ep.local_hmacs =
2798			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2799			if (inp->sctp_ep.local_auth_chunks)
2800				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2801			inp->sctp_ep.local_auth_chunks =
2802			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2803
2804			/*
2805			 * Now we must move it from one hash table to
2806			 * another and get the tcb in the right place.
2807			 */
2808
2809			/*
2810			 * This is where the one-2-one socket is put into
2811			 * the accept state waiting for the accept!
2812			 */
2813			if (*stcb) {
2814				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2815			}
2816			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2817
2818			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2819			SCTP_TCB_UNLOCK((*stcb));
2820
2821			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2822			    0);
2823			SCTP_TCB_LOCK((*stcb));
2824			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2825
2826
2827			/*
2828			 * now we must check to see if we were aborted while
2829			 * the move was going on and the lock/unlock
2830			 * happened.
2831			 */
2832			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2833				/*
2834				 * yep it was, we leave the assoc attached
2835				 * to the socket since the sctp_inpcb_free()
2836				 * call will send an abort for us.
2837				 */
2838				SCTP_INP_DECR_REF(inp);
2839				return (NULL);
2840			}
2841			SCTP_INP_DECR_REF(inp);
2842			/* Switch over to the new guy */
2843			*inp_p = inp;
2844			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2845			if (send_int_conf) {
2846				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2847				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2848			}
2849			/*
2850			 * Pull it from the incomplete queue and wake the
2851			 * guy
2852			 */
2853#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2854			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2855			SCTP_TCB_UNLOCK((*stcb));
2856			SCTP_SOCKET_LOCK(so, 1);
2857#endif
2858			soisconnected(so);
2859#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2860			SCTP_TCB_LOCK((*stcb));
2861			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2862			SCTP_SOCKET_UNLOCK(so, 1);
2863#endif
2864			return (m);
2865		}
2866	}
2867	if (notification) {
2868		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2869	}
2870	if (send_int_conf) {
2871		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2872		    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2873	}
2874	return (m);
2875}
2876
/*
 * Process a received COOKIE-ACK: complete the handshake by moving the
 * association from COOKIE-ECHOED to OPEN, notify the ULP, and (re)arm
 * the timers that only run on an established association.  A COOKIE-ACK
 * arriving in any other state only triggers the tail cleanup (tossing
 * cached cookies and restarting the send timer).
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	/*
	 * NOTE(review): net is dereferenced below (net->RTO,
	 * net->hb_responded) without a NULL check - callers are assumed
	 * to pass the network the chunk arrived on; confirm at call sites.
	 */
	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/*
			 * Only sample an RTT when no retransmissions have
			 * occurred, so the COOKIE-ECHO send time
			 * (time_entered) is a valid measurement anchor.
			 */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Platforms with per-socket locks must acquire
			 * the socket lock before the TCB lock; hold a
			 * refcount across the drop/retake so the TCB
			 * cannot be freed in between.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Also reached directly when not in COOKIE-ECHOED (duplicate ACK). */
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
2986
/*
 * Process a received ECN-ECHO (ECNE) chunk: identify the network the
 * CE-marked packet went out on, let the pluggable congestion-control
 * module react (at most once per RTT/window), and always reply with a
 * CWR so the peer stops echoing.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	/* Accept only the two known ECNE formats (new and legacy). */
	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		/* Up-convert to the new format with a packet count of 1. */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn is the highest TSN currently queued to send;
	 * cwnd will not be reduced again until sends pass this point.
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.TSN_seq;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		/* sent_queue is TSN ordered; past tsn means not present */
		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	/* Reduce cwnd only when this ECNE is for a new window. */
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3109
3110static void
3111sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3112{
3113	/*
3114	 * Here we get a CWR from the peer. We must look in the outqueue and
3115	 * make sure that we have a covered ECNE in the control chunk part.
3116	 * If so remove it.
3117	 */
3118	struct sctp_tmit_chunk *chk;
3119	struct sctp_ecne_chunk *ecne;
3120	int override;
3121	uint32_t cwr_tsn;
3122
3123	cwr_tsn = ntohl(cp->tsn);
3124
3125	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3126	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3127		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3128			continue;
3129		}
3130		if ((override == 0) && (chk->whoTo != net)) {
3131			/* Must be from the right src unless override is set */
3132			continue;
3133		}
3134		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3135		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3136			/* this covers this ECNE, we can remove it */
3137			stcb->asoc.ecn_echo_cnt_onq--;
3138			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3139			    sctp_next);
3140			if (chk->data) {
3141				sctp_m_freem(chk->data);
3142				chk->data = NULL;
3143			}
3144			stcb->asoc.ctrl_queue_cnt--;
3145			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3146			if (override == 0) {
3147				break;
3148			}
3149		}
3150	}
3151}
3152
/*
 * Process a received SHUTDOWN-COMPLETE: only meaningful in the
 * SHUTDOWN-ACK-SENT state, where it finishes the graceful shutdown by
 * notifying the ULP, stopping the SHUTDOWN-ACK timer, and freeing the
 * association.  The TCB lock is held on entry and released on every
 * path (explicitly, or inside sctp_free_assoc()).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
#ifdef INVARIANTS
	/* by this point in the shutdown sequence all queues must be empty */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Platforms with per-socket locks need the socket lock taken
	 * before the TCB lock; hold a refcount across the drop/retake so
	 * the TCB cannot be freed in between.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3209
/*
 * Handle one chunk reported in a PACKET-DROPPED report: re-queue or
 * re-send the dropped chunk according to its type.  flg carries the
 * report flags (SCTP_BADCRC, SCTP_FROM_MIDDLE_BOX).  Returns 0 on
 * success (including "nothing to do"), -1 if the report's payload
 * bytes do not match what we actually sent (a bogus report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/*
			 * First pass: assume the sent queue is TSN
			 * ordered so we can stop early.
			 */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * Only act on reports that came from a
				 * middle box or carried a bad CRC.
				 */
				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reported payload bytes match
				 * the chunk we have queued; a mismatch
				 * means the report is bogus.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				/*
				 * Recount RESEND-marked chunks and repair
				 * sent_queue_retran_cnt if it drifted.
				 * (This bare block runs unconditionally.)
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* mark the first queued ASCONF for retransmission */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* mark a queued COOKIE-ECHO for retransmission */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3447
3448void
3449sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list)
3450{
3451	uint32_t i;
3452	uint16_t temp;
3453
3454	/*
3455	 * We set things to 0xffff since this is the last delivered sequence
3456	 * and we will be sending in 0 after the reset.
3457	 */
3458
3459	if (number_entries) {
3460		for (i = 0; i < number_entries; i++) {
3461			temp = ntohs(list[i]);
3462			if (temp >= stcb->asoc.streamincnt) {
3463				continue;
3464			}
3465			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3466		}
3467	} else {
3468		list = NULL;
3469		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3470			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3471		}
3472	}
3473	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3474}
3475
3476static void
3477sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list)
3478{
3479	uint32_t i;
3480	uint16_t temp;
3481
3482	if (number_entries > 0) {
3483		for (i = 0; i < number_entries; i++) {
3484			temp = ntohs(list[i]);
3485			if (temp >= stcb->asoc.streamoutcnt) {
3486				/* no such stream */
3487				continue;
3488			}
3489			stcb->asoc.strmout[temp].next_sequence_send = 0;
3490		}
3491	} else {
3492		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3493			stcb->asoc.strmout[i].next_sequence_send = 0;
3494		}
3495	}
3496	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3497}
3498
3499
3500struct sctp_stream_reset_out_request *
3501sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3502{
3503	struct sctp_association *asoc;
3504	struct sctp_chunkhdr *ch;
3505	struct sctp_stream_reset_out_request *r;
3506	struct sctp_tmit_chunk *chk;
3507	int len, clen;
3508
3509	asoc = &stcb->asoc;
3510	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3511		asoc->stream_reset_outstanding = 0;
3512		return (NULL);
3513	}
3514	if (stcb->asoc.str_reset == NULL) {
3515		asoc->stream_reset_outstanding = 0;
3516		return (NULL);
3517	}
3518	chk = stcb->asoc.str_reset;
3519	if (chk->data == NULL) {
3520		return (NULL);
3521	}
3522	if (bchk) {
3523		/* he wants a copy of the chk pointer */
3524		*bchk = chk;
3525	}
3526	clen = chk->send_size;
3527	ch = mtod(chk->data, struct sctp_chunkhdr *);
3528	r = (struct sctp_stream_reset_out_request *)(ch + 1);
3529	if (ntohl(r->request_seq) == seq) {
3530		/* found it */
3531		return (r);
3532	}
3533	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3534	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3535		/* move to the next one, there can only be a max of two */
3536		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3537		if (ntohl(r->request_seq) == seq) {
3538			return (r);
3539		}
3540	}
3541	/* that seq is not here */
3542	return (NULL);
3543}
3544
3545static void
3546sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3547{
3548	struct sctp_association *asoc;
3549	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3550
3551	if (stcb->asoc.str_reset == NULL) {
3552		return;
3553	}
3554	asoc = &stcb->asoc;
3555
3556	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3557	TAILQ_REMOVE(&asoc->control_send_queue,
3558	    chk,
3559	    sctp_next);
3560	if (chk->data) {
3561		sctp_m_freem(chk->data);
3562		chk->data = NULL;
3563	}
3564	asoc->ctrl_queue_cnt--;
3565	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3566	/* sa_ignore NO_NULL_CHK */
3567	stcb->asoc.str_reset = NULL;
3568}
3569
3570
3571static int
3572sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3573    uint32_t seq, uint32_t action,
3574    struct sctp_stream_reset_response *respin)
3575{
3576	uint16_t type;
3577	int lparm_len;
3578	struct sctp_association *asoc = &stcb->asoc;
3579	struct sctp_tmit_chunk *chk;
3580	struct sctp_stream_reset_out_request *srparam;
3581	uint32_t number_entries;
3582
3583	if (asoc->stream_reset_outstanding == 0) {
3584		/* duplicate */
3585		return (0);
3586	}
3587	if (seq == stcb->asoc.str_reset_seq_out) {
3588		srparam = sctp_find_stream_reset(stcb, seq, &chk);
3589		if (srparam) {
3590			stcb->asoc.str_reset_seq_out++;
3591			type = ntohs(srparam->ph.param_type);
3592			lparm_len = ntohs(srparam->ph.param_length);
3593			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3594				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3595				asoc->stream_reset_out_is_outstanding = 0;
3596				if (asoc->stream_reset_outstanding)
3597					asoc->stream_reset_outstanding--;
3598				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3599					/* do it */
3600					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
3601				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3602					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3603				} else {
3604					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3605				}
3606			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3607				/* Answered my request */
3608				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3609				if (asoc->stream_reset_outstanding)
3610					asoc->stream_reset_outstanding--;
3611				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3612					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3613					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3614				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3615					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3616					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3617				}
3618			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3619				/* Ok we now may have more streams */
3620				int num_stream;
3621
3622				num_stream = stcb->asoc.strm_pending_add_size;
3623				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3624					/* TSNH */
3625					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3626				}
3627				stcb->asoc.strm_pending_add_size = 0;
3628				if (asoc->stream_reset_outstanding)
3629					asoc->stream_reset_outstanding--;
3630				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3631					/* Put the new streams into effect */
3632					stcb->asoc.streamoutcnt += num_stream;
3633					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
3634				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3635					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3636					    SCTP_STREAM_CHANGE_DENIED);
3637				} else {
3638					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3639					    SCTP_STREAM_CHANGE_FAILED);
3640				}
3641			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3642				if (asoc->stream_reset_outstanding)
3643					asoc->stream_reset_outstanding--;
3644				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3645					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3646					    SCTP_STREAM_CHANGE_DENIED);
3647				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3648					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3649					    SCTP_STREAM_CHANGE_FAILED);
3650				}
3651			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3652				/**
3653				 * a) Adopt the new in tsn.
3654				 * b) reset the map
3655				 * c) Adopt the new out-tsn
3656				 */
3657				struct sctp_stream_reset_response_tsn *resp;
3658				struct sctp_forward_tsn_chunk fwdtsn;
3659				int abort_flag = 0;
3660
3661				if (respin == NULL) {
3662					/* huh ? */
3663					return (0);
3664				}
3665				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
3666					return (0);
3667				}
3668				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3669					resp = (struct sctp_stream_reset_response_tsn *)respin;
3670					asoc->stream_reset_outstanding--;
3671					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3672					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3673					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3674					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3675					if (abort_flag) {
3676						return (1);
3677					}
3678					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3679					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3680						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3681					}
3682					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3683					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3684					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3685
3686					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3687					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
3688
3689					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3690					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3691
3692					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3693					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3694					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
3695				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3696					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3697					    SCTP_ASSOC_RESET_DENIED);
3698				} else {
3699					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3700					    SCTP_ASSOC_RESET_FAILED);
3701				}
3702			}
3703			/* get rid of the request and get the request flags */
3704			if (asoc->stream_reset_outstanding == 0) {
3705				sctp_clean_up_stream_reset(stcb);
3706			}
3707		}
3708	}
3709	return (0);
3710}
3711
3712static void
3713sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3714    struct sctp_tmit_chunk *chk,
3715    struct sctp_stream_reset_in_request *req, int trunc)
3716{
3717	uint32_t seq;
3718	int len, i;
3719	int number_entries;
3720	uint16_t temp;
3721
3722	/*
3723	 * peer wants me to send a str-reset to him for my outgoing seq's if
3724	 * seq_in is right.
3725	 */
3726	struct sctp_association *asoc = &stcb->asoc;
3727
3728	seq = ntohl(req->request_seq);
3729	if (asoc->str_reset_seq_in == seq) {
3730		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3731		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
3732			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3733		} else if (trunc) {
3734			/* Can't do it, since they exceeded our buffer size  */
3735			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3736		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3737			len = ntohs(req->ph.param_length);
3738			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3739			for (i = 0; i < number_entries; i++) {
3740				temp = ntohs(req->list_of_streams[i]);
3741				req->list_of_streams[i] = temp;
3742			}
3743			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3744			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3745			    asoc->str_reset_seq_out,
3746			    seq, (asoc->sending_seq - 1));
3747			asoc->stream_reset_out_is_outstanding = 1;
3748			asoc->str_reset = chk;
3749			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3750			stcb->asoc.stream_reset_outstanding++;
3751		} else {
3752			/* Can't do it, since we have sent one out */
3753			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
3754		}
3755		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3756		asoc->str_reset_seq_in++;
3757	} else if (asoc->str_reset_seq_in - 1 == seq) {
3758		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3759	} else if (asoc->str_reset_seq_in - 2 == seq) {
3760		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3761	} else {
3762		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3763	}
3764}
3765
/*
 * Handle an incoming SSN/TSN reset request (association reset).  On a
 * fresh sequence number we fabricate a FORWARD-TSN covering the whole
 * receive map, slide our mapping arrays to a fresh base, bump our own
 * sending sequence, and reset every inbound and outbound stream.
 * Returns 1 if the fabricated FORWARD-TSN aborted the association (the
 * caller must stop processing), 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* expected sequence number: process the request */
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* association-change support not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/*
			 * Build a FORWARD-TSN up to the current top of the
			 * map so any pending data is flushed to the ULP
			 * before the map is re-based.
			 */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				/* the association was torn down during delivery */
				return (1);
			}
			/* jump the receive map forward and wipe both maps */
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			/* reset every stream in both directions */
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission: echo the last result, our reply was lost */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: echo the result before that */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
3830
3831static void
3832sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3833    struct sctp_tmit_chunk *chk,
3834    struct sctp_stream_reset_out_request *req, int trunc)
3835{
3836	uint32_t seq, tsn;
3837	int number_entries, len;
3838	struct sctp_association *asoc = &stcb->asoc;
3839
3840	seq = ntohl(req->request_seq);
3841
3842	/* now if its not a duplicate we process it */
3843	if (asoc->str_reset_seq_in == seq) {
3844		len = ntohs(req->ph.param_length);
3845		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3846		/*
3847		 * the sender is resetting, handle the list issue.. we must
3848		 * a) verify if we can do the reset, if so no problem b) If
3849		 * we can't do the reset we must copy the request. c) queue
3850		 * it, and setup the data in processor to trigger it off
3851		 * when needed and dequeue all the queued data.
3852		 */
3853		tsn = ntohl(req->send_reset_at_tsn);
3854
3855		/* move the reset action back one */
3856		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3857		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
3858			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3859		} else if (trunc) {
3860			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3861		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
3862			/* we can do it now */
3863			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3864			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3865		} else {
3866			/*
3867			 * we must queue it up and thus wait for the TSN's
3868			 * to arrive that are at or before tsn
3869			 */
3870			struct sctp_stream_reset_list *liste;
3871			int siz;
3872
3873			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3874			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3875			    siz, SCTP_M_STRESET);
3876			if (liste == NULL) {
3877				/* gak out of memory */
3878				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3879				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3880				return;
3881			}
3882			liste->tsn = tsn;
3883			liste->number_entries = number_entries;
3884			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
3885			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3886			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3887		}
3888		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3889		asoc->str_reset_seq_in++;
3890	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3891		/*
3892		 * one seq back, just echo back last action since my
3893		 * response was lost.
3894		 */
3895		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3896	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3897		/*
3898		 * two seq back, just echo back last action since my
3899		 * response was lost.
3900		 */
3901		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3902	} else {
3903		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3904	}
3905}
3906
/*
 * Handle a peer request to add streams in its outgoing (our incoming)
 * direction.  If the new total fits within our configured maximum and
 * the 16-bit stream id space, the strmin[] array is reallocated, queued
 * data is migrated, and the ULP is notified.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		/* age the previous result down one slot */
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* stream-change support not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it, they ask for too many */
	denied:
			/* also reached from below on allocation failure */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* restore the old array and deny the request */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				/* 0xffff so the first delivered SSN is 0 */
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
3991
3992static void
3993sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3994    struct sctp_stream_reset_add_strm *str_add)
3995{
3996	/*
3997	 * Peer is requesting to add more streams. If its within our
3998	 * max-streams we will allow it.
3999	 */
4000	uint16_t num_stream;
4001	uint32_t seq;
4002	struct sctp_association *asoc = &stcb->asoc;
4003
4004	/* Get the number. */
4005	seq = ntohl(str_add->request_seq);
4006	num_stream = ntohs(str_add->number_of_streams);
4007	/* Now what would be the new total? */
4008	if (asoc->str_reset_seq_in == seq) {
4009		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4010		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4011			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4012		} else if (stcb->asoc.stream_reset_outstanding) {
4013			/* We must reject it we have something pending */
4014			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4015		} else {
4016			/* Ok, we can do that :-) */
4017			int mychk;
4018
4019			mychk = stcb->asoc.streamoutcnt;
4020			mychk += num_stream;
4021			if (mychk < 0x10000) {
4022				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4023				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 0, 1, num_stream, 0, 1)) {
4024					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4025				}
4026			} else {
4027				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4028			}
4029		}
4030		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4031		asoc->str_reset_seq_in++;
4032	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4033		/*
4034		 * one seq back, just echo back last action since my
4035		 * response was lost.
4036		 */
4037		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4038	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4039		/*
4040		 * two seq back, just echo back last action since my
4041		 * response was lost.
4042		 */
4043		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4044	} else {
4045		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4046	}
4047}
4048
/*
 * Parse an incoming STREAM_RESET chunk: walk its parameters, dispatch
 * each one to the matching request/response handler, and collect the
 * reply parameters in a newly allocated control chunk that is queued on
 * the control send queue.  Returns 1 if a response parameter caused the
 * association to be aborted, 0 otherwise.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
        struct sctp_chunkhdr *ch_req)
{
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;	/* scratch for a bare param header */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];	/* scratch for a full parameter */
	uint32_t seq = 0;
	int num_req = 0;	/* parameters handled; 0 => nothing to answer */
	int trunc = 0;		/* current parameter larger than cstore? */
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* common bail-out: release the response chunk (and mbuf if set) */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	/* pre-fill the response chunk header; handlers append parameters */
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* walk every embedded parameter */
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* re-read the parameter, this time with its payload */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
		    (uint8_t *) & cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > sizeof(cstore)) {
			/* payload didn't fit; handlers must not trust the list */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			/* peer adds its outgoing (our incoming) streams */
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			/* peer asks us to add outgoing streams of our own */
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted during processing */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* association aborted during processing */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop */
			break;
		}
		/* advance past this parameter (with padding), clamping at 0 */
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4220
4221/*
4222 * Handle a router or endpoints report of a packet loss, there are two ways
4223 * to handle this, either we get the whole packet and must disect it
4224 * ourselves (possibly with truncation and or corruption) or it is a summary
4225 * from a middle box that did the disectting for us.
4226 */
4227static void
4228sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4229    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4230{
4231	uint32_t bottle_bw, on_queue;
4232	uint16_t trunc_len;
4233	unsigned int chlen;
4234	unsigned int at;
4235	struct sctp_chunk_desc desc;
4236	struct sctp_chunkhdr *ch;
4237
4238	chlen = ntohs(cp->ch.chunk_length);
4239	chlen -= sizeof(struct sctp_pktdrop_chunk);
4240	/* XXX possible chlen underflow */
4241	if (chlen == 0) {
4242		ch = NULL;
4243		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4244			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4245	} else {
4246		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4247		chlen -= sizeof(struct sctphdr);
4248		/* XXX possible chlen underflow */
4249		memset(&desc, 0, sizeof(desc));
4250	}
4251	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4252	if (trunc_len > limit) {
4253		trunc_len = limit;
4254	}
4255	/* now the chunks themselves */
4256	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4257		desc.chunk_type = ch->chunk_type;
4258		/* get amount we need to move */
4259		at = ntohs(ch->chunk_length);
4260		if (at < sizeof(struct sctp_chunkhdr)) {
4261			/* corrupt chunk, maybe at the end? */
4262			SCTP_STAT_INCR(sctps_pdrpcrupt);
4263			break;
4264		}
4265		if (trunc_len == 0) {
4266			/* we are supposed to have all of it */
4267			if (at > chlen) {
4268				/* corrupt skip it */
4269				SCTP_STAT_INCR(sctps_pdrpcrupt);
4270				break;
4271			}
4272		} else {
4273			/* is there enough of it left ? */
4274			if (desc.chunk_type == SCTP_DATA) {
4275				if (chlen < (sizeof(struct sctp_data_chunk) +
4276				    sizeof(desc.data_bytes))) {
4277					break;
4278				}
4279			} else {
4280				if (chlen < sizeof(struct sctp_chunkhdr)) {
4281					break;
4282				}
4283			}
4284		}
4285		if (desc.chunk_type == SCTP_DATA) {
4286			/* can we get out the tsn? */
4287			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4288				SCTP_STAT_INCR(sctps_pdrpmbda);
4289
4290			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4291				/* yep */
4292				struct sctp_data_chunk *dcp;
4293				uint8_t *ddp;
4294				unsigned int iii;
4295
4296				dcp = (struct sctp_data_chunk *)ch;
4297				ddp = (uint8_t *) (dcp + 1);
4298				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4299					desc.data_bytes[iii] = ddp[iii];
4300				}
4301				desc.tsn_ifany = dcp->dp.tsn;
4302			} else {
4303				/* nope we are done. */
4304				SCTP_STAT_INCR(sctps_pdrpnedat);
4305				break;
4306			}
4307		} else {
4308			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4309				SCTP_STAT_INCR(sctps_pdrpmbct);
4310		}
4311
4312		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4313			SCTP_STAT_INCR(sctps_pdrppdbrk);
4314			break;
4315		}
4316		if (SCTP_SIZE32(at) > chlen) {
4317			break;
4318		}
4319		chlen -= SCTP_SIZE32(at);
4320		if (chlen < sizeof(struct sctp_chunkhdr)) {
4321			/* done, none left */
4322			break;
4323		}
4324		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4325	}
4326	/* Now update any rwnd --- possibly */
4327	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4328		/* From a peer, we get a rwnd report */
4329		uint32_t a_rwnd;
4330
4331		SCTP_STAT_INCR(sctps_pdrpfehos);
4332
4333		bottle_bw = ntohl(cp->bottle_bw);
4334		on_queue = ntohl(cp->current_onq);
4335		if (bottle_bw && on_queue) {
4336			/* a rwnd report is in here */
4337			if (bottle_bw > on_queue)
4338				a_rwnd = bottle_bw - on_queue;
4339			else
4340				a_rwnd = 0;
4341
4342			if (a_rwnd == 0)
4343				stcb->asoc.peers_rwnd = 0;
4344			else {
4345				if (a_rwnd > stcb->asoc.total_flight) {
4346					stcb->asoc.peers_rwnd =
4347					    a_rwnd - stcb->asoc.total_flight;
4348				} else {
4349					stcb->asoc.peers_rwnd = 0;
4350				}
4351				if (stcb->asoc.peers_rwnd <
4352				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4353					/* SWS sender side engages */
4354					stcb->asoc.peers_rwnd = 0;
4355				}
4356			}
4357		}
4358	} else {
4359		SCTP_STAT_INCR(sctps_pdrpfmbox);
4360	}
4361
4362	/* now middle boxes in sat networks get a cwnd bump */
4363	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4364	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4365	    (stcb->asoc.sat_network)) {
4366		/*
4367		 * This is debateable but for sat networks it makes sense
4368		 * Note if a T3 timer has went off, we will prohibit any
4369		 * changes to cwnd until we exit the t3 loss recovery.
4370		 */
4371		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4372		    net, cp, &bottle_bw, &on_queue);
4373	}
4374}
4375
4376/*
4377 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4378 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4379 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4380 * length of the complete packet outputs: - length: modified to remaining
4381 * length after control processing - netp: modified to new sctp_nets after
4382 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4383 * bad packet,...) otherwise return the tcb for this packet
4384 */
4385#ifdef __GNUC__
4386__attribute__((noinline))
4387#endif
4388	static struct sctp_tcb *
4389	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4390             struct sockaddr *src, struct sockaddr *dst,
4391             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4392             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4393             uint8_t use_mflowid, uint32_t mflowid,
4394             uint32_t vrf_id, uint16_t port)
4395{
4396	struct sctp_association *asoc;
4397	struct mbuf *op_err;
4398	char msg[SCTP_DIAG_INFO_LEN];
4399	uint32_t vtag_in;
4400	int num_chunks = 0;	/* number of control chunks processed */
4401	uint32_t chk_length;
4402	int ret;
4403	int abort_no_unlock = 0;
4404	int ecne_seen = 0;
4405
4406	/*
4407	 * How big should this be, and should it be alloc'd? Lets try the
4408	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4409	 * until we get into jumbo grams and such..
4410	 */
4411	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4412	struct sctp_tcb *locked_tcb = stcb;
4413	int got_auth = 0;
4414	uint32_t auth_offset = 0, auth_len = 0;
4415	int auth_skipped = 0;
4416	int asconf_cnt = 0;
4417
4418#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4419	struct socket *so;
4420
4421#endif
4422
4423	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4424	    iphlen, *offset, length, (void *)stcb);
4425
4426	/* validate chunk header length... */
4427	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4428		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4429		    ntohs(ch->chunk_length));
4430		if (locked_tcb) {
4431			SCTP_TCB_UNLOCK(locked_tcb);
4432		}
4433		return (NULL);
4434	}
4435	/*
4436	 * validate the verification tag
4437	 */
4438	vtag_in = ntohl(sh->v_tag);
4439
4440	if (locked_tcb) {
4441		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4442	}
4443	if (ch->chunk_type == SCTP_INITIATION) {
4444		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4445		    ntohs(ch->chunk_length), vtag_in);
4446		if (vtag_in != 0) {
4447			/* protocol error- silently discard... */
4448			SCTP_STAT_INCR(sctps_badvtag);
4449			if (locked_tcb) {
4450				SCTP_TCB_UNLOCK(locked_tcb);
4451			}
4452			return (NULL);
4453		}
4454	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4455		/*
4456		 * If there is no stcb, skip the AUTH chunk and process
4457		 * later after a stcb is found (to validate the lookup was
4458		 * valid.
4459		 */
4460		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4461		    (stcb == NULL) &&
4462		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4463			/* save this chunk for later processing */
4464			auth_skipped = 1;
4465			auth_offset = *offset;
4466			auth_len = ntohs(ch->chunk_length);
4467
4468			/* (temporarily) move past this chunk */
4469			*offset += SCTP_SIZE32(auth_len);
4470			if (*offset >= length) {
4471				/* no more data left in the mbuf chain */
4472				*offset = length;
4473				if (locked_tcb) {
4474					SCTP_TCB_UNLOCK(locked_tcb);
4475				}
4476				return (NULL);
4477			}
4478			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4479			    sizeof(struct sctp_chunkhdr), chunk_buf);
4480		}
4481		if (ch == NULL) {
4482			/* Help */
4483			*offset = length;
4484			if (locked_tcb) {
4485				SCTP_TCB_UNLOCK(locked_tcb);
4486			}
4487			return (NULL);
4488		}
4489		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4490			goto process_control_chunks;
4491		}
4492		/*
4493		 * first check if it's an ASCONF with an unknown src addr we
4494		 * need to look inside to find the association
4495		 */
4496		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4497			struct sctp_chunkhdr *asconf_ch = ch;
4498			uint32_t asconf_offset = 0, asconf_len = 0;
4499
4500			/* inp's refcount may be reduced */
4501			SCTP_INP_INCR_REF(inp);
4502
4503			asconf_offset = *offset;
4504			do {
4505				asconf_len = ntohs(asconf_ch->chunk_length);
4506				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4507					break;
4508				stcb = sctp_findassociation_ep_asconf(m,
4509				    *offset,
4510				    dst,
4511				    sh, &inp, netp, vrf_id);
4512				if (stcb != NULL)
4513					break;
4514				asconf_offset += SCTP_SIZE32(asconf_len);
4515				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4516				    sizeof(struct sctp_chunkhdr), chunk_buf);
4517			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4518			if (stcb == NULL) {
4519				/*
4520				 * reduce inp's refcount if not reduced in
4521				 * sctp_findassociation_ep_asconf().
4522				 */
4523				SCTP_INP_DECR_REF(inp);
4524			} else {
4525				locked_tcb = stcb;
4526			}
4527
4528			/* now go back and verify any auth chunk to be sure */
4529			if (auth_skipped && (stcb != NULL)) {
4530				struct sctp_auth_chunk *auth;
4531
4532				auth = (struct sctp_auth_chunk *)
4533				    sctp_m_getptr(m, auth_offset,
4534				    auth_len, chunk_buf);
4535				got_auth = 1;
4536				auth_skipped = 0;
4537				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4538				    auth_offset)) {
4539					/* auth HMAC failed so dump it */
4540					*offset = length;
4541					if (locked_tcb) {
4542						SCTP_TCB_UNLOCK(locked_tcb);
4543					}
4544					return (NULL);
4545				} else {
4546					/* remaining chunks are HMAC checked */
4547					stcb->asoc.authenticated = 1;
4548				}
4549			}
4550		}
4551		if (stcb == NULL) {
4552			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
4553			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4554			    msg);
4555			/* no association, so it's out of the blue... */
4556			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4557			    use_mflowid, mflowid,
4558			    vrf_id, port);
4559			*offset = length;
4560			if (locked_tcb) {
4561				SCTP_TCB_UNLOCK(locked_tcb);
4562			}
4563			return (NULL);
4564		}
4565		asoc = &stcb->asoc;
4566		/* ABORT and SHUTDOWN can use either v_tag... */
4567		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4568		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4569		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4570			/* Take the T-bit always into account. */
4571			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4572			    (vtag_in == asoc->my_vtag)) ||
4573			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4574			    (vtag_in == asoc->peer_vtag))) {
4575				/* this is valid */
4576			} else {
4577				/* drop this packet... */
4578				SCTP_STAT_INCR(sctps_badvtag);
4579				if (locked_tcb) {
4580					SCTP_TCB_UNLOCK(locked_tcb);
4581				}
4582				return (NULL);
4583			}
4584		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4585			if (vtag_in != asoc->my_vtag) {
4586				/*
4587				 * this could be a stale SHUTDOWN-ACK or the
4588				 * peer never got the SHUTDOWN-COMPLETE and
4589				 * is still hung; we have started a new asoc
4590				 * but it won't complete until the shutdown
4591				 * is completed
4592				 */
4593				if (locked_tcb) {
4594					SCTP_TCB_UNLOCK(locked_tcb);
4595				}
4596				snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
4597				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4598				    msg);
4599				sctp_handle_ootb(m, iphlen, *offset, src, dst,
4600				    sh, inp, op_err,
4601				    use_mflowid, mflowid,
4602				    vrf_id, port);
4603				return (NULL);
4604			}
4605		} else {
4606			/* for all other chunks, vtag must match */
4607			if (vtag_in != asoc->my_vtag) {
4608				/* invalid vtag... */
4609				SCTPDBG(SCTP_DEBUG_INPUT3,
4610				    "invalid vtag: %xh, expect %xh\n",
4611				    vtag_in, asoc->my_vtag);
4612				SCTP_STAT_INCR(sctps_badvtag);
4613				if (locked_tcb) {
4614					SCTP_TCB_UNLOCK(locked_tcb);
4615				}
4616				*offset = length;
4617				return (NULL);
4618			}
4619		}
4620	}			/* end if !SCTP_COOKIE_ECHO */
4621	/*
4622	 * process all control chunks...
4623	 */
4624	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4625	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4626	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4627	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4628		/* implied cookie-ack.. we must have lost the ack */
4629		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4630			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4631			    stcb->asoc.overall_error_count,
4632			    0,
4633			    SCTP_FROM_SCTP_INPUT,
4634			    __LINE__);
4635		}
4636		stcb->asoc.overall_error_count = 0;
4637		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4638		    *netp);
4639	}
4640process_control_chunks:
4641	while (IS_SCTP_CONTROL(ch)) {
4642		/* validate chunk length */
4643		chk_length = ntohs(ch->chunk_length);
4644		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4645		    ch->chunk_type, chk_length);
4646		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4647		if (chk_length < sizeof(*ch) ||
4648		    (*offset + (int)chk_length) > length) {
4649			*offset = length;
4650			if (locked_tcb) {
4651				SCTP_TCB_UNLOCK(locked_tcb);
4652			}
4653			return (NULL);
4654		}
4655		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4656		/*
4657		 * INIT-ACK only gets the init ack "header" portion only
4658		 * because we don't have to process the peer's COOKIE. All
4659		 * others get a complete chunk.
4660		 */
4661		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4662		    (ch->chunk_type == SCTP_INITIATION)) {
4663			/* get an init-ack chunk */
4664			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4665			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4666			if (ch == NULL) {
4667				*offset = length;
4668				if (locked_tcb) {
4669					SCTP_TCB_UNLOCK(locked_tcb);
4670				}
4671				return (NULL);
4672			}
4673		} else {
4674			/* For cookies and all other chunks. */
4675			if (chk_length > sizeof(chunk_buf)) {
4676				/*
4677				 * use just the size of the chunk buffer so
4678				 * the front part of our chunks fit in
4679				 * contiguous space up to the chunk buffer
4680				 * size (508 bytes). For chunks that need to
4681				 * get more than that they must use the
4682				 * sctp_m_getptr() function or other means
4683				 * (e.g. know how to parse mbuf chains).
4684				 * Cookies do this already.
4685				 */
4686				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4687				    (sizeof(chunk_buf) - 4),
4688				    chunk_buf);
4689				if (ch == NULL) {
4690					*offset = length;
4691					if (locked_tcb) {
4692						SCTP_TCB_UNLOCK(locked_tcb);
4693					}
4694					return (NULL);
4695				}
4696			} else {
4697				/* We can fit it all */
4698				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4699				    chk_length, chunk_buf);
4700				if (ch == NULL) {
4701					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4702					*offset = length;
4703					if (locked_tcb) {
4704						SCTP_TCB_UNLOCK(locked_tcb);
4705					}
4706					return (NULL);
4707				}
4708			}
4709		}
4710		num_chunks++;
4711		/* Save off the last place we got a control from */
4712		if (stcb != NULL) {
4713			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4714				/*
4715				 * allow last_control to be NULL if
4716				 * ASCONF... ASCONF processing will find the
4717				 * right net later
4718				 */
4719				if ((netp != NULL) && (*netp != NULL))
4720					stcb->asoc.last_control_chunk_from = *netp;
4721			}
4722		}
4723#ifdef SCTP_AUDITING_ENABLED
4724		sctp_audit_log(0xB0, ch->chunk_type);
4725#endif
4726
4727		/* check to see if this chunk required auth, but isn't */
4728		if ((stcb != NULL) &&
4729		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4730		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4731		    !stcb->asoc.authenticated) {
4732			/* "silently" ignore */
4733			SCTP_STAT_INCR(sctps_recvauthmissing);
4734			goto next_chunk;
4735		}
4736		switch (ch->chunk_type) {
4737		case SCTP_INITIATION:
4738			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4739			/* The INIT chunk must be the only chunk. */
4740			if ((num_chunks > 1) ||
4741			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4742				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4743				    "INIT not the only chunk");
4744				sctp_abort_association(inp, stcb, m, iphlen,
4745				    src, dst, sh, op_err,
4746				    use_mflowid, mflowid,
4747				    vrf_id, port);
4748				*offset = length;
4749				return (NULL);
4750			}
4751			/* Honor our resource limit. */
4752			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
4753				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
4754				sctp_abort_association(inp, stcb, m, iphlen,
4755				    src, dst, sh, op_err,
4756				    use_mflowid, mflowid,
4757				    vrf_id, port);
4758				*offset = length;
4759				return (NULL);
4760			}
4761			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
4762			    (struct sctp_init_chunk *)ch, inp,
4763			    stcb, &abort_no_unlock,
4764			    use_mflowid, mflowid,
4765			    vrf_id, port);
4766			*offset = length;
4767			if ((!abort_no_unlock) && (locked_tcb)) {
4768				SCTP_TCB_UNLOCK(locked_tcb);
4769			}
4770			return (NULL);
4771			break;
4772		case SCTP_PAD_CHUNK:
4773			break;
4774		case SCTP_INITIATION_ACK:
4775			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4776			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4777				/* We are not interested anymore */
4778				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4779					;
4780				} else {
4781					if (locked_tcb != stcb) {
4782						/* Very unlikely */
4783						SCTP_TCB_UNLOCK(locked_tcb);
4784					}
4785					*offset = length;
4786					if (stcb) {
4787#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4788						so = SCTP_INP_SO(inp);
4789						atomic_add_int(&stcb->asoc.refcnt, 1);
4790						SCTP_TCB_UNLOCK(stcb);
4791						SCTP_SOCKET_LOCK(so, 1);
4792						SCTP_TCB_LOCK(stcb);
4793						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4794#endif
4795						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4796#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4797						SCTP_SOCKET_UNLOCK(so, 1);
4798#endif
4799					}
4800					return (NULL);
4801				}
4802			}
4803			/* The INIT-ACK chunk must be the only chunk. */
4804			if ((num_chunks > 1) ||
4805			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4806				*offset = length;
4807				if (locked_tcb) {
4808					SCTP_TCB_UNLOCK(locked_tcb);
4809				}
4810				return (NULL);
4811			}
4812			if ((netp) && (*netp)) {
4813				ret = sctp_handle_init_ack(m, iphlen, *offset,
4814				    src, dst, sh,
4815				    (struct sctp_init_ack_chunk *)ch,
4816				    stcb, *netp,
4817				    &abort_no_unlock,
4818				    use_mflowid, mflowid,
4819				    vrf_id);
4820			} else {
4821				ret = -1;
4822			}
4823			*offset = length;
4824			if (abort_no_unlock) {
4825				return (NULL);
4826			}
4827			/*
4828			 * Special case, I must call the output routine to
4829			 * get the cookie echoed
4830			 */
4831			if ((stcb != NULL) && (ret == 0)) {
4832				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4833			}
4834			if (locked_tcb) {
4835				SCTP_TCB_UNLOCK(locked_tcb);
4836			}
4837			return (NULL);
4838			break;
4839		case SCTP_SELECTIVE_ACK:
4840			{
4841				struct sctp_sack_chunk *sack;
4842				int abort_now = 0;
4843				uint32_t a_rwnd, cum_ack;
4844				uint16_t num_seg, num_dup;
4845				uint8_t flags;
4846				int offset_seg, offset_dup;
4847
4848				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4849				SCTP_STAT_INCR(sctps_recvsacks);
4850				if (stcb == NULL) {
4851					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4852					break;
4853				}
4854				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4855					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4856					break;
4857				}
4858				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4859					/*-
4860					 * If we have sent a shutdown-ack, we will pay no
4861					 * attention to a sack sent in to us since
4862					 * we don't care anymore.
4863					 */
4864					break;
4865				}
4866				sack = (struct sctp_sack_chunk *)ch;
4867				flags = ch->chunk_flags;
4868				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4869				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4870				num_dup = ntohs(sack->sack.num_dup_tsns);
4871				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4872				if (sizeof(struct sctp_sack_chunk) +
4873				    num_seg * sizeof(struct sctp_gap_ack_block) +
4874				    num_dup * sizeof(uint32_t) != chk_length) {
4875					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4876					break;
4877				}
4878				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4879				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4880				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4881				    cum_ack, num_seg, a_rwnd);
4882				stcb->asoc.seen_a_sack_this_pkt = 1;
4883				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4884				    (num_seg == 0) &&
4885				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4886				    (stcb->asoc.saw_sack_with_frags == 0) &&
4887				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4888				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4889				    ) {
4890					/*
4891					 * We have a SIMPLE sack having no
4892					 * prior segments and data on sent
4893					 * queue to be acked.. Use the
4894					 * faster path sack processing. We
4895					 * also allow window update sacks
4896					 * with no missing segments to go
4897					 * this way too.
4898					 */
4899					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
4900				} else {
4901					if (netp && *netp)
4902						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
4903						    num_seg, 0, num_dup, &abort_now, flags,
4904						    cum_ack, a_rwnd, ecne_seen);
4905				}
4906				if (abort_now) {
4907					/* ABORT signal from sack processing */
4908					*offset = length;
4909					return (NULL);
4910				}
4911				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4912				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4913				    (stcb->asoc.stream_queue_cnt == 0)) {
4914					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4915				}
4916			}
4917			break;
4918			/*
4919			 * EY - nr_sack:  If the received chunk is an
4920			 * nr_sack chunk
4921			 */
4922		case SCTP_NR_SELECTIVE_ACK:
4923			{
4924				struct sctp_nr_sack_chunk *nr_sack;
4925				int abort_now = 0;
4926				uint32_t a_rwnd, cum_ack;
4927				uint16_t num_seg, num_nr_seg, num_dup;
4928				uint8_t flags;
4929				int offset_seg, offset_dup;
4930
4931				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4932				SCTP_STAT_INCR(sctps_recvsacks);
4933				if (stcb == NULL) {
4934					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4935					break;
4936				}
4937				if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
4938				    (stcb->asoc.peer_supports_nr_sack == 0)) {
4939					goto unknown_chunk;
4940				}
4941				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4942					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4943					break;
4944				}
4945				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4946					/*-
4947					 * If we have sent a shutdown-ack, we will pay no
4948					 * attention to a sack sent in to us since
4949					 * we don't care anymore.
4950					 */
4951					break;
4952				}
4953				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4954				flags = ch->chunk_flags;
4955				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4956				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4957				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4958				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4959				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4960				if (sizeof(struct sctp_nr_sack_chunk) +
4961				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4962				    num_dup * sizeof(uint32_t) != chk_length) {
4963					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4964					break;
4965				}
4966				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4967				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4968				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4969				    cum_ack, num_seg, a_rwnd);
4970				stcb->asoc.seen_a_sack_this_pkt = 1;
4971				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4972				    (num_seg == 0) && (num_nr_seg == 0) &&
4973				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4974				    (stcb->asoc.saw_sack_with_frags == 0) &&
4975				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4976				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4977					/*
4978					 * We have a SIMPLE sack having no
4979					 * prior segments and data on sent
4980					 * queue to be acked. Use the faster
4981					 * path sack processing. We also
4982					 * allow window update sacks with no
4983					 * missing segments to go this way
4984					 * too.
4985					 */
4986					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
4987					    &abort_now, ecne_seen);
4988				} else {
4989					if (netp && *netp)
4990						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
4991						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4992						    cum_ack, a_rwnd, ecne_seen);
4993				}
4994				if (abort_now) {
4995					/* ABORT signal from sack processing */
4996					*offset = length;
4997					return (NULL);
4998				}
4999				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5000				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5001				    (stcb->asoc.stream_queue_cnt == 0)) {
5002					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5003				}
5004			}
5005			break;
5006
5007		case SCTP_HEARTBEAT_REQUEST:
5008			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5009			if ((stcb) && netp && *netp) {
5010				SCTP_STAT_INCR(sctps_recvheartbeat);
5011				sctp_send_heartbeat_ack(stcb, m, *offset,
5012				    chk_length, *netp);
5013
5014				/* He's alive so give him credit */
5015				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5016					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5017					    stcb->asoc.overall_error_count,
5018					    0,
5019					    SCTP_FROM_SCTP_INPUT,
5020					    __LINE__);
5021				}
5022				stcb->asoc.overall_error_count = 0;
5023			}
5024			break;
5025		case SCTP_HEARTBEAT_ACK:
5026			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
5027			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5028				/* Its not ours */
5029				*offset = length;
5030				if (locked_tcb) {
5031					SCTP_TCB_UNLOCK(locked_tcb);
5032				}
5033				return (NULL);
5034			}
5035			/* He's alive so give him credit */
5036			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5037				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5038				    stcb->asoc.overall_error_count,
5039				    0,
5040				    SCTP_FROM_SCTP_INPUT,
5041				    __LINE__);
5042			}
5043			stcb->asoc.overall_error_count = 0;
5044			SCTP_STAT_INCR(sctps_recvheartbeatack);
5045			if (netp && *netp)
5046				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5047				    stcb, *netp);
5048			break;
5049		case SCTP_ABORT_ASSOCIATION:
5050			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5051			    (void *)stcb);
5052			if ((stcb) && netp && *netp)
5053				sctp_handle_abort((struct sctp_abort_chunk *)ch,
5054				    stcb, *netp);
5055			*offset = length;
5056			return (NULL);
5057			break;
5058		case SCTP_SHUTDOWN:
5059			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5060			    (void *)stcb);
5061			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5062				*offset = length;
5063				if (locked_tcb) {
5064					SCTP_TCB_UNLOCK(locked_tcb);
5065				}
5066				return (NULL);
5067			}
5068			if (netp && *netp) {
5069				int abort_flag = 0;
5070
5071				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5072				    stcb, *netp, &abort_flag);
5073				if (abort_flag) {
5074					*offset = length;
5075					return (NULL);
5076				}
5077			}
5078			break;
5079		case SCTP_SHUTDOWN_ACK:
5080			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb);
5081			if ((stcb) && (netp) && (*netp))
5082				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5083			*offset = length;
5084			return (NULL);
5085			break;
5086
5087		case SCTP_OPERATION_ERROR:
5088			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
5089			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
5090				*offset = length;
5091				return (NULL);
5092			}
5093			break;
5094		case SCTP_COOKIE_ECHO:
5095			SCTPDBG(SCTP_DEBUG_INPUT3,
5096			    "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb);
5097			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5098				;
5099			} else {
5100				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5101					/* We are not interested anymore */
5102			abend:
5103					if (stcb) {
5104						SCTP_TCB_UNLOCK(stcb);
5105					}
5106					*offset = length;
5107					return (NULL);
5108				}
5109			}
5110			/*
5111			 * First are we accepting? We do this again here
5112			 * since it is possible that a previous endpoint WAS
5113			 * listening responded to a INIT-ACK and then
5114			 * closed. We opened and bound.. and are now no
5115			 * longer listening.
5116			 */
5117
5118			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
5119				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5120				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5121					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5122					sctp_abort_association(inp, stcb, m, iphlen,
5123					    src, dst, sh, op_err,
5124					    use_mflowid, mflowid,
5125					    vrf_id, port);
5126				}
5127				*offset = length;
5128				return (NULL);
5129			} else {
5130				struct mbuf *ret_buf;
5131				struct sctp_inpcb *linp;
5132
5133				if (stcb) {
5134					linp = NULL;
5135				} else {
5136					linp = inp;
5137				}
5138
5139				if (linp) {
5140					SCTP_ASOC_CREATE_LOCK(linp);
5141					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5142					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5143						SCTP_ASOC_CREATE_UNLOCK(linp);
5144						goto abend;
5145					}
5146				}
5147				if (netp) {
5148					ret_buf =
5149					    sctp_handle_cookie_echo(m, iphlen,
5150					    *offset,
5151					    src, dst,
5152					    sh,
5153					    (struct sctp_cookie_echo_chunk *)ch,
5154					    &inp, &stcb, netp,
5155					    auth_skipped,
5156					    auth_offset,
5157					    auth_len,
5158					    &locked_tcb,
5159					    use_mflowid,
5160					    mflowid,
5161					    vrf_id,
5162					    port);
5163				} else {
5164					ret_buf = NULL;
5165				}
5166				if (linp) {
5167					SCTP_ASOC_CREATE_UNLOCK(linp);
5168				}
5169				if (ret_buf == NULL) {
5170					if (locked_tcb) {
5171						SCTP_TCB_UNLOCK(locked_tcb);
5172					}
5173					SCTPDBG(SCTP_DEBUG_INPUT3,
5174					    "GAK, null buffer\n");
5175					*offset = length;
5176					return (NULL);
5177				}
5178				/* if AUTH skipped, see if it verified... */
5179				if (auth_skipped) {
5180					got_auth = 1;
5181					auth_skipped = 0;
5182				}
5183				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
5184					/*
5185					 * Restart the timer if we have
5186					 * pending data
5187					 */
5188					struct sctp_tmit_chunk *chk;
5189
5190					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
5191					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5192				}
5193			}
5194			break;
5195		case SCTP_COOKIE_ACK:
5196			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb);
5197			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5198				if (locked_tcb) {
5199					SCTP_TCB_UNLOCK(locked_tcb);
5200				}
5201				return (NULL);
5202			}
5203			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5204				/* We are not interested anymore */
5205				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5206					;
5207				} else if (stcb) {
5208#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5209					so = SCTP_INP_SO(inp);
5210					atomic_add_int(&stcb->asoc.refcnt, 1);
5211					SCTP_TCB_UNLOCK(stcb);
5212					SCTP_SOCKET_LOCK(so, 1);
5213					SCTP_TCB_LOCK(stcb);
5214					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5215#endif
5216					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
5217#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5218					SCTP_SOCKET_UNLOCK(so, 1);
5219#endif
5220					*offset = length;
5221					return (NULL);
5222				}
5223			}
5224			/* He's alive so give him credit */
5225			if ((stcb) && netp && *netp) {
5226				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5227					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5228					    stcb->asoc.overall_error_count,
5229					    0,
5230					    SCTP_FROM_SCTP_INPUT,
5231					    __LINE__);
5232				}
5233				stcb->asoc.overall_error_count = 0;
5234				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5235			}
5236			break;
5237		case SCTP_ECN_ECHO:
5238			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5239			/* He's alive so give him credit */
5240			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5241				/* Its not ours */
5242				if (locked_tcb) {
5243					SCTP_TCB_UNLOCK(locked_tcb);
5244				}
5245				*offset = length;
5246				return (NULL);
5247			}
5248			if (stcb) {
5249				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5250					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5251					    stcb->asoc.overall_error_count,
5252					    0,
5253					    SCTP_FROM_SCTP_INPUT,
5254					    __LINE__);
5255				}
5256				stcb->asoc.overall_error_count = 0;
5257				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5258				    stcb);
5259				ecne_seen = 1;
5260			}
5261			break;
5262		case SCTP_ECN_CWR:
5263			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5264			/* He's alive so give him credit */
5265			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5266				/* Its not ours */
5267				if (locked_tcb) {
5268					SCTP_TCB_UNLOCK(locked_tcb);
5269				}
5270				*offset = length;
5271				return (NULL);
5272			}
5273			if (stcb) {
5274				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5275					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5276					    stcb->asoc.overall_error_count,
5277					    0,
5278					    SCTP_FROM_SCTP_INPUT,
5279					    __LINE__);
5280				}
5281				stcb->asoc.overall_error_count = 0;
5282				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5283			}
5284			break;
5285		case SCTP_SHUTDOWN_COMPLETE:
5286			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb);
5287			/* must be first and only chunk */
5288			if ((num_chunks > 1) ||
5289			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5290				*offset = length;
5291				if (locked_tcb) {
5292					SCTP_TCB_UNLOCK(locked_tcb);
5293				}
5294				return (NULL);
5295			}
5296			if ((stcb) && netp && *netp) {
5297				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5298				    stcb, *netp);
5299			}
5300			*offset = length;
5301			return (NULL);
5302			break;
5303		case SCTP_ASCONF:
5304			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5305			/* He's alive so give him credit */
5306			if (stcb) {
5307				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5308					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5309					    stcb->asoc.overall_error_count,
5310					    0,
5311					    SCTP_FROM_SCTP_INPUT,
5312					    __LINE__);
5313				}
5314				stcb->asoc.overall_error_count = 0;
5315				sctp_handle_asconf(m, *offset, src,
5316				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5317				asconf_cnt++;
5318			}
5319			break;
5320		case SCTP_ASCONF_ACK:
5321			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5322			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5323				/* Its not ours */
5324				if (locked_tcb) {
5325					SCTP_TCB_UNLOCK(locked_tcb);
5326				}
5327				*offset = length;
5328				return (NULL);
5329			}
5330			if ((stcb) && netp && *netp) {
5331				/* He's alive so give him credit */
5332				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5333					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5334					    stcb->asoc.overall_error_count,
5335					    0,
5336					    SCTP_FROM_SCTP_INPUT,
5337					    __LINE__);
5338				}
5339				stcb->asoc.overall_error_count = 0;
5340				sctp_handle_asconf_ack(m, *offset,
5341				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5342				if (abort_no_unlock)
5343					return (NULL);
5344			}
5345			break;
5346		case SCTP_FORWARD_CUM_TSN:
5347			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5348			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5349				/* Its not ours */
5350				if (locked_tcb) {
5351					SCTP_TCB_UNLOCK(locked_tcb);
5352				}
5353				*offset = length;
5354				return (NULL);
5355			}
5356			/* He's alive so give him credit */
5357			if (stcb) {
5358				int abort_flag = 0;
5359
5360				stcb->asoc.overall_error_count = 0;
5361				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5362					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5363					    stcb->asoc.overall_error_count,
5364					    0,
5365					    SCTP_FROM_SCTP_INPUT,
5366					    __LINE__);
5367				}
5368				*fwd_tsn_seen = 1;
5369				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5370					/* We are not interested anymore */
5371#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5372					so = SCTP_INP_SO(inp);
5373					atomic_add_int(&stcb->asoc.refcnt, 1);
5374					SCTP_TCB_UNLOCK(stcb);
5375					SCTP_SOCKET_LOCK(so, 1);
5376					SCTP_TCB_LOCK(stcb);
5377					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5378#endif
5379					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5380#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5381					SCTP_SOCKET_UNLOCK(so, 1);
5382#endif
5383					*offset = length;
5384					return (NULL);
5385				}
5386				sctp_handle_forward_tsn(stcb,
5387				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5388				if (abort_flag) {
5389					*offset = length;
5390					return (NULL);
5391				} else {
5392					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5393						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5394						    stcb->asoc.overall_error_count,
5395						    0,
5396						    SCTP_FROM_SCTP_INPUT,
5397						    __LINE__);
5398					}
5399					stcb->asoc.overall_error_count = 0;
5400				}
5401
5402			}
5403			break;
5404		case SCTP_STREAM_RESET:
5405			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5406			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5407				/* Its not ours */
5408				if (locked_tcb) {
5409					SCTP_TCB_UNLOCK(locked_tcb);
5410				}
5411				*offset = length;
5412				return (NULL);
5413			}
5414			if (stcb->asoc.peer_supports_strreset == 0) {
5415				/*
5416				 * hmm, peer should have announced this, but
5417				 * we will turn it on since he is sending us
5418				 * a stream reset.
5419				 */
5420				stcb->asoc.peer_supports_strreset = 1;
5421			}
5422			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5423				/* stop processing */
5424				*offset = length;
5425				return (NULL);
5426			}
5427			break;
5428		case SCTP_PACKET_DROPPED:
5429			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5430			/* re-get it all please */
5431			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5432				/* Its not ours */
5433				if (locked_tcb) {
5434					SCTP_TCB_UNLOCK(locked_tcb);
5435				}
5436				*offset = length;
5437				return (NULL);
5438			}
5439			if (ch && (stcb) && netp && (*netp)) {
5440				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5441				    stcb, *netp,
5442				    min(chk_length, (sizeof(chunk_buf) - 4)));
5443
5444			}
5445			break;
5446
5447		case SCTP_AUTHENTICATION:
5448			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5449			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5450				goto unknown_chunk;
5451
5452			if (stcb == NULL) {
5453				/* save the first AUTH for later processing */
5454				if (auth_skipped == 0) {
5455					auth_offset = *offset;
5456					auth_len = chk_length;
5457					auth_skipped = 1;
5458				}
5459				/* skip this chunk (temporarily) */
5460				goto next_chunk;
5461			}
5462			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5463			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5464			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5465				/* Its not ours */
5466				if (locked_tcb) {
5467					SCTP_TCB_UNLOCK(locked_tcb);
5468				}
5469				*offset = length;
5470				return (NULL);
5471			}
5472			if (got_auth == 1) {
5473				/* skip this chunk... it's already auth'd */
5474				goto next_chunk;
5475			}
5476			got_auth = 1;
5477			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5478			    m, *offset)) {
5479				/* auth HMAC failed so dump the packet */
5480				*offset = length;
5481				return (stcb);
5482			} else {
5483				/* remaining chunks are HMAC checked */
5484				stcb->asoc.authenticated = 1;
5485			}
5486			break;
5487
5488		default:
5489	unknown_chunk:
5490			/* it's an unknown chunk! */
5491			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5492				struct mbuf *mm;
5493				struct sctp_paramhdr *phd;
5494
5495				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5496				    0, M_DONTWAIT, 1, MT_DATA);
5497				if (mm) {
5498					phd = mtod(mm, struct sctp_paramhdr *);
5499					/*
5500					 * We cheat and use param type since
5501					 * we did not bother to define a
5502					 * error cause struct. They are the
5503					 * same basic format with different
5504					 * names.
5505					 */
5506					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5507					phd->param_length = htons(chk_length + sizeof(*phd));
5508					SCTP_BUF_LEN(mm) = sizeof(*phd);
5509					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
5510					if (SCTP_BUF_NEXT(mm)) {
5511						if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
5512							sctp_m_freem(mm);
5513						} else {
5514#ifdef SCTP_MBUF_LOGGING
5515							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5516								struct mbuf *mat;
5517
5518								for (mat = SCTP_BUF_NEXT(mm); mat; mat = SCTP_BUF_NEXT(mat)) {
5519									if (SCTP_BUF_IS_EXTENDED(mat)) {
5520										sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5521									}
5522								}
5523							}
5524#endif
5525							sctp_queue_op_err(stcb, mm);
5526						}
5527					} else {
5528						sctp_m_freem(mm);
5529					}
5530				}
5531			}
5532			if ((ch->chunk_type & 0x80) == 0) {
5533				/* discard this packet */
5534				*offset = length;
5535				return (stcb);
5536			}	/* else skip this bad chunk and continue... */
5537			break;
5538		}		/* switch (ch->chunk_type) */
5539
5540
5541next_chunk:
5542		/* get the next chunk */
5543		*offset += SCTP_SIZE32(chk_length);
5544		if (*offset >= length) {
5545			/* no more data left in the mbuf chain */
5546			break;
5547		}
5548		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5549		    sizeof(struct sctp_chunkhdr), chunk_buf);
5550		if (ch == NULL) {
5551			if (locked_tcb) {
5552				SCTP_TCB_UNLOCK(locked_tcb);
5553			}
5554			*offset = length;
5555			return (NULL);
5556		}
5557	}			/* while */
5558
5559	if (asconf_cnt > 0 && stcb != NULL) {
5560		sctp_send_asconf_ack(stcb);
5561	}
5562	return (stcb);
5563}
5564
5565
#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *tcb;

	/*
	 * Debug-only invariant check: when input processing returns, no
	 * stcb lock and no inp lock may still be held by this thread.
	 */
	LIST_FOREACH(tcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&tcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif
5589
5590/*
5591 * common input chunk processing (v4 and v6)
5592 */
5593void
5594sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5595    struct sockaddr *src, struct sockaddr *dst,
5596    struct sctphdr *sh, struct sctp_chunkhdr *ch,
5597#if !defined(SCTP_WITH_NO_CSUM)
5598    uint8_t compute_crc,
5599#endif
5600    uint8_t ecn_bits,
5601    uint8_t use_mflowid, uint32_t mflowid,
5602    uint32_t vrf_id, uint16_t port)
5603{
5604	uint32_t high_tsn;
5605	int fwd_tsn_seen = 0, data_processed = 0;
5606	struct mbuf *m = *mm, *op_err;
5607	char msg[SCTP_DIAG_INFO_LEN];
5608	int un_sent;
5609	int cnt_ctrl_ready = 0;
5610	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5611	struct sctp_tcb *stcb = NULL;
5612	struct sctp_nets *net = NULL;
5613
5614	SCTP_STAT_INCR(sctps_recvdatagrams);
5615#ifdef SCTP_AUDITING_ENABLED
5616	sctp_audit_log(0xE0, 1);
5617	sctp_auditing(0, inp, stcb, net);
5618#endif
5619#if !defined(SCTP_WITH_NO_CSUM)
5620	if (compute_crc != 0) {
5621		uint32_t check, calc_check;
5622
5623		check = sh->checksum;
5624		sh->checksum = 0;
5625		calc_check = sctp_calculate_cksum(m, iphlen);
5626		sh->checksum = check;
5627		if (calc_check != check) {
5628			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5629			    calc_check, check, (void *)m, length, iphlen);
5630			stcb = sctp_findassociation_addr(m, offset, src, dst,
5631			    sh, ch, &inp, &net, vrf_id);
5632			if ((net != NULL) && (port != 0)) {
5633				if (net->port == 0) {
5634					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5635				}
5636				net->port = port;
5637			}
5638			if ((net != NULL) && (use_mflowid != 0)) {
5639				net->flowid = mflowid;
5640#ifdef INVARIANTS
5641				net->flowidset = 1;
5642#endif
5643			}
5644			if ((inp != NULL) && (stcb != NULL)) {
5645				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5646				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5647			} else if ((inp != NULL) && (stcb == NULL)) {
5648				inp_decr = inp;
5649			}
5650			SCTP_STAT_INCR(sctps_badsum);
5651			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5652			goto out;
5653		}
5654	}
5655#endif
5656	/* Destination port of 0 is illegal, based on RFC4960. */
5657	if (sh->dest_port == 0) {
5658		SCTP_STAT_INCR(sctps_hdrops);
5659		goto out;
5660	}
5661	stcb = sctp_findassociation_addr(m, offset, src, dst,
5662	    sh, ch, &inp, &net, vrf_id);
5663	if ((net != NULL) && (port != 0)) {
5664		if (net->port == 0) {
5665			sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5666		}
5667		net->port = port;
5668	}
5669	if ((net != NULL) && (use_mflowid != 0)) {
5670		net->flowid = mflowid;
5671#ifdef INVARIANTS
5672		net->flowidset = 1;
5673#endif
5674	}
5675	if (inp == NULL) {
5676		SCTP_STAT_INCR(sctps_noport);
5677		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5678			goto out;
5679		}
5680		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5681			sctp_send_shutdown_complete2(src, dst, sh,
5682			    use_mflowid, mflowid,
5683			    vrf_id, port);
5684			goto out;
5685		}
5686		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5687			goto out;
5688		}
5689		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5690			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5691			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5692			    (ch->chunk_type != SCTP_INIT))) {
5693				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5694				    "Out of the blue");
5695				sctp_send_abort(m, iphlen, src, dst,
5696				    sh, 0, op_err,
5697				    use_mflowid, mflowid,
5698				    vrf_id, port);
5699			}
5700		}
5701		goto out;
5702	} else if (stcb == NULL) {
5703		inp_decr = inp;
5704	}
5705#ifdef IPSEC
5706	/*-
5707	 * I very much doubt any of the IPSEC stuff will work but I have no
5708	 * idea, so I will leave it in place.
5709	 */
5710	if (inp != NULL) {
5711		switch (dst->sa_family) {
5712#ifdef INET
5713		case AF_INET:
5714			if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5715				IPSECSTAT_INC(in_polvio);
5716				SCTP_STAT_INCR(sctps_hdrops);
5717				goto out;
5718			}
5719			break;
5720#endif
5721#ifdef INET6
5722		case AF_INET6:
5723			if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
5724				IPSEC6STAT_INC(in_polvio);
5725				SCTP_STAT_INCR(sctps_hdrops);
5726				goto out;
5727			}
5728			break;
5729#endif
5730		default:
5731			break;
5732		}
5733	}
5734#endif
5735	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5736	    (void *)m, iphlen, offset, length, (void *)stcb);
5737	if (stcb) {
5738		/* always clear this before beginning a packet */
5739		stcb->asoc.authenticated = 0;
5740		stcb->asoc.seen_a_sack_this_pkt = 0;
5741		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5742		    (void *)stcb, stcb->asoc.state);
5743
5744		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5745		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5746			/*-
5747			 * If we hit here, we had a ref count
5748			 * up when the assoc was aborted and the
5749			 * timer is clearing out the assoc, we should
5750			 * NOT respond to any packet.. its OOTB.
5751			 */
5752			SCTP_TCB_UNLOCK(stcb);
5753			stcb = NULL;
5754			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5755			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5756			    msg);
5757			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5758			    use_mflowid, mflowid,
5759			    vrf_id, port);
5760			goto out;
5761		}
5762	}
5763	if (IS_SCTP_CONTROL(ch)) {
5764		/* process the control portion of the SCTP packet */
5765		/* sa_ignore NO_NULL_CHK */
5766		stcb = sctp_process_control(m, iphlen, &offset, length,
5767		    src, dst, sh, ch,
5768		    inp, stcb, &net, &fwd_tsn_seen,
5769		    use_mflowid, mflowid,
5770		    vrf_id, port);
5771		if (stcb) {
5772			/*
5773			 * This covers us if the cookie-echo was there and
5774			 * it changes our INP.
5775			 */
5776			inp = stcb->sctp_ep;
5777			if ((net) && (port)) {
5778				if (net->port == 0) {
5779					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5780				}
5781				net->port = port;
5782			}
5783		}
5784	} else {
5785		/*
5786		 * no control chunks, so pre-process DATA chunks (these
5787		 * checks are taken care of by control processing)
5788		 */
5789
5790		/*
5791		 * if DATA only packet, and auth is required, then punt...
5792		 * can't have authenticated without any AUTH (control)
5793		 * chunks
5794		 */
5795		if ((stcb != NULL) &&
5796		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5797		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5798			/* "silently" ignore */
5799			SCTP_STAT_INCR(sctps_recvauthmissing);
5800			goto out;
5801		}
5802		if (stcb == NULL) {
5803			/* out of the blue DATA chunk */
5804			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5805			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5806			    msg);
5807			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5808			    use_mflowid, mflowid,
5809			    vrf_id, port);
5810			goto out;
5811		}
5812		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5813			/* v_tag mismatch! */
5814			SCTP_STAT_INCR(sctps_badvtag);
5815			goto out;
5816		}
5817	}
5818
5819	if (stcb == NULL) {
5820		/*
5821		 * no valid TCB for this packet, or we found it's a bad
5822		 * packet while processing control, or we're done with this
5823		 * packet (done or skip rest of data), so we drop it...
5824		 */
5825		goto out;
5826	}
5827	/*
5828	 * DATA chunk processing
5829	 */
5830	/* plow through the data chunks while length > offset */
5831
5832	/*
5833	 * Rest should be DATA only.  Check authentication state if AUTH for
5834	 * DATA is required.
5835	 */
5836	if ((length > offset) &&
5837	    (stcb != NULL) &&
5838	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5839	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5840	    !stcb->asoc.authenticated) {
5841		/* "silently" ignore */
5842		SCTP_STAT_INCR(sctps_recvauthmissing);
5843		SCTPDBG(SCTP_DEBUG_AUTH1,
5844		    "Data chunk requires AUTH, skipped\n");
5845		goto trigger_send;
5846	}
5847	if (length > offset) {
5848		int retval;
5849
5850		/*
5851		 * First check to make sure our state is correct. We would
5852		 * not get here unless we really did have a tag, so we don't
5853		 * abort if this happens, just dump the chunk silently.
5854		 */
5855		switch (SCTP_GET_STATE(&stcb->asoc)) {
5856		case SCTP_STATE_COOKIE_ECHOED:
5857			/*
5858			 * we consider data with valid tags in this state
5859			 * shows us the cookie-ack was lost. Imply it was
5860			 * there.
5861			 */
5862			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5863				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5864				    stcb->asoc.overall_error_count,
5865				    0,
5866				    SCTP_FROM_SCTP_INPUT,
5867				    __LINE__);
5868			}
5869			stcb->asoc.overall_error_count = 0;
5870			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5871			break;
5872		case SCTP_STATE_COOKIE_WAIT:
5873			/*
5874			 * We consider OOTB any data sent during asoc setup.
5875			 */
5876			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5877			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5878			    msg);
5879			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5880			    use_mflowid, mflowid,
5881			    vrf_id, port);
5882			goto out;
5883			/* sa_ignore NOTREACHED */
5884			break;
5885		case SCTP_STATE_EMPTY:	/* should not happen */
5886		case SCTP_STATE_INUSE:	/* should not happen */
5887		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5888		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5889		default:
5890			goto out;
5891			/* sa_ignore NOTREACHED */
5892			break;
5893		case SCTP_STATE_OPEN:
5894		case SCTP_STATE_SHUTDOWN_SENT:
5895			break;
5896		}
5897		/* plow through the data chunks while length > offset */
5898		retval = sctp_process_data(mm, iphlen, &offset, length,
5899		    src, dst, sh,
5900		    inp, stcb, net, &high_tsn,
5901		    use_mflowid, mflowid,
5902		    vrf_id, port);
5903		if (retval == 2) {
5904			/*
5905			 * The association aborted, NO UNLOCK needed since
5906			 * the association is destroyed.
5907			 */
5908			stcb = NULL;
5909			goto out;
5910		}
5911		data_processed = 1;
5912		/*
5913		 * Anything important needs to have been m_copy'ed in
5914		 * process_data
5915		 */
5916	}
5917	/* take care of ecn */
5918	if ((data_processed == 1) &&
5919	    (stcb->asoc.ecn_allowed == 1) &&
5920	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
5921		/* Yep, we need to add a ECNE */
5922		sctp_send_ecn_echo(stcb, net, high_tsn);
5923	}
5924	if ((data_processed == 0) && (fwd_tsn_seen)) {
5925		int was_a_gap;
5926		uint32_t highest_tsn;
5927
5928		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
5929			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5930		} else {
5931			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5932		}
5933		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
5934		stcb->asoc.send_sack = 1;
5935		sctp_sack_check(stcb, was_a_gap);
5936	} else if (fwd_tsn_seen) {
5937		stcb->asoc.send_sack = 1;
5938	}
5939	/* trigger send of any chunks in queue... */
5940trigger_send:
5941#ifdef SCTP_AUDITING_ENABLED
5942	sctp_audit_log(0xE0, 2);
5943	sctp_auditing(1, inp, stcb, net);
5944#endif
5945	SCTPDBG(SCTP_DEBUG_INPUT1,
5946	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5947	    stcb->asoc.peers_rwnd,
5948	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5949	    stcb->asoc.total_flight);
5950	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5951	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
5952		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
5953	}
5954	if (cnt_ctrl_ready ||
5955	    ((un_sent) &&
5956	    (stcb->asoc.peers_rwnd > 0 ||
5957	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5958		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5959		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5960		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5961	}
5962#ifdef SCTP_AUDITING_ENABLED
5963	sctp_audit_log(0xE0, 3);
5964	sctp_auditing(2, inp, stcb, net);
5965#endif
5966out:
5967	if (stcb != NULL) {
5968		SCTP_TCB_UNLOCK(stcb);
5969	}
5970	if (inp_decr != NULL) {
5971		/* reduce ref-count */
5972		SCTP_INP_WLOCK(inp_decr);
5973		SCTP_INP_DECR_REF(inp_decr);
5974		SCTP_INP_WUNLOCK(inp_decr);
5975	}
5976#ifdef INVARIANTS
5977	if (inp != NULL) {
5978		sctp_validate_no_locks(inp);
5979	}
5980#endif
5981	return;
5982}
5983
#if 0
/*
 * Debug helper: print the address and length of every mbuf in a chain,
 * plus the external-storage size for cluster mbufs.  Compiled out; kept
 * for manual debugging only.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		SCTP_PRINTF("%p: m_len = %ld\n", (void *)m, SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			SCTP_PRINTF("%p: extend_size = %d\n", (void *)m, SCTP_BUF_EXTEND_SIZE(m));
	}
}

#endif
5996
5997#ifdef INET
/*
 * IPv4 input entry point.  Validates the packet headers and mbuf chain,
 * extracts source/destination addresses and ECN bits, decides whether the
 * CRC32c must be computed in software, and hands the packet to
 * sctp_common_input_processing().  Consumes (frees) the mbuf chain.
 * 'port' is the source UDP port for UDP-encapsulated SCTP, 0 otherwise.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint8_t compute_crc;

#endif
	uint32_t mflowid;
	uint8_t use_mflowid;

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
		}
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
	/* Propagate the lower layer's flow id, if one was set. */
	if (m->m_flags & M_FLOWID) {
		mflowid = m->m_pkthdr.flowid;
		use_mflowid = 1;
	} else {
		mflowid = 0;
		use_mflowid = 0;
	}
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		/* m_pullup may replace the chain head; NULL means it freed it. */
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	/* Pointers must be (re)computed after a possible m_pullup. */
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
	/*
	 * NOTE(review): assumes ip_input() has left ip->ip_len in host
	 * byte order excluding the IP header — confirm for this branch.
	 */
	length = ip->ip_len + iphlen;
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	/* Skip the software CRC when the NIC already verified it. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
#endif
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
#if !defined(SCTP_WITH_NO_CSUM)
	    compute_crc,
#endif
	    ecn_bits,
	    use_mflowid, mflowid,
	    vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
6120
6121#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6122extern int *sctp_cpuarry;
6123
6124#endif
6125
/*
 * Protocol-switch entry point for SCTP over IPv4.  On SMP kernels built
 * with SCTP_MCORE_INPUT the packet is steered to a per-flow CPU queue;
 * otherwise it is processed inline with no UDP encapsulation port.
 */
void
sctp_input(struct mbuf *m, int off)
{
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	struct ip *iph;
	struct sctphdr *sctph;
	int pull_len;
	uint32_t hash;

	if (mp_ncpus > 1) {
		if (m->m_flags & M_FLOWID) {
			hash = m->m_pkthdr.flowid;
		} else {
			/*
			 * The lower layers supplied no flow id; derive one
			 * from the verification tag and the port pair so
			 * packets of a flow land on the same core.
			 */
			pull_len = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < pull_len) {
				if ((m = m_pullup(m, pull_len)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return;
				}
			}
			iph = mtod(m, struct ip *);
			sctph = (struct sctphdr *)((caddr_t)iph + off);
			hash = htonl(sctph->v_tag) ^ ntohs(sctph->dest_port) ^ ntohs(sctph->src_port);
			m->m_pkthdr.flowid = hash;
			m->m_flags |= M_FLOWID;
		}
		/* Hand the packet to the core that owns this flow. */
		sctp_queue_to_mcore(m, off, sctp_cpuarry[hash % mp_ncpus]);
		return;
	}
#endif
	sctp_input_with_port(m, off, 0);
}
6165
6166#endif
6167