1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 *    this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in
16 *    the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 *    contributors may be used to endorse or promote products derived
20 *    from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <netinet/sctp_os.h>
36#include <netinet/sctp_var.h>
37#include <netinet/sctp_sysctl.h>
38#include <netinet/sctp_pcb.h>
39#include <netinet/sctp_header.h>
40#include <netinet/sctputil.h>
41#include <netinet/sctp_output.h>
42#include <netinet/sctp_input.h>
43#include <netinet/sctp_auth.h>
44#include <netinet/sctp_indata.h>
45#include <netinet/sctp_asconf.h>
46#include <netinet/sctp_bsd_addr.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_crc32.h>
49#include <netinet/sctp_kdtrace.h>
50#if defined(INET) || defined(INET6)
51#include <netinet/udp.h>
52#endif
53#include <sys/smp.h>
54
55static void
56sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
57{
58	struct sctp_nets *net;
59
60	/*
61	 * This now not only stops all cookie timers it also stops any INIT
62	 * timers as well. This will make sure that the timers are stopped
63	 * in all collision cases.
64	 */
65	SCTP_TCB_LOCK_ASSERT(stcb);
66	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
67		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
68			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
69			    stcb->sctp_ep,
70			    stcb,
71			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
72		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
73			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
74			    stcb->sctp_ep,
75			    stcb,
76			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
77		}
78	}
79}
80
81/* INIT handler */
82static void
83sctp_handle_init(struct mbuf *m, int iphlen, int offset,
84    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
85    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
86    struct sctp_tcb *stcb, struct sctp_nets *net,
87    uint8_t mflowtype, uint32_t mflowid,
88    uint32_t vrf_id, uint16_t port)
89{
90	struct sctp_init *init;
91	struct mbuf *op_err;
92
93	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
94	    (void *)stcb);
95	if (stcb == NULL) {
96		SCTP_INP_RLOCK(inp);
97	}
98	/* Validate parameters */
99	init = &cp->init;
100	if (ntohl(init->initiate_tag) == 0) {
101		goto outnow;
102	}
103	if ((ntohl(init->a_rwnd) < SCTP_MIN_RWND) ||
104	    (ntohs(init->num_inbound_streams) == 0) ||
105	    (ntohs(init->num_outbound_streams) == 0)) {
106		/* protocol error... send abort */
107		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
108		sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err,
109		    mflowtype, mflowid, inp->fibnum,
110		    vrf_id, port);
111		goto outnow;
112	}
113	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
114	    offset + ntohs(cp->ch.chunk_length))) {
115		/* auth parameter(s) error... send abort */
116		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
117		    "Problem with AUTH parameters");
118		sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err,
119		    mflowtype, mflowid, inp->fibnum,
120		    vrf_id, port);
121		goto outnow;
122	}
123	/* We are only accepting if we have a listening socket. */
124	if ((stcb == NULL) &&
125	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
126	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
127	    (!SCTP_IS_LISTENING(inp)))) {
128		/*
129		 * FIX ME ?? What about TCP model and we have a
130		 * match/restart case? Actually no fix is needed. the lookup
131		 * will always find the existing assoc so stcb would not be
132		 * NULL. It may be questionable to do this since we COULD
133		 * just send back the INIT-ACK and hope that the app did
134		 * accept()'s by the time the COOKIE was sent. But there is
135		 * a price to pay for COOKIE generation and I don't want to
136		 * pay it on the chance that the app will actually do some
137		 * accepts(). The App just looses and should NOT be in this
138		 * state :-)
139		 */
140		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
141			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
142			    "No listener");
143			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
144			    mflowtype, mflowid, inp->fibnum,
145			    vrf_id, port);
146		}
147		goto outnow;
148	}
149	if ((stcb != NULL) &&
150	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
151		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
152		sctp_send_shutdown_ack(stcb, NULL);
153		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
154	} else {
155		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
156		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
157		    src, dst, sh, cp,
158		    mflowtype, mflowid,
159		    vrf_id, port);
160	}
161outnow:
162	if (stcb == NULL) {
163		SCTP_INP_RUNLOCK(inp);
164	}
165}
166
167/*
168 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
169 */
170
171int
172sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
173{
174	int unsent_data;
175	unsigned int i;
176	struct sctp_stream_queue_pending *sp;
177	struct sctp_association *asoc;
178
179	SCTP_TCB_LOCK_ASSERT(stcb);
180
181	/*
182	 * This function returns if any stream has true unsent data on it.
183	 * Note that as it looks through it will clean up any places that
184	 * have old data that has been sent but left at top of stream queue.
185	 */
186	asoc = &stcb->asoc;
187	unsent_data = 0;
188	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
189		/* Check to see if some data queued */
190		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
191			/* sa_ignore FREED_MEMORY */
192			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
193			if (sp == NULL) {
194				continue;
195			}
196			if ((sp->msg_is_complete) &&
197			    (sp->length == 0) &&
198			    (sp->sender_all_done)) {
199				/*
200				 * We are doing differed cleanup. Last time
201				 * through when we took all the data the
202				 * sender_all_done was not set.
203				 */
204				if (sp->put_last_out == 0) {
205					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
206					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
207					    sp->sender_all_done,
208					    sp->length,
209					    sp->msg_is_complete,
210					    sp->put_last_out);
211				}
212				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
213				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
214				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp);
215				if (sp->net) {
216					sctp_free_remote_addr(sp->net);
217					sp->net = NULL;
218				}
219				if (sp->data) {
220					sctp_m_freem(sp->data);
221					sp->data = NULL;
222				}
223				sctp_free_a_strmoq(stcb, sp, so_locked);
224				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
225					unsent_data++;
226				}
227			} else {
228				unsent_data++;
229			}
230			if (unsent_data > 0) {
231				break;
232			}
233		}
234	}
235	return (unsent_data);
236}
237
238static int
239sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
240{
241	struct sctp_init *init;
242	struct sctp_association *asoc;
243	struct sctp_nets *lnet;
244	unsigned int i;
245
246	SCTP_TCB_LOCK_ASSERT(stcb);
247
248	init = &cp->init;
249	asoc = &stcb->asoc;
250	/* save off parameters */
251	asoc->peer_vtag = ntohl(init->initiate_tag);
252	asoc->peers_rwnd = ntohl(init->a_rwnd);
253	/* init tsn's */
254	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
255
256	if (!TAILQ_EMPTY(&asoc->nets)) {
257		/* update any ssthresh's that may have a default */
258		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
259			lnet->ssthresh = asoc->peers_rwnd;
260			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
261				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
262			}
263		}
264	}
265	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
266		unsigned int newcnt;
267		struct sctp_stream_out *outs;
268		struct sctp_stream_queue_pending *sp, *nsp;
269		struct sctp_tmit_chunk *chk, *nchk;
270
271		/* abandon the upper streams */
272		newcnt = ntohs(init->num_inbound_streams);
273		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
274			if (chk->rec.data.sid >= newcnt) {
275				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
276				asoc->send_queue_cnt--;
277				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
278					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
279#ifdef INVARIANTS
280				} else {
281					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
282#endif
283				}
284				if (chk->data != NULL) {
285					sctp_free_bufspace(stcb, asoc, chk, 1);
286					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
287					    0, chk, SCTP_SO_NOT_LOCKED);
288					if (chk->data) {
289						sctp_m_freem(chk->data);
290						chk->data = NULL;
291					}
292				}
293				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
294				/* sa_ignore FREED_MEMORY */
295			}
296		}
297		if (asoc->strmout) {
298			for (i = newcnt; i < asoc->pre_open_streams; i++) {
299				outs = &asoc->strmout[i];
300				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
301					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
302					TAILQ_REMOVE(&outs->outqueue, sp, next);
303					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp);
304					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
305					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
306					if (sp->data) {
307						sctp_m_freem(sp->data);
308						sp->data = NULL;
309					}
310					if (sp->net) {
311						sctp_free_remote_addr(sp->net);
312						sp->net = NULL;
313					}
314					/* Free the chunk */
315					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
316					/* sa_ignore FREED_MEMORY */
317				}
318				outs->state = SCTP_STREAM_CLOSED;
319			}
320		}
321		/* cut back the count */
322		asoc->pre_open_streams = newcnt;
323	}
324	asoc->streamoutcnt = asoc->pre_open_streams;
325	if (asoc->strmout) {
326		for (i = 0; i < asoc->streamoutcnt; i++) {
327			asoc->strmout[i].state = SCTP_STREAM_OPEN;
328		}
329	}
330	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
331	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
332	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
333		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
334	}
335	/* This is the next one we expect */
336	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
337
338	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
339	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
340
341	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
342	/* open the requested streams */
343
344	if (asoc->strmin != NULL) {
345		/* Free the old ones */
346		for (i = 0; i < asoc->streamincnt; i++) {
347			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
348			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
349		}
350		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
351	}
352	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
353		asoc->streamincnt = ntohs(init->num_outbound_streams);
354	} else {
355		asoc->streamincnt = asoc->max_inbound_streams;
356	}
357	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
358	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
359	if (asoc->strmin == NULL) {
360		/* we didn't get memory for the streams! */
361		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
362		return (-1);
363	}
364	for (i = 0; i < asoc->streamincnt; i++) {
365		asoc->strmin[i].sid = i;
366		asoc->strmin[i].last_mid_delivered = 0xffffffff;
367		TAILQ_INIT(&asoc->strmin[i].inqueue);
368		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
369		asoc->strmin[i].pd_api_started = 0;
370		asoc->strmin[i].delivery_started = 0;
371	}
372	/*
373	 * load_address_from_init will put the addresses into the
374	 * association when the COOKIE is processed or the INIT-ACK is
375	 * processed. Both types of COOKIE's existing and new call this
376	 * routine. It will remove addresses that are no longer in the
377	 * association (for the restarting case where addresses are
378	 * removed). Up front when the INIT arrives we will discard it if it
379	 * is a restart and new addresses have been added.
380	 */
381	/* sa_ignore MEMLEAK */
382	return (0);
383}
384
385/*
386 * INIT-ACK message processing/consumption returns value < 0 on error
387 */
388static int
389sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
390    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
391    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
392    struct sctp_nets *net, int *abort_no_unlock,
393    uint8_t mflowtype, uint32_t mflowid,
394    uint32_t vrf_id)
395{
396	struct sctp_association *asoc;
397	struct mbuf *op_err;
398	int retval, abort_flag, cookie_found;
399	int initack_limit;
400	int nat_friendly = 0;
401
402	/* First verify that we have no illegal param's */
403	abort_flag = 0;
404	cookie_found = 0;
405
406	op_err = sctp_arethere_unrecognized_parameters(m,
407	    (offset + sizeof(struct sctp_init_chunk)),
408	    &abort_flag, (struct sctp_chunkhdr *)cp,
409	    &nat_friendly, &cookie_found, NULL);
410	if (abort_flag) {
411		/* Send an abort and notify peer */
412		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
413		    src, dst, sh, op_err,
414		    mflowtype, mflowid,
415		    vrf_id, net->port);
416		*abort_no_unlock = 1;
417		return (-1);
418	}
419	if (!cookie_found) {
420		uint16_t len;
421
422		/* Only report the missing cookie parameter */
423		if (op_err != NULL) {
424			sctp_m_freem(op_err);
425		}
426		len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
427		/* We abort with an error of missing mandatory param */
428		op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
429		if (op_err != NULL) {
430			struct sctp_error_missing_param *cause;
431
432			SCTP_BUF_LEN(op_err) = len;
433			cause = mtod(op_err, struct sctp_error_missing_param *);
434			/* Subtract the reserved param */
435			cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
436			cause->cause.length = htons(len);
437			cause->num_missing_params = htonl(1);
438			cause->type[0] = htons(SCTP_STATE_COOKIE);
439		}
440		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
441		    src, dst, sh, op_err,
442		    mflowtype, mflowid,
443		    vrf_id, net->port);
444		*abort_no_unlock = 1;
445		return (-3);
446	}
447	asoc = &stcb->asoc;
448	asoc->peer_supports_nat = (uint8_t)nat_friendly;
449	/* process the peer's parameters in the INIT-ACK */
450	if (sctp_process_init((struct sctp_init_chunk *)cp, stcb) < 0) {
451		if (op_err != NULL) {
452			sctp_m_freem(op_err);
453		}
454		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
455		SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
456		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
457		    src, dst, sh, op_err,
458		    mflowtype, mflowid,
459		    vrf_id, net->port);
460		*abort_no_unlock = 1;
461		return (-1);
462	}
463	initack_limit = offset + ntohs(cp->ch.chunk_length);
464	/* load all addresses */
465	if ((retval = sctp_load_addresses_from_init(stcb, m,
466	    offset + sizeof(struct sctp_init_chunk),
467	    initack_limit, src, dst, NULL, stcb->asoc.port)) < 0) {
468		if (op_err != NULL) {
469			sctp_m_freem(op_err);
470		}
471		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
472		    "Problem with address parameters");
473		SCTPDBG(SCTP_DEBUG_INPUT1,
474		    "Load addresses from INIT causes an abort %d\n",
475		    retval);
476		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
477		    src, dst, sh, op_err,
478		    mflowtype, mflowid,
479		    vrf_id, net->port);
480		*abort_no_unlock = 1;
481		return (-1);
482	}
483	/* if the peer doesn't support asconf, flush the asconf queue */
484	if (asoc->asconf_supported == 0) {
485		struct sctp_asconf_addr *param, *nparam;
486
487		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
488			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
489			SCTP_FREE(param, SCTP_M_ASC_ADDR);
490		}
491	}
492
493	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
494	    stcb->asoc.local_hmacs);
495	if (op_err) {
496		sctp_queue_op_err(stcb, op_err);
497		/* queuing will steal away the mbuf chain to the out queue */
498		op_err = NULL;
499	}
500	/* extract the cookie and queue it to "echo" it back... */
501	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
502		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
503		    stcb->asoc.overall_error_count,
504		    0,
505		    SCTP_FROM_SCTP_INPUT,
506		    __LINE__);
507	}
508
509	/*
510	 * Cancel the INIT timer, We do this first before queueing the
511	 * cookie. We always cancel at the primary to assume that we are
512	 * canceling the timer started by the INIT which always goes to the
513	 * primary.
514	 */
515	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
516	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
517
518	/* calculate the RTO */
519	if (asoc->overall_error_count == 0) {
520		sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
521		    SCTP_RTT_FROM_NON_DATA);
522	}
523	stcb->asoc.overall_error_count = 0;
524	net->error_count = 0;
525	retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
526	return (retval);
527}
528
529static void
530sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
531    struct sctp_tcb *stcb, struct sctp_nets *net)
532{
533	union sctp_sockstore store;
534	struct sctp_nets *r_net, *f_net;
535	struct timeval tv;
536	int req_prim = 0;
537	uint16_t old_error_counter;
538
539	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
540		/* Invalid length */
541		return;
542	}
543
544	memset(&store, 0, sizeof(store));
545	switch (cp->heartbeat.hb_info.addr_family) {
546#ifdef INET
547	case AF_INET:
548		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
549			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
550			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
551			store.sin.sin_port = stcb->rport;
552			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
553			    sizeof(store.sin.sin_addr));
554		} else {
555			return;
556		}
557		break;
558#endif
559#ifdef INET6
560	case AF_INET6:
561		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
562			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
563			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
564			store.sin6.sin6_port = stcb->rport;
565			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
566		} else {
567			return;
568		}
569		break;
570#endif
571	default:
572		return;
573	}
574	r_net = sctp_findnet(stcb, &store.sa);
575	if (r_net == NULL) {
576		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
577		return;
578	}
579	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
580	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
581	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
582		/*
583		 * If the its a HB and it's random value is correct when can
584		 * confirm the destination.
585		 */
586		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
587		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
588			stcb->asoc.primary_destination = r_net;
589			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
590			f_net = TAILQ_FIRST(&stcb->asoc.nets);
591			if (f_net != r_net) {
592				/*
593				 * first one on the list is NOT the primary
594				 * sctp_cmpaddr() is much more efficient if
595				 * the primary is the first on the list,
596				 * make it so.
597				 */
598				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
599				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
600			}
601			req_prim = 1;
602		}
603		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
604		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
605		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
606		    r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
607		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
608	}
609	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
610		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
611		    stcb->asoc.overall_error_count,
612		    0,
613		    SCTP_FROM_SCTP_INPUT,
614		    __LINE__);
615	}
616	stcb->asoc.overall_error_count = 0;
617	old_error_counter = r_net->error_count;
618	r_net->error_count = 0;
619	r_net->hb_responded = 1;
620	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
621	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
622	/* Now lets do a RTO with this */
623	sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
624	    SCTP_RTT_FROM_NON_DATA);
625	if ((r_net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
626		r_net->dest_state |= SCTP_ADDR_REACHABLE;
627		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
628		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
629	}
630	if (r_net->dest_state & SCTP_ADDR_PF) {
631		r_net->dest_state &= ~SCTP_ADDR_PF;
632		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
633	}
634	if (old_error_counter > 0) {
635		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
636		    stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
637		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
638	}
639	if (r_net == stcb->asoc.primary_destination) {
640		if (stcb->asoc.alternate) {
641			/* release the alternate, primary is good */
642			sctp_free_remote_addr(stcb->asoc.alternate);
643			stcb->asoc.alternate = NULL;
644		}
645	}
646	/* Mobility adaptation */
647	if (req_prim) {
648		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
649		    SCTP_MOBILITY_BASE) ||
650		    sctp_is_mobility_feature_on(stcb->sctp_ep,
651		    SCTP_MOBILITY_FASTHANDOFF)) &&
652		    sctp_is_mobility_feature_on(stcb->sctp_ep,
653		    SCTP_MOBILITY_PRIM_DELETED)) {
654			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
655			    stcb->sctp_ep, stcb, NULL,
656			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
657			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
658			    SCTP_MOBILITY_FASTHANDOFF)) {
659				sctp_assoc_immediate_retrans(stcb,
660				    stcb->asoc.primary_destination);
661			}
662			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
663			    SCTP_MOBILITY_BASE)) {
664				sctp_move_chunks_from_net(stcb,
665				    stcb->asoc.deleted_primary);
666			}
667			sctp_delete_prim_timer(stcb->sctp_ep, stcb);
668		}
669	}
670}
671
672static int
673sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
674{
675	/*
676	 * Return 0 means we want you to proceed with the abort non-zero
677	 * means no abort processing.
678	 */
679	uint32_t new_vtag;
680	struct sctpasochead *head;
681
682	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
683	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
684		atomic_add_int(&stcb->asoc.refcnt, 1);
685		SCTP_TCB_UNLOCK(stcb);
686		SCTP_INP_INFO_WLOCK();
687		SCTP_TCB_LOCK(stcb);
688		atomic_subtract_int(&stcb->asoc.refcnt, 1);
689	} else {
690		return (0);
691	}
692	new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
693	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
694		/* generate a new vtag and send init */
695		LIST_REMOVE(stcb, sctp_asocs);
696		stcb->asoc.my_vtag = new_vtag;
697		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
698		/*
699		 * put it in the bucket in the vtag hash of assoc's for the
700		 * system
701		 */
702		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
703		SCTP_INP_INFO_WUNLOCK();
704		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
705		return (1);
706	} else {
707		/*
708		 * treat like a case where the cookie expired i.e.: - dump
709		 * current cookie. - generate a new vtag. - resend init.
710		 */
711		/* generate a new vtag and send init */
712		LIST_REMOVE(stcb, sctp_asocs);
713		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
714		sctp_stop_all_cookie_timers(stcb);
715		sctp_toss_old_cookies(stcb, &stcb->asoc);
716		stcb->asoc.my_vtag = new_vtag;
717		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
718		/*
719		 * put it in the bucket in the vtag hash of assoc's for the
720		 * system
721		 */
722		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
723		SCTP_INP_INFO_WUNLOCK();
724		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
725		return (1);
726	}
727	return (0);
728}
729
730static int
731sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
732    struct sctp_nets *net)
733{
734	/*
735	 * return 0 means we want you to proceed with the abort non-zero
736	 * means no abort processing
737	 */
738	if (stcb->asoc.auth_supported == 0) {
739		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
740		return (0);
741	}
742	sctp_asconf_send_nat_state_update(stcb, net);
743	return (1);
744}
745
746/* Returns 1 if the stcb was aborted, 0 otherwise */
747static int
748sctp_handle_abort(struct sctp_abort_chunk *abort,
749    struct sctp_tcb *stcb, struct sctp_nets *net)
750{
751	uint16_t len;
752	uint16_t error;
753
754	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
755	if (stcb == NULL)
756		return (0);
757
758	len = ntohs(abort->ch.chunk_length);
759	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
760		/*
761		 * Need to check the cause codes for our two magic nat
762		 * aborts which don't kill the assoc necessarily.
763		 */
764		struct sctp_error_cause *cause;
765
766		cause = (struct sctp_error_cause *)(abort + 1);
767		error = ntohs(cause->code);
768		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
769			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n",
770			    abort->ch.chunk_flags);
771			if (sctp_handle_nat_colliding_state(stcb)) {
772				return (0);
773			}
774		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
775			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n",
776			    abort->ch.chunk_flags);
777			if (sctp_handle_nat_missing_state(stcb, net)) {
778				return (0);
779			}
780		}
781	} else {
782		error = 0;
783	}
784	/* stop any receive timers */
785	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
786	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
787	/* notify user of the abort and clean up... */
788	sctp_abort_notification(stcb, true, false, error, abort, SCTP_SO_NOT_LOCKED);
789	/* free the tcb */
790	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
791	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
792	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
793		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
794	}
795#ifdef SCTP_ASOCLOG_OF_TSNS
796	sctp_print_out_track_log(stcb);
797#endif
798	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
799	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
800	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
801	return (1);
802}
803
804static void
805sctp_start_net_timers(struct sctp_tcb *stcb)
806{
807	uint32_t cnt_hb_sent;
808	struct sctp_nets *net;
809
810	cnt_hb_sent = 0;
811	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
812		/*
813		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
814		 * If the dest in unconfirmed send a hb as well if under
815		 * max_hb_burst have been sent.
816		 */
817		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
818		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
819		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
820		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
821			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
822			cnt_hb_sent++;
823		}
824	}
825	if (cnt_hb_sent) {
826		sctp_chunk_output(stcb->sctp_ep, stcb,
827		    SCTP_OUTPUT_FROM_COOKIE_ACK,
828		    SCTP_SO_NOT_LOCKED);
829	}
830}
831
832static void
833sctp_check_data_from_peer(struct sctp_tcb *stcb, int *abort_flag)
834{
835	char msg[SCTP_DIAG_INFO_LEN];
836	struct sctp_association *asoc;
837	struct mbuf *op_err;
838	unsigned int i;
839
840	*abort_flag = 0;
841	asoc = &stcb->asoc;
842	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn) ||
843	    SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
844		SCTP_SNPRINTF(msg, sizeof(msg), "Missing TSN");
845		*abort_flag = 1;
846	}
847	if (!*abort_flag) {
848		for (i = 0; i < asoc->streamincnt; i++) {
849			if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue) ||
850			    !TAILQ_EMPTY(&asoc->strmin[i].uno_inqueue)) {
851				SCTP_SNPRINTF(msg, sizeof(msg), "Missing user data");
852				*abort_flag = 1;
853				break;
854			}
855		}
856	}
857	if (*abort_flag) {
858		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
859		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INPUT + SCTP_LOC_9;
860		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
861	}
862}
863
864static void
865sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
866    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
867{
868	int some_on_streamwheel;
869	int old_state;
870
871	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_shutdown: handling SHUTDOWN\n");
872	if (stcb == NULL)
873		return;
874	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
875	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
876		return;
877	}
878	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
879		/* Shutdown NOT the expected size */
880		return;
881	}
882	old_state = SCTP_GET_STATE(stcb);
883	sctp_update_acked(stcb, cp, abort_flag);
884	if (*abort_flag) {
885		return;
886	}
887	sctp_check_data_from_peer(stcb, abort_flag);
888	if (*abort_flag) {
889		return;
890	}
891	if (stcb->sctp_socket) {
892		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
893		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
894		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
895			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
896			/*
897			 * notify upper layer that peer has initiated a
898			 * shutdown
899			 */
900			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
901
902			/* reset time */
903			(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
904		}
905	}
906	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
907		/*
908		 * stop the shutdown timer, since we WILL move to
909		 * SHUTDOWN-ACK-SENT.
910		 */
911		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
912		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
913	}
914	/* Now is there unsent data on a stream somewhere? */
915	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
916
917	if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
918	    !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
919	    some_on_streamwheel) {
920		/* By returning we will push more data out */
921		return;
922	} else {
923		/* no outstanding data to send, so move on... */
924		/* send SHUTDOWN-ACK */
925		/* move to SHUTDOWN-ACK-SENT state */
926		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
927		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
928			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
929		}
930		if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
931			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
932			sctp_stop_timers_for_shutdown(stcb);
933			sctp_send_shutdown_ack(stcb, net);
934			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
935			    stcb->sctp_ep, stcb, net);
936		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
937			sctp_send_shutdown_ack(stcb, net);
938		}
939	}
940}
941
942static void
943sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
944    struct sctp_tcb *stcb,
945    struct sctp_nets *net)
946{
947	int abort_flag;
948
949	SCTPDBG(SCTP_DEBUG_INPUT2,
950	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
951	if (stcb == NULL) {
952		return;
953	}
954
955	/* process according to association state */
956	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
957	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
958		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
959		sctp_send_shutdown_complete(stcb, net, 1);
960		SCTP_TCB_UNLOCK(stcb);
961		return;
962	}
963	if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
964	    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
965		/* unexpected SHUTDOWN-ACK... so ignore... */
966		SCTP_TCB_UNLOCK(stcb);
967		return;
968	}
969	sctp_check_data_from_peer(stcb, &abort_flag);
970	if (abort_flag) {
971		return;
972	}
973#ifdef INVARIANTS
974	if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
975	    !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
976	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
977		panic("Queues are not empty when handling SHUTDOWN-ACK");
978	}
979#endif
980	/* stop the timer */
981	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
982	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
983	/* send SHUTDOWN-COMPLETE */
984	sctp_send_shutdown_complete(stcb, net, 0);
985	/* notify upper layer protocol */
986	if (stcb->sctp_socket) {
987		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
988		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
989			SCTP_SB_CLEAR(stcb->sctp_socket->so_snd);
990		}
991		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
992	}
993	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
994	/* free the TCB but first save off the ep */
995	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
996	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
997}
998
999static void
1000sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
1001{
1002	switch (chunk_type) {
1003	case SCTP_ASCONF_ACK:
1004	case SCTP_ASCONF:
1005		sctp_asconf_cleanup(stcb);
1006		break;
1007	case SCTP_IFORWARD_CUM_TSN:
1008	case SCTP_FORWARD_CUM_TSN:
1009		stcb->asoc.prsctp_supported = 0;
1010		break;
1011	default:
1012		SCTPDBG(SCTP_DEBUG_INPUT2,
1013		    "Peer does not support chunk type %d (0x%x).\n",
1014		    chunk_type, chunk_type);
1015		break;
1016	}
1017}
1018
1019/*
1020 * Skip past the param header and then we will find the param that caused the
1021 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1022 * these will turn of specific features.
1023 * XXX: Is this the right thing to do?
1024 */
1025static void
1026sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
1027{
1028	switch (parameter_type) {
1029		/* pr-sctp draft */
1030	case SCTP_PRSCTP_SUPPORTED:
1031		stcb->asoc.prsctp_supported = 0;
1032		break;
1033	case SCTP_SUPPORTED_CHUNK_EXT:
1034		break;
1035		/* draft-ietf-tsvwg-addip-sctp */
1036	case SCTP_HAS_NAT_SUPPORT:
1037		stcb->asoc.peer_supports_nat = 0;
1038		break;
1039	case SCTP_ADD_IP_ADDRESS:
1040	case SCTP_DEL_IP_ADDRESS:
1041	case SCTP_SET_PRIM_ADDR:
1042		stcb->asoc.asconf_supported = 0;
1043		break;
1044	case SCTP_SUCCESS_REPORT:
1045	case SCTP_ERROR_CAUSE_IND:
1046		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1047		SCTPDBG(SCTP_DEBUG_INPUT2,
1048		    "Turning off ASCONF to this strange peer\n");
1049		stcb->asoc.asconf_supported = 0;
1050		break;
1051	default:
1052		SCTPDBG(SCTP_DEBUG_INPUT2,
1053		    "Peer does not support param type %d (0x%x)??\n",
1054		    parameter_type, parameter_type);
1055		break;
1056	}
1057}
1058
1059static int
1060sctp_handle_error(struct sctp_chunkhdr *ch,
1061    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
1062{
1063	struct sctp_error_cause *cause;
1064	struct sctp_association *asoc;
1065	uint32_t remaining_length, adjust;
1066	uint16_t code, cause_code, cause_length;
1067
1068	/* parse through all of the errors and process */
1069	asoc = &stcb->asoc;
1070	cause = (struct sctp_error_cause *)((caddr_t)ch +
1071	    sizeof(struct sctp_chunkhdr));
1072	remaining_length = ntohs(ch->chunk_length);
1073	if (remaining_length > limit) {
1074		remaining_length = limit;
1075	}
1076	if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
1077		remaining_length -= sizeof(struct sctp_chunkhdr);
1078	} else {
1079		remaining_length = 0;
1080	}
1081	code = 0;
1082	while (remaining_length >= sizeof(struct sctp_error_cause)) {
1083		/* Process an Error Cause */
1084		cause_code = ntohs(cause->code);
1085		cause_length = ntohs(cause->length);
1086		if ((cause_length > remaining_length) || (cause_length == 0)) {
1087			/* Invalid cause length, possibly due to truncation. */
1088			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
1089			    remaining_length, cause_length);
1090			return (0);
1091		}
1092		if (code == 0) {
1093			/* report the first error cause */
1094			code = cause_code;
1095		}
1096		switch (cause_code) {
1097		case SCTP_CAUSE_INVALID_STREAM:
1098		case SCTP_CAUSE_MISSING_PARAM:
1099		case SCTP_CAUSE_INVALID_PARAM:
1100		case SCTP_CAUSE_NO_USER_DATA:
1101			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
1102			    cause_code);
1103			break;
1104		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1105			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n",
1106			    ch->chunk_flags);
1107			if (sctp_handle_nat_colliding_state(stcb)) {
1108				return (0);
1109			}
1110			break;
1111		case SCTP_CAUSE_NAT_MISSING_STATE:
1112			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n",
1113			    ch->chunk_flags);
1114			if (sctp_handle_nat_missing_state(stcb, net)) {
1115				return (0);
1116			}
1117			break;
1118		case SCTP_CAUSE_STALE_COOKIE:
1119			/*
1120			 * We only act if we have echoed a cookie and are
1121			 * waiting.
1122			 */
1123			if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
1124			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1125				struct timeval now;
1126				struct sctp_error_stale_cookie *stale_cookie;
1127				uint64_t stale_time;
1128
1129				asoc->stale_cookie_count++;
1130				if (asoc->stale_cookie_count > asoc->max_init_times) {
1131					sctp_abort_notification(stcb, false, true, 0, NULL, SCTP_SO_NOT_LOCKED);
1132					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1133					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1134					return (-1);
1135				}
1136				stale_cookie = (struct sctp_error_stale_cookie *)cause;
1137				stale_time = ntohl(stale_cookie->stale_time);
1138				if (stale_time == 0) {
1139					/* Use an RTT as an approximation. */
1140					(void)SCTP_GETTIME_TIMEVAL(&now);
1141					timevalsub(&now, &asoc->time_entered);
1142					stale_time = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
1143					if (stale_time == 0) {
1144						stale_time = 1;
1145					}
1146				}
1147				/*
1148				 * stale_time is in usec, convert it to
1149				 * msec. Round upwards, to ensure that it is
1150				 * non-zero.
1151				 */
1152				stale_time = (stale_time + 999) / 1000;
1153				/* Double it, to be more robust on RTX. */
1154				stale_time = 2 * stale_time;
1155				asoc->cookie_preserve_req = (uint32_t)stale_time;
1156				if (asoc->overall_error_count == 0) {
1157					sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
1158					    SCTP_RTT_FROM_NON_DATA);
1159				}
1160				asoc->overall_error_count = 0;
1161				/* Blast back to INIT state */
1162				sctp_toss_old_cookies(stcb, &stcb->asoc);
1163				sctp_stop_all_cookie_timers(stcb);
1164				SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
1165				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
1166				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1167			}
1168			break;
1169		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1170			/*
1171			 * Nothing we can do here, we don't do hostname
1172			 * addresses so if the peer does not like my IPv6
1173			 * (or IPv4 for that matter) it does not matter. If
1174			 * they don't support that type of address, they can
1175			 * NOT possibly get that packet type... i.e. with no
1176			 * IPv6 you can't receive a IPv6 packet. so we can
1177			 * safely ignore this one. If we ever added support
1178			 * for HOSTNAME Addresses, then we would need to do
1179			 * something here.
1180			 */
1181			break;
1182		case SCTP_CAUSE_UNRECOG_CHUNK:
1183			if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
1184				struct sctp_error_unrecognized_chunk *unrec_chunk;
1185
1186				unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
1187				sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
1188			}
1189			break;
1190		case SCTP_CAUSE_UNRECOG_PARAM:
1191			/* XXX: We only consider the first parameter */
1192			if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
1193				struct sctp_paramhdr *unrec_parameter;
1194
1195				unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
1196				sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
1197			}
1198			break;
1199		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1200			/*
1201			 * We ignore this since the timer will drive out a
1202			 * new cookie anyway and there timer will drive us
1203			 * to send a SHUTDOWN_COMPLETE. We can't send one
1204			 * here since we don't have their tag.
1205			 */
1206			break;
1207		case SCTP_CAUSE_DELETING_LAST_ADDR:
1208		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1209		case SCTP_CAUSE_DELETING_SRC_ADDR:
1210			/*
1211			 * We should NOT get these here, but in a
1212			 * ASCONF-ACK.
1213			 */
1214			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
1215			    cause_code);
1216			break;
1217		case SCTP_CAUSE_OUT_OF_RESC:
1218			/*
1219			 * And what, pray tell do we do with the fact that
1220			 * the peer is out of resources? Not really sure we
1221			 * could do anything but abort. I suspect this
1222			 * should have came WITH an abort instead of in a
1223			 * OP-ERROR.
1224			 */
1225			break;
1226		default:
1227			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
1228			    cause_code);
1229			break;
1230		}
1231		adjust = SCTP_SIZE32(cause_length);
1232		if (remaining_length >= adjust) {
1233			remaining_length -= adjust;
1234		} else {
1235			remaining_length = 0;
1236		}
1237		cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
1238	}
1239	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
1240	return (0);
1241}
1242
1243static int
1244sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1245    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1246    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1247    struct sctp_nets *net, int *abort_no_unlock,
1248    uint8_t mflowtype, uint32_t mflowid,
1249    uint32_t vrf_id)
1250{
1251	struct sctp_init_ack *init_ack;
1252	struct mbuf *op_err;
1253
1254	SCTPDBG(SCTP_DEBUG_INPUT2,
1255	    "sctp_handle_init_ack: handling INIT-ACK\n");
1256
1257	if (stcb == NULL) {
1258		SCTPDBG(SCTP_DEBUG_INPUT2,
1259		    "sctp_handle_init_ack: TCB is null\n");
1260		return (-1);
1261	}
1262	/* Only process the INIT-ACK chunk in COOKIE WAIT state. */
1263	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
1264		init_ack = &cp->init;
1265		/* Validate parameters. */
1266		if ((ntohl(init_ack->initiate_tag) == 0) ||
1267		    (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) ||
1268		    (ntohs(init_ack->num_inbound_streams) == 0) ||
1269		    (ntohs(init_ack->num_outbound_streams) == 0)) {
1270			/* One of the mandatory parameters is illegal. */
1271			op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1272			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1273			    src, dst, sh, op_err,
1274			    mflowtype, mflowid,
1275			    vrf_id, net->port);
1276			*abort_no_unlock = 1;
1277			return (-1);
1278		}
1279		if (stcb->asoc.primary_destination->dest_state &
1280		    SCTP_ADDR_UNCONFIRMED) {
1281			/*
1282			 * The primary is where we sent the INIT, we can
1283			 * always consider it confirmed when the INIT-ACK is
1284			 * returned. Do this before we load addresses
1285			 * though.
1286			 */
1287			stcb->asoc.primary_destination->dest_state &=
1288			    ~SCTP_ADDR_UNCONFIRMED;
1289			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1290			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1291		}
1292		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1293		    net, abort_no_unlock,
1294		    mflowtype, mflowid,
1295		    vrf_id) < 0) {
1296			/* error in parsing parameters */
1297			return (-1);
1298		}
1299		/* Update our state. */
1300		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1301		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
1302
1303		/* Reset the RTO calculation. */
1304		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1305			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1306			    stcb->asoc.overall_error_count,
1307			    0,
1308			    SCTP_FROM_SCTP_INPUT,
1309			    __LINE__);
1310		}
1311		stcb->asoc.overall_error_count = 0;
1312		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1313		/*
1314		 * Collapse the init timer back in case of a exponential
1315		 * backoff.
1316		 */
1317		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1318		    stcb, net);
1319		/*
1320		 * The output routine at the end of the inbound data
1321		 * processing will cause the cookie to be sent.
1322		 */
1323		SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1324		return (0);
1325	} else {
1326		return (-1);
1327	}
1328}
1329
1330static struct sctp_tcb *
1331sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1332    struct sockaddr *src, struct sockaddr *dst,
1333    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1334    struct sctp_inpcb *inp, struct sctp_nets **netp,
1335    struct sockaddr *init_src, int *notification,
1336    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1337    uint8_t mflowtype, uint32_t mflowid,
1338    uint32_t vrf_id, uint16_t port);
1339
1340/*
1341 * handle a state cookie for an existing association m: input packet mbuf
1342 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1343 * "split" mbuf and the cookie signature does not exist offset: offset into
1344 * mbuf to the cookie-echo chunk
1345 */
1346static struct sctp_tcb *
1347sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1348    struct sockaddr *src, struct sockaddr *dst,
1349    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1350    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1351    struct sockaddr *init_src, int *notification,
1352    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1353    uint8_t mflowtype, uint32_t mflowid,
1354    uint32_t vrf_id, uint16_t port)
1355{
1356	struct sctp_association *asoc;
1357	struct sctp_init_chunk *init_cp, init_buf;
1358	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1359	struct sctp_asconf_addr *aparam, *naparam;
1360	struct sctp_asconf_ack *aack, *naack;
1361	struct sctp_tmit_chunk *chk, *nchk;
1362	struct sctp_stream_reset_list *strrst, *nstrrst;
1363	struct sctp_queued_to_read *sq, *nsq;
1364	struct sctp_nets *net;
1365	struct mbuf *op_err;
1366	int init_offset, initack_offset, i;
1367	int retval;
1368	int spec_flag = 0;
1369	uint32_t how_indx;
1370#if defined(SCTP_DETAILED_STR_STATS)
1371	int j;
1372#endif
1373
1374	net = *netp;
1375	/* I know that the TCB is non-NULL from the caller */
1376	asoc = &stcb->asoc;
1377	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1378		if (asoc->cookie_how[how_indx] == 0)
1379			break;
1380	}
1381	if (how_indx < sizeof(asoc->cookie_how)) {
1382		asoc->cookie_how[how_indx] = 1;
1383	}
1384	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1385		/* SHUTDOWN came in after sending INIT-ACK */
1386		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1387		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1388		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1389		    mflowtype, mflowid, inp->fibnum,
1390		    vrf_id, net->port);
1391		if (how_indx < sizeof(asoc->cookie_how))
1392			asoc->cookie_how[how_indx] = 2;
1393		SCTP_TCB_UNLOCK(stcb);
1394		return (NULL);
1395	}
1396	/*
1397	 * find and validate the INIT chunk in the cookie (peer's info) the
1398	 * INIT should start after the cookie-echo header struct (chunk
1399	 * header, state cookie header struct)
1400	 */
1401	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1402
1403	init_cp = (struct sctp_init_chunk *)
1404	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1405	    (uint8_t *)&init_buf);
1406	if (init_cp == NULL) {
1407		/* could not pull a INIT chunk in cookie */
1408		SCTP_TCB_UNLOCK(stcb);
1409		return (NULL);
1410	}
1411	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1412		SCTP_TCB_UNLOCK(stcb);
1413		return (NULL);
1414	}
1415	/*
1416	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1417	 * INIT-ACK follows the INIT chunk
1418	 */
1419	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1420	initack_cp = (struct sctp_init_ack_chunk *)
1421	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1422	    (uint8_t *)&initack_buf);
1423	if (initack_cp == NULL) {
1424		/* could not pull INIT-ACK chunk in cookie */
1425		SCTP_TCB_UNLOCK(stcb);
1426		return (NULL);
1427	}
1428	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1429		SCTP_TCB_UNLOCK(stcb);
1430		return (NULL);
1431	}
1432	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1433	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1434		/*
1435		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1436		 * to get into the OPEN state
1437		 */
1438		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1439			/*-
1440			 * Opps, this means that we somehow generated two vtag's
1441			 * the same. I.e. we did:
1442			 *  Us               Peer
1443			 *   <---INIT(tag=a)------
1444			 *   ----INIT-ACK(tag=t)-->
1445			 *   ----INIT(tag=t)------> *1
1446			 *   <---INIT-ACK(tag=a)---
1447			 *   <----CE(tag=t)------------- *2
1448			 *
1449			 * At point *1 we should be generating a different
1450			 * tag t'. Which means we would throw away the CE and send
1451			 * ours instead. Basically this is case C (throw away side).
1452			 */
1453			if (how_indx < sizeof(asoc->cookie_how))
1454				asoc->cookie_how[how_indx] = 17;
1455			SCTP_TCB_UNLOCK(stcb);
1456			return (NULL);
1457		}
1458		switch (SCTP_GET_STATE(stcb)) {
1459		case SCTP_STATE_COOKIE_WAIT:
1460		case SCTP_STATE_COOKIE_ECHOED:
1461			/*
1462			 * INIT was sent but got a COOKIE_ECHO with the
1463			 * correct tags... just accept it...but we must
1464			 * process the init so that we can make sure we have
1465			 * the right seq no's.
1466			 */
1467			/* First we must process the INIT !! */
1468			if (sctp_process_init(init_cp, stcb) < 0) {
1469				if (how_indx < sizeof(asoc->cookie_how))
1470					asoc->cookie_how[how_indx] = 3;
1471				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1472				SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
1473				sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1474				    src, dst, sh, op_err,
1475				    mflowtype, mflowid,
1476				    vrf_id, net->port);
1477				return (NULL);
1478			}
1479			/* we have already processed the INIT so no problem */
1480			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1481			    stcb, net,
1482			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1483			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1484			    stcb, net,
1485			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1486			/* update current state */
1487			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1488				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1489			else
1490				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1491
1492			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1493			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1494			sctp_stop_all_cookie_timers(stcb);
1495			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1496			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1497			    (!SCTP_IS_LISTENING(inp))) {
1498				/*
1499				 * Here is where collision would go if we
1500				 * did a connect() and instead got a
1501				 * init/init-ack/cookie done before the
1502				 * init-ack came back..
1503				 */
1504				sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
1505				soisconnected(stcb->sctp_socket);
1506			}
1507			/* notify upper layer */
1508			*notification = SCTP_NOTIFY_ASSOC_UP;
1509			net->hb_responded = 1;
1510			if (stcb->asoc.sctp_autoclose_ticks &&
1511			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1512				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1513				    inp, stcb, NULL);
1514			}
1515			break;
1516		default:
1517			/*
1518			 * we're in the OPEN state (or beyond), so peer must
1519			 * have simply lost the COOKIE-ACK
1520			 */
1521			break;
1522		}		/* end switch */
1523		sctp_stop_all_cookie_timers(stcb);
1524		if ((retval = sctp_load_addresses_from_init(stcb, m,
1525		    init_offset + sizeof(struct sctp_init_chunk),
1526		    initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
1527			if (how_indx < sizeof(asoc->cookie_how))
1528				asoc->cookie_how[how_indx] = 4;
1529			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1530			    "Problem with address parameters");
1531			SCTPDBG(SCTP_DEBUG_INPUT1,
1532			    "Load addresses from INIT causes an abort %d\n",
1533			    retval);
1534			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1535			    src, dst, sh, op_err,
1536			    mflowtype, mflowid,
1537			    vrf_id, net->port);
1538			return (NULL);
1539		}
1540		/* respond with a COOKIE-ACK */
1541		sctp_toss_old_cookies(stcb, asoc);
1542		sctp_send_cookie_ack(stcb);
1543		if (how_indx < sizeof(asoc->cookie_how))
1544			asoc->cookie_how[how_indx] = 5;
1545		return (stcb);
1546	}
1547
1548	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1549	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1550	    cookie->tie_tag_my_vtag == 0 &&
1551	    cookie->tie_tag_peer_vtag == 0) {
1552		/*
1553		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1554		 */
1555		if (how_indx < sizeof(asoc->cookie_how))
1556			asoc->cookie_how[how_indx] = 6;
1557		SCTP_TCB_UNLOCK(stcb);
1558		return (NULL);
1559	}
1560	/*
1561	 * If nat support, and the below and stcb is established, send back
1562	 * a ABORT(colliding state) if we are established.
1563	 */
1564	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
1565	    (asoc->peer_supports_nat) &&
1566	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1567	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1568	    (asoc->peer_vtag == 0)))) {
		/*
		 * Special case - the peer supports NAT. We may have handed
		 * out the same tag on two INITs since one of them was never
		 * established: we get an INIT from host-1 behind the NAT and
		 * respond with tag-a, then get an INIT from host-2 behind
		 * the NAT and respond with tag-a again. Then we bring up the
		 * association for host-1 (or host-2), and the cookie from
		 * host-2 (or host-1) arrives. Now we have colliding state
		 * and must send an ABORT with a colliding state indication.
		 */
1579		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1580		sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1581		    mflowtype, mflowid, inp->fibnum,
1582		    vrf_id, port);
1583		SCTP_TCB_UNLOCK(stcb);
1584		return (NULL);
1585	}
1586	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1587	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1588	    (asoc->peer_vtag == 0))) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA. My info
		 * should be ok; re-accept peer info.
		 */
1593		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side whose sequence numbers
			 * differ is the one that normally would have hit
			 * case C. This in effect "extends" our vtags in this
			 * collision case to 64 bits. The same collision
			 * could still occur (both the vtag and the sequence
			 * number repeat), but that is much less likely. If
			 * it did happen we would proceed and bring up the
			 * association; we may end up with the wrong stream
			 * setup, which would be bad, but there is no way to
			 * tell until we send on a stream that does not
			 * exist :-)
			 */
1611			if (how_indx < sizeof(asoc->cookie_how))
1612				asoc->cookie_how[how_indx] = 7;
1613
1614			SCTP_TCB_UNLOCK(stcb);
1615			return (NULL);
1616		}
1617		if (how_indx < sizeof(asoc->cookie_how))
1618			asoc->cookie_how[how_indx] = 8;
1619		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1620		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1621		sctp_stop_all_cookie_timers(stcb);
		/*
		 * Since we did not send an HB, make sure we don't double
		 * things.
		 */
1626		net->hb_responded = 1;
1627		if (stcb->asoc.sctp_autoclose_ticks &&
1628		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1629			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1630			    NULL);
1631		}
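		/* Re-accept our own info from the INIT-ACK in the cookie. */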
1632		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1633		if (asoc->pre_open_streams < asoc->streamoutcnt) {
1634			asoc->pre_open_streams = asoc->streamoutcnt;
1635		}
1636
1637		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok, the peer probably discarded our data (if we
			 * echoed a cookie+data), so anything on the
			 * sent_queue should be marked for retransmission. We
			 * may not get anything to kick us, so it COULD still
			 * take a timeout to move these, but it can't hurt to
			 * mark them.
			 */
1646
1647			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1648				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1649					chk->sent = SCTP_DATAGRAM_RESEND;
1650					sctp_flight_size_decrease(chk);
1651					sctp_total_flight_decrease(stcb, chk);
1652					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1653					spec_flag++;
1654				}
1655			}
1656		}
1657		/* process the INIT info (peer's info) */
1658		if (sctp_process_init(init_cp, stcb) < 0) {
1659			if (how_indx < sizeof(asoc->cookie_how))
1660				asoc->cookie_how[how_indx] = 9;
1661			op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1662			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
1663			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1664			    src, dst, sh, op_err,
1665			    mflowtype, mflowid,
1666			    vrf_id, net->port);
1667			return (NULL);
1668		}
1669		if ((retval = sctp_load_addresses_from_init(stcb, m,
1670		    init_offset + sizeof(struct sctp_init_chunk),
1671		    initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
1672			if (how_indx < sizeof(asoc->cookie_how))
1673				asoc->cookie_how[how_indx] = 10;
1674			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1675			    "Problem with address parameters");
1676			SCTPDBG(SCTP_DEBUG_INPUT1,
1677			    "Load addresses from INIT causes an abort %d\n",
1678			    retval);
1679			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1680			    src, dst, sh, op_err,
1681			    mflowtype, mflowid,
1682			    vrf_id, net->port);
1683			return (NULL);
1684		}
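		/*
		 * If we were still in the handshake, this COOKIE-ECHO
		 * completes the setup: flag the ASSOC_UP notification and,
		 * for a non-listening TCP-style socket, mark it connected.
		 */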
1685		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1686		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1687			*notification = SCTP_NOTIFY_ASSOC_UP;
1688
1689			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1690			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1691			    (!SCTP_IS_LISTENING(inp))) {
1692				sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
1693				soisconnected(stcb->sctp_socket);
1694			}
1695			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1696				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1697			else
1698				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1699			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1700		} else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1701			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1702		} else {
1703			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1704		}
1705		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1706		sctp_stop_all_cookie_timers(stcb);
1707		sctp_toss_old_cookies(stcb, asoc);
1708		sctp_send_cookie_ack(stcb);
1709		if (spec_flag) {
			/*
			 * Only do this if we have retransmissions set. This
			 * call gets just the COOKIE-ACK out; when we return,
			 * the normal call to sctp_chunk_output will get the
			 * retransmissions out behind it.
			 */
1717			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1718		}
1719		if (how_indx < sizeof(asoc->cookie_how))
1720			asoc->cookie_how[how_indx] = 11;
1721
1722		return (stcb);
1723	}
1724	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1725	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1726	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1727	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1728	    cookie->tie_tag_peer_vtag != 0) {
1729		struct sctpasochead *head;
1730
1731		if (asoc->peer_supports_nat) {
1732			struct sctp_tcb *local_stcb;
1733
1734			/*
1735			 * This is a gross gross hack. Just call the
1736			 * cookie_new code since we are allowing a duplicate
1737			 * association. I hope this works...
1738			 */
1739			local_stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst,
1740			    sh, cookie, cookie_len,
1741			    inp, netp, init_src, notification,
1742			    auth_skipped, auth_offset, auth_len,
1743			    mflowtype, mflowid,
1744			    vrf_id, port);
1745			if (local_stcb == NULL) {
1746				SCTP_TCB_UNLOCK(stcb);
1747			}
1748			return (local_stcb);
1749		}
1750		/*
1751		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1752		 */
1753		/* temp code */
1754		if (how_indx < sizeof(asoc->cookie_how))
1755			asoc->cookie_how[how_indx] = 12;
1756		sctp_stop_association_timers(stcb, false);
1757		/* notify upper layer */
1758		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1759		atomic_add_int(&stcb->asoc.refcnt, 1);
1760		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
1761		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1762		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
1763			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1764		}
1765		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1766			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1767		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
1768			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1769		}
1770		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1771			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1772
1773		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
1774			/* move to OPEN state, if not in SHUTDOWN_SENT */
1775			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1776		}
1777		if (asoc->pre_open_streams < asoc->streamoutcnt) {
1778			asoc->pre_open_streams = asoc->streamoutcnt;
1779		}
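		/*
		 * Reset all outgoing sequence state to the initial TSN from
		 * the INIT-ACK carried in the cookie, since the peer
		 * restarted the association.
		 */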
1780		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1781		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1782		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1783		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1784		asoc->str_reset_seq_in = asoc->init_seq_number;
1785		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1786		asoc->send_sack = 1;
1787		asoc->data_pkts_seen = 0;
1788		asoc->last_data_chunk_from = NULL;
1789		asoc->last_control_chunk_from = NULL;
1790		asoc->last_net_cmt_send_started = NULL;
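		/*
		 * Clear the TSN mapping arrays; nothing received by the old
		 * incarnation is relevant any more.
		 */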
1791		if (asoc->mapping_array) {
1792			memset(asoc->mapping_array, 0,
1793			    asoc->mapping_array_size);
1794		}
1795		if (asoc->nr_mapping_array) {
1796			memset(asoc->nr_mapping_array, 0,
1797			    asoc->mapping_array_size);
1798		}
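		/*
		 * Drop the TCB lock and re-acquire the locks in the proper
		 * order (INFO, INP, TCB) so the association can be moved in
		 * the vtag hash below; the refcount taken above keeps the
		 * TCB alive while it is unlocked.
		 */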
1799		SCTP_TCB_UNLOCK(stcb);
1800		SCTP_INP_INFO_WLOCK();
1801		SCTP_INP_WLOCK(stcb->sctp_ep);
1802		SCTP_TCB_LOCK(stcb);
1803		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1804		/* send up all the data */
1805		sctp_report_all_outbound(stcb, 0, SCTP_SO_LOCKED);
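		/* Reset per-stream output state for the restarted association. */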
1806		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1807			stcb->asoc.strmout[i].chunks_on_queues = 0;
1808#if defined(SCTP_DETAILED_STR_STATS)
1809			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1810				asoc->strmout[i].abandoned_sent[j] = 0;
1811				asoc->strmout[i].abandoned_unsent[j] = 0;
1812			}
1813#else
1814			asoc->strmout[i].abandoned_sent[0] = 0;
1815			asoc->strmout[i].abandoned_unsent[0] = 0;
1816#endif
1817			stcb->asoc.strmout[i].next_mid_ordered = 0;
1818			stcb->asoc.strmout[i].next_mid_unordered = 0;
1819			stcb->asoc.strmout[i].sid = i;
1820			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1821		}
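		/*
		 * Flush stream-reset requests, pending replies, and queued
		 * control/ASCONF chunks left over from the old incarnation.
		 */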
1822		TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) {
1823			TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp);
1824			SCTP_FREE(strrst, SCTP_M_STRESET);
1825		}
1826		TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) {
1827			TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
1828			if (sq->data) {
1829				sctp_m_freem(sq->data);
1830				sq->data = NULL;
1831			}
1832			sctp_free_remote_addr(sq->whoFrom);
1833			sq->whoFrom = NULL;
1834			sq->stcb = NULL;
1835			sctp_free_a_readq(stcb, sq);
1836		}
1837		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
1838			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
1839			if (chk->data) {
1840				sctp_m_freem(chk->data);
1841				chk->data = NULL;
1842			}
1843			if (chk->holds_key_ref)
1844				sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
1845			sctp_free_remote_addr(chk->whoTo);
1846			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
1847			SCTP_DECR_CHK_COUNT();
1848		}
1849		asoc->ctrl_queue_cnt = 0;
1850		asoc->str_reset = NULL;
1851		asoc->stream_reset_outstanding = 0;
1852		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
1853			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
1854			if (chk->data) {
1855				sctp_m_freem(chk->data);
1856				chk->data = NULL;
1857			}
1858			if (chk->holds_key_ref)
1859				sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
1860			sctp_free_remote_addr(chk->whoTo);
1861			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
1862			SCTP_DECR_CHK_COUNT();
1863		}
1864		TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) {
1865			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
1866			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
1867		}
1868		TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) {
1869			TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
1870			if (aack->data != NULL) {
1871				sctp_m_freem(aack->data);
1872			}
1873			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
1874		}
1875		asoc->rcv_edmid = cookie->rcv_edmid;
1876
1877		/* process the INIT-ACK info (my info) */
1878		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1879		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1880
1881		/* pull from vtag hash */
1882		LIST_REMOVE(stcb, sctp_asocs);
1883		/* re-insert to new vtag position */
1884		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1885		    SCTP_BASE_INFO(hashasocmark))];
		/*
		 * Put it in the bucket of the system-wide vtag hash of
		 * associations.
		 */
1890		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1891
1892		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1893		SCTP_INP_INFO_WUNLOCK();
1894		asoc->total_flight = 0;
1895		asoc->total_flight_count = 0;
1896		/* process the INIT info (peer's info) */
1897		if (sctp_process_init(init_cp, stcb) < 0) {
1898			if (how_indx < sizeof(asoc->cookie_how))
1899				asoc->cookie_how[how_indx] = 13;
1900			op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1901			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
1902			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1903			    src, dst, sh, op_err,
1904			    mflowtype, mflowid,
1905			    vrf_id, net->port);
1906			return (NULL);
1907		}
		/*
		 * Since we did not send an HB, make sure we don't double
		 * things.
		 */
1912		net->hb_responded = 1;
1913
1914		if ((retval = sctp_load_addresses_from_init(stcb, m,
1915		    init_offset + sizeof(struct sctp_init_chunk),
1916		    initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
1917			if (how_indx < sizeof(asoc->cookie_how))
1918				asoc->cookie_how[how_indx] = 14;
1919			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1920			    "Problem with address parameters");
1921			SCTPDBG(SCTP_DEBUG_INPUT1,
1922			    "Load addresses from INIT causes an abort %d\n",
1923			    retval);
1924			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1925			    src, dst, sh, op_err,
1926			    mflowtype, mflowid,
1927			    vrf_id, net->port);
1928			return (NULL);
1929		}
1930		/* respond with a COOKIE-ACK */
1931		sctp_send_cookie_ack(stcb);
1932		if (how_indx < sizeof(asoc->cookie_how))
1933			asoc->cookie_how[how_indx] = 15;
1934		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE) &&
1935		    (asoc->sctp_autoclose_ticks > 0)) {
1936			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1937		}
1938		return (stcb);
1939	}
1940	if (how_indx < sizeof(asoc->cookie_how))
1941		asoc->cookie_how[how_indx] = 16;
1942	/* all other cases... */
1943	SCTP_TCB_UNLOCK(stcb);
1944	return (NULL);
1945}
1946
/*
 * Handle a state cookie for a new association.
 * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO
 *    chunk; note this is a "split" mbuf and the cookie signature does not
 *    exist.
 * offset: offset into mbuf to the COOKIE-ECHO chunk.
 * length: length of the cookie chunk.
 * to: where the INIT was from.
 * Returns a new TCB.
 */
1954static struct sctp_tcb *
1955sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1956    struct sockaddr *src, struct sockaddr *dst,
1957    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1958    struct sctp_inpcb *inp, struct sctp_nets **netp,
1959    struct sockaddr *init_src, int *notification,
1960    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1961    uint8_t mflowtype, uint32_t mflowid,
1962    uint32_t vrf_id, uint16_t port)
1963{
1964	struct sctp_tcb *stcb;
1965	struct sctp_init_chunk *init_cp, init_buf;
1966	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1967	union sctp_sockstore store;
1968	struct sctp_association *asoc;
1969	int init_offset, initack_offset, initack_limit;
1970	int error = 0;
1971	uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
1972
	/*
	 * Find and validate the INIT chunk in the cookie (peer's info). The
	 * INIT should start after the COOKIE-ECHO header struct (chunk
	 * header, state cookie header struct).
	 */
1978	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1979	init_cp = (struct sctp_init_chunk *)
1980	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1981	    (uint8_t *)&init_buf);
1982	if (init_cp == NULL) {
		/* could not pull an INIT chunk from the cookie */
1984		SCTPDBG(SCTP_DEBUG_INPUT1,
1985		    "process_cookie_new: could not pull INIT chunk hdr\n");
1986		return (NULL);
1987	}
1988	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1989		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1990		return (NULL);
1991	}
1992	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * Find and validate the INIT-ACK chunk in the cookie (my info). The
	 * INIT-ACK follows the INIT chunk.
	 */
1997	initack_cp = (struct sctp_init_ack_chunk *)
1998	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1999	    (uint8_t *)&initack_buf);
2000	if (initack_cp == NULL) {
2001		/* could not pull INIT-ACK chunk in cookie */
2002		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
2003		return (NULL);
2004	}
2005	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2006		return (NULL);
2007	}
2008	/*
2009	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2010	 * "initack_limit" value.  This is because the chk_length field
2011	 * includes the length of the cookie, but the cookie is omitted when
2012	 * the INIT and INIT_ACK are tacked onto the cookie...
2013	 */
2014	initack_limit = offset + cookie_len;
2015
2016	/*
2017	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2018	 * and populate
2019	 */
2020
	/*
	 * Here we do a trick: we pass NULL for the proc/thread argument. We
	 * do this since in effect we only use the p argument when the socket
	 * is unbound and we must do an implicit bind. Since we are
	 * processing a cookie, we cannot be unbound.
	 */
2027	stcb = sctp_aloc_assoc(inp, init_src, &error,
2028	    ntohl(initack_cp->init.initiate_tag),
2029	    ntohl(initack_cp->init.initial_tsn), vrf_id,
2030	    ntohs(initack_cp->init.num_outbound_streams),
2031	    port,
2032	    (struct thread *)NULL,
2033	    SCTP_DONT_INITIALIZE_AUTH_PARAMS);
2034	if (stcb == NULL) {
2035		struct mbuf *op_err;
2036
2037		/* memory problem? */
2038		SCTPDBG(SCTP_DEBUG_INPUT1,
2039		    "process_cookie_new: no room for another TCB!\n");
2040		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2041		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2042		    src, dst, sh, op_err,
2043		    mflowtype, mflowid,
2044		    vrf_id, port);
2045		return (NULL);
2046	}
2047	asoc = &stcb->asoc;
2048	/* get scope variables out of cookie */
2049	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
2050	asoc->scope.site_scope = cookie->site_scope;
2051	asoc->scope.local_scope = cookie->local_scope;
2052	asoc->scope.loopback_scope = cookie->loopback_scope;
2053
2054	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2055	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2056		struct mbuf *op_err;
2057
2058		/*
2059		 * Houston we have a problem. The EP changed while the
2060		 * cookie was in flight. Only recourse is to abort the
2061		 * association.
2062		 */
2063		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2064		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2065		    src, dst, sh, op_err,
2066		    mflowtype, mflowid,
2067		    vrf_id, port);
2068		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2069		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2070		return (NULL);
2071	}
2072	asoc->rcv_edmid = cookie->rcv_edmid;
2073	/* process the INIT-ACK info (my info) */
2074	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2075
2076	/* process the INIT info (peer's info) */
2077	if (sctp_process_init(init_cp, stcb) < 0) {
2078		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2079		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2080		return (NULL);
2081	}
2082	/* load all addresses */
2083	if (sctp_load_addresses_from_init(stcb, m,
2084	    init_offset + sizeof(struct sctp_init_chunk),
2085	    initack_offset, src, dst, init_src, port) < 0) {
2086		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2087		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2088		return (NULL);
2089	}
2090	/*
2091	 * verify any preceding AUTH chunk that was skipped
2092	 */
2093	/* pull the local authentication parameters from the cookie/init-ack */
2094	sctp_auth_get_cookie_params(stcb, m,
2095	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2096	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2097	if (auth_skipped) {
2098		struct sctp_auth_chunk *auth;
2099
2100		if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
2101			auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2102		} else {
2103			auth = NULL;
2104		}
2105		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2106			/* auth HMAC failed, dump the assoc and packet */
2107			SCTPDBG(SCTP_DEBUG_AUTH1,
2108			    "COOKIE-ECHO: AUTH failed\n");
2109			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2110			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2111			return (NULL);
2112		} else {
2113			/* remaining chunks checked... good to go */
2114			stcb->asoc.authenticated = 1;
2115		}
2116	}
2117
	/*
	 * If we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to be added to the peer (e.g. addresses
	 * changed while the COOKIE-ECHO was in flight). This needs to be
	 * done after we go to the OPEN state to do the correct ASCONF
	 * processing. Otherwise, make sure we have the correct addresses in
	 * our lists.
	 */
2126
2127	/* warning, we re-use sin, sin6, sa_store here! */
2128	/* pull in local_address (our "from" address) */
2129	switch (cookie->laddr_type) {
2130#ifdef INET
2131	case SCTP_IPV4_ADDRESS:
2132		/* source addr is IPv4 */
2133		memset(&store.sin, 0, sizeof(struct sockaddr_in));
2134		store.sin.sin_family = AF_INET;
2135		store.sin.sin_len = sizeof(struct sockaddr_in);
2136		store.sin.sin_addr.s_addr = cookie->laddress[0];
2137		break;
2138#endif
2139#ifdef INET6
2140	case SCTP_IPV6_ADDRESS:
2141		/* source addr is IPv6 */
2142		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
2143		store.sin6.sin6_family = AF_INET6;
2144		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
2145		store.sin6.sin6_scope_id = cookie->scope_id;
2146		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
2147		break;
2148#endif
2149	default:
2150		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2151		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2152		return (NULL);
2153	}
2154
2155	/* update current state */
2156	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2157	SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2158	sctp_stop_all_cookie_timers(stcb);
2159	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2160	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2161
2162	/* set up to notify upper layer */
2163	*notification = SCTP_NOTIFY_ASSOC_UP;
2164	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2165	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2166	    (!SCTP_IS_LISTENING(inp))) {
		/*
		 * This is an endpoint that called connect(); how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there a complete
		 * INIT/INIT-ACK/COOKIE exchange arrived. Of course that
		 * should have gone to the other code path, not here, but a
		 * bit of protection is worth having.
2174		 *
2175		 * XXXMJ unlocked
2176		 */
2177		sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
2178		soisconnected(stcb->sctp_socket);
2179	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2180	    (SCTP_IS_LISTENING(inp))) {
		/*
		 * We don't want to do anything with this one, since it is
		 * the listening endpoint. The timer will get started for
		 * accepted connections in the caller.
		 */
2186		;
2187	}
2188	if (stcb->asoc.sctp_autoclose_ticks &&
2189	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2190		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2191	}
2192	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2193	*netp = sctp_findnet(stcb, init_src);
2194	if (*netp != NULL) {
2195		/*
2196		 * Since we did not send a HB, make sure we don't double
2197		 * things.
2198		 */
2199		(*netp)->hb_responded = 1;
2200	}
2201	/* respond with a COOKIE-ACK */
2202	sctp_send_cookie_ack(stcb);
2203
2204	/*
2205	 * check the address lists for any ASCONFs that need to be sent
2206	 * AFTER the cookie-ack is sent
2207	 */
2208	sctp_check_address_list(stcb, m,
2209	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2210	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2211	    &store.sa, cookie->local_scope, cookie->site_scope,
2212	    cookie->ipv4_scope, cookie->loopback_scope);
2213
2214	return (stcb);
2215}
2216
/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.,
 * we NEED to make sure we are not already using the vtag. If so, we need to
 * send back an ABORT-TRY-AGAIN-WITH-NEW-TAG. No middle box bit!
2221	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2222							    SCTP_BASE_INFO(hashasocmark))];
2223	LIST_FOREACH(stcb, head, sctp_asocs) {
2224	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
2225		       -- SEND ABORT - TRY AGAIN --
2226		}
2227	}
2228*/
2229
/*
 * Handles a COOKIE-ECHO message.
 * stcb: modified to point to a new TCB, or left as the existing (non-NULL)
 *       TCB.
 */
2234static struct mbuf *
2235sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2236    struct sockaddr *src, struct sockaddr *dst,
2237    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2238    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2239    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2240    struct sctp_tcb **locked_tcb,
2241    uint8_t mflowtype, uint32_t mflowid,
2242    uint32_t vrf_id, uint16_t port)
2243{
2244	struct sctp_state_cookie *cookie;
2245	struct sctp_tcb *l_stcb = *stcb;
2246	struct sctp_inpcb *l_inp;
2247	struct sockaddr *to;
2248	struct sctp_pcb *ep;
2249	struct mbuf *m_sig;
2250	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2251	uint8_t *sig;
2252	uint8_t cookie_ok = 0;
2253	unsigned int sig_offset, cookie_offset;
2254	unsigned int cookie_len;
2255	struct timeval now;
2256	struct timeval time_entered, time_expires;
2257	int notification = 0;
2258	struct sctp_nets *netl;
2259	int had_a_existing_tcb = 0;
2260	int send_int_conf = 0;
2261#ifdef INET
2262	struct sockaddr_in sin;
2263#endif
2264#ifdef INET6
2265	struct sockaddr_in6 sin6;
2266#endif
2267
2268	SCTPDBG(SCTP_DEBUG_INPUT2,
2269	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2270
2271	if (inp_p == NULL) {
2272		return (NULL);
2273	}
2274	cookie = &cp->cookie;
2275	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2276	cookie_len = ntohs(cp->ch.chunk_length);
2277
2278	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2279	    sizeof(struct sctp_init_chunk) +
2280	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2281		/* cookie too small */
2282		return (NULL);
2283	}
2284	if ((cookie->peerport != sh->src_port) ||
2285	    (cookie->myport != sh->dest_port) ||
2286	    (cookie->my_vtag != sh->v_tag)) {
2287		/*
2288		 * invalid ports or bad tag.  Note that we always leave the
2289		 * v_tag in the header in network order and when we stored
2290		 * it in the my_vtag slot we also left it in network order.
2291		 * This maintains the match even though it may be in the
2292		 * opposite byte order of the machine :->
2293		 */
2294		return (NULL);
2295	}
2296	/*
2297	 * split off the signature into its own mbuf (since it should not be
2298	 * calculated in the sctp_hmac_m() call).
2299	 */
2300	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2301	m_sig = m_split(m, sig_offset, M_NOWAIT);
2302	if (m_sig == NULL) {
2303		/* out of memory or ?? */
2304		return (NULL);
2305	}
2306#ifdef SCTP_MBUF_LOGGING
2307	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2308		sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
2309	}
2310#endif
2311
2312	/*
2313	 * compute the signature/digest for the cookie
2314	 */
2315	if (l_stcb != NULL) {
2316		atomic_add_int(&l_stcb->asoc.refcnt, 1);
2317		SCTP_TCB_UNLOCK(l_stcb);
2318	}
2319	l_inp = *inp_p;
2320	SCTP_INP_RLOCK(l_inp);
2321	if (l_stcb != NULL) {
2322		SCTP_TCB_LOCK(l_stcb);
2323		atomic_subtract_int(&l_stcb->asoc.refcnt, 1);
2324	}
2325	if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
2326		SCTP_INP_RUNLOCK(l_inp);
2327		sctp_m_freem(m_sig);
2328		return (NULL);
2329	}
2330	ep = &(*inp_p)->sctp_ep;
2331	/* which cookie is it? */
2332	if ((cookie->time_entered.tv_sec < ep->time_of_secret_change) &&
2333	    (ep->current_secret_number != ep->last_secret_number)) {
2334		/* it's the old cookie */
2335		(void)sctp_hmac_m(SCTP_HMAC,
2336		    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2337		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2338	} else {
2339		/* it's the current cookie */
2340		(void)sctp_hmac_m(SCTP_HMAC,
2341		    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2342		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2343	}
2344	/* get the signature */
2345	SCTP_INP_RUNLOCK(l_inp);
2346	sig = (uint8_t *)sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *)&tmp_sig);
2347	if (sig == NULL) {
2348		/* couldn't find signature */
2349		sctp_m_freem(m_sig);
2350		return (NULL);
2351	}
2352	/* compare the received digest with the computed digest */
2353	if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2354		/* try the old cookie? */
2355		if ((cookie->time_entered.tv_sec == ep->time_of_secret_change) &&
2356		    (ep->current_secret_number != ep->last_secret_number)) {
2357			/* compute digest with old */
2358			(void)sctp_hmac_m(SCTP_HMAC,
2359			    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2360			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2361			/* compare */
2362			if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2363				cookie_ok = 1;
2364		}
2365	} else {
2366		cookie_ok = 1;
2367	}
2368
2369	/*
2370	 * Now before we continue we must reconstruct our mbuf so that
2371	 * normal processing of any other chunks will work.
2372	 */
2373	{
2374		struct mbuf *m_at;
2375
2376		m_at = m;
2377		while (SCTP_BUF_NEXT(m_at) != NULL) {
2378			m_at = SCTP_BUF_NEXT(m_at);
2379		}
2380		SCTP_BUF_NEXT(m_at) = m_sig;
2381	}
2382
2383	if (cookie_ok == 0) {
2384		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2385		SCTPDBG(SCTP_DEBUG_INPUT2,
2386		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2387		    (uint32_t)offset, cookie_offset, sig_offset);
2388		return (NULL);
2389	}
2390
2391	if (sctp_ticks_to_msecs(cookie->cookie_life) > SCTP_MAX_COOKIE_LIFE) {
2392		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid cookie lifetime\n");
2393		return (NULL);
2394	}
2395	time_entered.tv_sec = cookie->time_entered.tv_sec;
2396	time_entered.tv_usec = cookie->time_entered.tv_usec;
2397	if ((time_entered.tv_sec < 0) ||
2398	    (time_entered.tv_usec < 0) ||
2399	    (time_entered.tv_usec >= 1000000)) {
2400		/* Invalid time stamp. Cookie must have been modified. */
2401		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid time stamp\n");
2402		return (NULL);
2403	}
2404	(void)SCTP_GETTIME_TIMEVAL(&now);
2405	if (timevalcmp(&now, &time_entered, <)) {
2406		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie generated in the future!\n");
2407		return (NULL);
2408	}
2409	/*
2410	 * Check the cookie timestamps to be sure it's not stale.
2411	 * cookie_life is in ticks, so we convert to seconds.
2412	 */
2413	time_expires.tv_sec = time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
2414	time_expires.tv_usec = time_entered.tv_usec;
2415	if (timevalcmp(&now, &time_expires, >)) {
2416		/* cookie is stale! */
2417		struct mbuf *op_err;
2418		struct sctp_error_stale_cookie *cause;
2419		struct timeval diff;
2420		uint32_t staleness;
2421
2422		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
2423		    0, M_NOWAIT, 1, MT_DATA);
2424		if (op_err == NULL) {
2425			/* FOOBAR */
2426			return (NULL);
2427		}
2428		/* Set the len */
2429		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
2430		cause = mtod(op_err, struct sctp_error_stale_cookie *);
2431		cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
2432		cause->cause.length = htons(sizeof(struct sctp_error_stale_cookie));
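		/*
		 * Report how far past its lifetime the cookie is, in
		 * microseconds, saturating at UINT32_MAX.
		 */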
2433		diff = now;
2434		timevalsub(&diff, &time_expires);
2435		if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
2436			staleness = UINT32_MAX;
2437		} else {
2438			staleness = (uint32_t)diff.tv_sec * 1000000;
2439		}
2440		if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
2441			staleness += (uint32_t)diff.tv_usec;
2442		} else {
2443			staleness = UINT32_MAX;
2444		}
2445		cause->stale_time = htonl(staleness);
2446		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2447		    mflowtype, mflowid, l_inp->fibnum,
2448		    vrf_id, port);
2449		return (NULL);
2450	}
	/*
	 * Now we must use the lookup address to see if we have an existing
	 * asoc. This will only happen if we were in the COOKIE-WAIT state
	 * and an INIT collided with us and the peer sent the cookie on
	 * another address besides the single address our assoc had for it.
	 * In this case we will have at least one of the tie-tags set AND
	 * the address field in the cookie can be used to look it up.
	 */
2460	to = NULL;
2461	switch (cookie->addr_type) {
2462#ifdef INET6
2463	case SCTP_IPV6_ADDRESS:
2464		memset(&sin6, 0, sizeof(sin6));
2465		sin6.sin6_family = AF_INET6;
2466		sin6.sin6_len = sizeof(sin6);
2467		sin6.sin6_port = sh->src_port;
2468		sin6.sin6_scope_id = cookie->scope_id;
2469		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2470		    sizeof(sin6.sin6_addr.s6_addr));
2471		to = (struct sockaddr *)&sin6;
2472		break;
2473#endif
2474#ifdef INET
2475	case SCTP_IPV4_ADDRESS:
2476		memset(&sin, 0, sizeof(sin));
2477		sin.sin_family = AF_INET;
2478		sin.sin_len = sizeof(sin);
2479		sin.sin_port = sh->src_port;
2480		sin.sin_addr.s_addr = cookie->address[0];
2481		to = (struct sockaddr *)&sin;
2482		break;
2483#endif
2484	default:
2485		/* This should not happen */
2486		return (NULL);
2487	}
2488	if (*stcb == NULL) {
2489		/* Yep, lets check */
2490		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2491		if (*stcb == NULL) {
			/*
			 * We should only have gotten back the same inp. If
			 * we got back a different ep we have a problem: the
			 * original lookup returned l_inp and this one found
			 * something else.
			 */
2497			if (l_inp != *inp_p) {
2498				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2499			}
2500		} else {
2501			if (*locked_tcb == NULL) {
2502				/*
2503				 * In this case we found the assoc only
2504				 * after we locked the create lock. This
2505				 * means we are in a colliding case and we
2506				 * must make sure that we unlock the tcb if
				 * it's one of the cases where we throw away
2508				 * the incoming packets.
2509				 */
2510				*locked_tcb = *stcb;
2511
2512				/*
				 * We must also increment the inp ref count,
				 * since the ref_count flag was set when we
				 * did not find the TCB; now that we found it
				 * the refcount gets reduced, so we must
				 * raise it back up to balance it all :-)
2518				 */
2519				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2520				if ((*stcb)->sctp_ep != l_inp) {
2521					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2522					    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2523				}
2524			}
2525		}
2526	}
2527
2528	cookie_len -= SCTP_SIGNATURE_SIZE;
2529	if (*stcb == NULL) {
2530		/* this is the "normal" case... get a new TCB */
2531		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2532		    cookie, cookie_len, *inp_p,
2533		    netp, to, &notification,
2534		    auth_skipped, auth_offset, auth_len,
2535		    mflowtype, mflowid,
2536		    vrf_id, port);
2537	} else {
2538		/* this is abnormal... cookie-echo on existing TCB */
2539		had_a_existing_tcb = 1;
2540		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2541		    src, dst, sh,
2542		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2543		    &notification, auth_skipped, auth_offset, auth_len,
2544		    mflowtype, mflowid,
2545		    vrf_id, port);
2546		if (*stcb == NULL) {
2547			*locked_tcb = NULL;
2548		}
2549	}
2550
2551	if (*stcb == NULL) {
2552		/* still no TCB... must be bad cookie-echo */
2553		return (NULL);
2554	}
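	/* Remember the flow id/type of the received packet on this path. */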
2555	if (*netp != NULL) {
2556		(*netp)->flowtype = mflowtype;
2557		(*netp)->flowid = mflowid;
2558	}
2559	/*
2560	 * Ok, we built an association so confirm the address we sent the
2561	 * INIT-ACK to.
2562	 */
2563	netl = sctp_findnet(*stcb, to);
	/*
	 * This code should in theory NOT run, but guard against it anyway.
	 */
2567	if (netl == NULL) {
2568		/* TSNH! Huh, why do I need to add this address here? */
2569		if (sctp_add_remote_addr(*stcb, to, NULL, port,
2570		    SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2571			return (NULL);
2572		}
2573		netl = sctp_findnet(*stcb, to);
2574	}
2575	if (netl) {
2576		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2577			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2578			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2579			    netl);
2580			send_int_conf = 1;
2581		}
2582	}
2583	sctp_start_net_timers(*stcb);
2584	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2585		if (!had_a_existing_tcb ||
2586		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
			/*
			 * If we have a NEW cookie or the connect never
			 * reached the connected state during a collision,
			 * we must do the TCP-style accept.
			 */
2592			struct socket *so, *oso;
2593			struct sctp_inpcb *inp;
2594
2595			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2596				/*
2597				 * For a restart we will keep the same
2598				 * socket, no need to do anything. I THINK!!
2599				 */
2600				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2601				if (send_int_conf) {
2602					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2603					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2604				}
2605				return (m);
2606			}
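			/*
			 * Create the socket for the accepted association
			 * via sonewconn(); drop the TCB lock around the call
			 * and hold a refcount so the TCB cannot go away
			 * while unlocked.
			 */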
2607			oso = (*inp_p)->sctp_socket;
2608			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2609			SCTP_TCB_UNLOCK((*stcb));
2610			CURVNET_SET(oso->so_vnet);
			so = sonewconn(oso, 0);
2613			CURVNET_RESTORE();
2614			SCTP_TCB_LOCK((*stcb));
2615			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2616
2617			if (so == NULL) {
2618				struct mbuf *op_err;
2619
2620				/* Too many sockets */
2621				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2622				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2623				sctp_abort_association(*inp_p, NULL, m, iphlen,
2624				    src, dst, sh, op_err,
2625				    mflowtype, mflowid,
2626				    vrf_id, port);
2627				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
2628				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2629				return (NULL);
2630			}
2631			inp = (struct sctp_inpcb *)so->so_pcb;
2632			SCTP_INP_INCR_REF(inp);
			/*
			 * We add the unbound flag here so that if we get a
			 * soabort() before the move_pcb is done, we will
			 * clean up properly.
			 */
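			/*
			 * Inherit the relevant settings from the listening
			 * endpoint on the newly created one.
			 */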
2638			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2639			    SCTP_PCB_FLAGS_CONNECTED |
2640			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2641			    SCTP_PCB_FLAGS_UNBOUND |
2642			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2643			    SCTP_PCB_FLAGS_DONT_WAKE);
2644			inp->sctp_features = (*inp_p)->sctp_features;
2645			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2646			inp->sctp_socket = so;
2647			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2648			inp->max_cwnd = (*inp_p)->max_cwnd;
2649			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2650			inp->ecn_supported = (*inp_p)->ecn_supported;
2651			inp->prsctp_supported = (*inp_p)->prsctp_supported;
2652			inp->auth_supported = (*inp_p)->auth_supported;
2653			inp->asconf_supported = (*inp_p)->asconf_supported;
2654			inp->reconfig_supported = (*inp_p)->reconfig_supported;
2655			inp->nrsack_supported = (*inp_p)->nrsack_supported;
2656			inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
2657			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2658			inp->sctp_context = (*inp_p)->sctp_context;
2659			inp->local_strreset_support = (*inp_p)->local_strreset_support;
2660			inp->fibnum = (*inp_p)->fibnum;
2661			/*
2662			 * copy in the authentication parameters from the
2663			 * original endpoint
2664			 */
2665			if (inp->sctp_ep.local_hmacs)
2666				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2667			inp->sctp_ep.local_hmacs =
2668			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2669			if (inp->sctp_ep.local_auth_chunks)
2670				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2671			inp->sctp_ep.local_auth_chunks =
2672			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2673
2674			/*
2675			 * Now we must move it from one hash table to
2676			 * another and get the tcb in the right place.
2677			 */
2678
			/*
			 * This is where the one-to-one socket is put into
			 * the accept state, waiting for the accept!
			 */
2683			if (*stcb) {
2684				SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
2685			}
2686			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2687
2688			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2689			SCTP_TCB_UNLOCK((*stcb));
2690
2691			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2692			    0);
2693			SCTP_TCB_LOCK((*stcb));
2694			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2695
2696			/*
2697			 * now we must check to see if we were aborted while
2698			 * the move was going on and the lock/unlock
2699			 * happened.
2700			 */
2701			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2702				/*
2703				 * yep it was, we leave the assoc attached
2704				 * to the socket since the sctp_inpcb_free()
2705				 * call will send an abort for us.
2706				 */
2707				SCTP_INP_DECR_REF(inp);
2708				return (NULL);
2709			}
2710			SCTP_INP_DECR_REF(inp);
2711			/* Switch over to the new guy */
2712			*inp_p = inp;
2713			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2714			if (send_int_conf) {
2715				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2716				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2717			}
2718
2719			/*
2720			 * Pull it from the incomplete queue and wake the
2721			 * guy
2722			 */
2723			soisconnected(so);
2724			return (m);
2725		}
2726	}
2727	if (notification) {
2728		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2729	}
2730	if (send_int_conf) {
2731		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2732		    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2733	}
2734	return (m);
2735}
2736
2737static void
2738sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
2739    struct sctp_tcb *stcb, struct sctp_nets *net)
2740{
2741	/* cp must not be used, others call this without a c-ack :-) */
2742	struct sctp_association *asoc;
2743	struct sctp_tmit_chunk *chk;
2744
2745	SCTPDBG(SCTP_DEBUG_INPUT2,
2746	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2747	if ((stcb == NULL) || (net == NULL)) {
2748		return;
2749	}
2750
2751	asoc = &stcb->asoc;
2752	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2753		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2754		    asoc->overall_error_count,
2755		    0,
2756		    SCTP_FROM_SCTP_INPUT,
2757		    __LINE__);
2758	}
2759	sctp_stop_all_cookie_timers(stcb);
2760	sctp_toss_old_cookies(stcb, asoc);
2761	/* process according to association state */
2762	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
2763		/* state change only needed when I am in right state */
2764		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2765		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2766		sctp_start_net_timers(stcb);
2767		/* update RTO */
2768		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2769		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2770		if (asoc->overall_error_count == 0) {
2771			sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
2772			    SCTP_RTT_FROM_NON_DATA);
2773		}
		/*
		 * Since we did not send an HB, make sure we don't double
		 * things.
		 */
2778		asoc->overall_error_count = 0;
2779		net->hb_responded = 1;
2780		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2781		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2782		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2783		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2784			sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
2785			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
2786				soisconnected(stcb->sctp_socket);
2787			}
2788		}
2789
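		/*
		 * If a shutdown is already pending and nothing is left
		 * outstanding or queued, move straight to SHUTDOWN-SENT now
		 * that the association is up.
		 */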
2790		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
2791		    TAILQ_EMPTY(&asoc->send_queue) &&
2792		    TAILQ_EMPTY(&asoc->sent_queue) &&
2793		    (asoc->stream_queue_cnt == 0)) {
2794			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2795			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
2796			sctp_stop_timers_for_shutdown(stcb);
2797			sctp_send_shutdown(stcb, net);
2798			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
2799			    stcb->sctp_ep, stcb, net);
2800			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2801			    stcb->sctp_ep, stcb, NULL);
2802			sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
2803		}
2804
2805		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2806			/*
2807			 * We don't need to do the asconf thing, nor hb or
2808			 * autoclose if the socket is closed.
2809			 */
2810			goto closed_socket;
2811		}
2812
2813		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2814		    stcb, net);
2815
2816		if (stcb->asoc.sctp_autoclose_ticks &&
2817		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2818			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2819			    stcb->sctp_ep, stcb, NULL);
2820		}
		/*
		 * Send an ASCONF if parameters are pending and ASCONFs are
		 * allowed (e.g. addresses changed while the INIT/COOKIE-ECHO
		 * were in flight).
		 */
2826		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2827		    (stcb->asoc.asconf_supported == 1) &&
2828		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2829#ifdef SCTP_TIMER_BASED_ASCONF
2830			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2831			    stcb->sctp_ep, stcb,
2832			    stcb->asoc.primary_destination);
2833#else
2834			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
2835			    SCTP_ADDR_NOT_LOCKED);
2836#endif
2837		}
2838	}
2839closed_socket:
2840	/* Restart the timer if we have pending data */
2841	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2842		if (chk->whoTo != NULL) {
2843			break;
2844		}
2845	}
2846	if (chk != NULL) {
2847		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
2848	}
2849}
2850
2851static void
2852sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2853    struct sctp_tcb *stcb)
2854{
2855	struct sctp_nets *net;
2856	struct sctp_tmit_chunk *lchk;
2857	struct sctp_ecne_chunk bkup;
2858	uint8_t override_bit;
2859	uint32_t tsn, window_data_tsn;
2860	int len;
2861	unsigned int pkt_cnt;
2862
2863	len = ntohs(cp->ch.chunk_length);
2864	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* It's the old format */
2866		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
2867		bkup.num_pkts_since_cwr = htonl(1);
2868		cp = &bkup;
2869	}
2870	SCTP_STAT_INCR(sctps_recvecne);
2871	tsn = ntohl(cp->tsn);
2872	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
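	/*
	 * The highest TSN queued for sending marks the end of the current
	 * CWR window; the cwnd will not be reduced again until an ECNE
	 * beyond it arrives.
	 */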
2873	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
2874	if (lchk == NULL) {
2875		window_data_tsn = stcb->asoc.sending_seq - 1;
2876	} else {
2877		window_data_tsn = lchk->rec.data.tsn;
2878	}
2879
2880	/* Find where it was sent to if possible. */
2881	net = NULL;
2882	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
2883		if (lchk->rec.data.tsn == tsn) {
2884			net = lchk->whoTo;
2885			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
2886			break;
2887		}
2888		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
2889			break;
2890		}
2891	}
2892	if (net == NULL) {
		/*
		 * What to do? A previous send of a CWR was possibly lost.
		 * See how old it is; we may have it marked on the actual
		 * net.
		 */
2898		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2899			if (tsn == net->last_cwr_tsn) {
2900				/* Found him, send it off */
2901				break;
2902			}
2903		}
2904		if (net == NULL) {
2905			/*
2906			 * If we reach here, we need to send a special CWR
2907			 * that says hey, we did this a long time ago and
2908			 * you lost the response.
2909			 */
2910			net = TAILQ_FIRST(&stcb->asoc.nets);
2911			if (net == NULL) {
2912				/* TSNH */
2913				return;
2914			}
2915			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
2916		} else {
2917			override_bit = 0;
2918		}
2919	} else {
2920		override_bit = 0;
2921	}
2922	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
2923	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
2924		/*
2925		 * JRS - Use the congestion control given in the pluggable
2926		 * CC module
2927		 */
2928		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT, so we will only lower the cwnd
		 * again at the next sending seq, i.e., the window_data_tsn.
		 */
2933		net->cwr_window_tsn = window_data_tsn;
2934		net->ecn_ce_pkt_cnt += pkt_cnt;
2935		net->lost_cnt = pkt_cnt;
2936		net->last_cwr_tsn = tsn;
2937	} else {
2938		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
2939		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
2940		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window; update how many
			 * marks/packets we have lost.
			 */
2945			int cnt = 1;
2946
2947			if (pkt_cnt > net->lost_cnt) {
2948				/* Should be the case */
2949				cnt = (pkt_cnt - net->lost_cnt);
2950				net->ecn_ce_pkt_cnt += cnt;
2951			}
2952			net->lost_cnt = pkt_cnt;
2953			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are still within the window of the initial CE the
			 * peer saw.
			 */
2958			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
2959		}
2960	}
	/*
	 * We always send a CWR this way: if our previous one was lost, our
	 * peer will get an update, and if it is not yet time to reduce
	 * again, the CWR still gets to the peer. Note we set the override
	 * when we could not find the TSN on a chunk or the destination
	 * network.
	 */
2967	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
2968}
2969
2970static void
2971sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
2972{
2973	/*
2974	 * Here we get a CWR from the peer. We must look in the outqueue and
2975	 * make sure that we have a covered ECNE in the control chunk part.
2976	 * If so remove it.
2977	 */
2978	struct sctp_tmit_chunk *chk, *nchk;
2979	struct sctp_ecne_chunk *ecne;
2980	int override;
2981	uint32_t cwr_tsn;
2982
2983	cwr_tsn = ntohl(cp->tsn);
2984	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
2985	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
2986		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2987			continue;
2988		}
2989		if ((override == 0) && (chk->whoTo != net)) {
2990			/* Must be from the right src unless override is set */
2991			continue;
2992		}
2993		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2994		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
2995			/* this covers this ECNE, we can remove it */
2996			stcb->asoc.ecn_echo_cnt_onq--;
2997			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2998			    sctp_next);
2999			stcb->asoc.ctrl_queue_cnt--;
3000			sctp_m_freem(chk->data);
3001			chk->data = NULL;
3002			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3003			if (override == 0) {
3004				break;
3005			}
3006		}
3007	}
3008}
3009
3010static void
3011sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
3012    struct sctp_tcb *stcb, struct sctp_nets *net)
3013{
3014
3015	SCTPDBG(SCTP_DEBUG_INPUT2,
3016	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
3017	if (stcb == NULL)
3018		return;
3019
3020	/* process according to association state */
3021	if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
3022		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
3023		SCTPDBG(SCTP_DEBUG_INPUT2,
3024		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
3025		SCTP_TCB_UNLOCK(stcb);
3026		return;
3027	}
3028	/* notify upper layer protocol */
3029	if (stcb->sctp_socket) {
3030		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3031	}
3032#ifdef INVARIANTS
3033	if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
3034	    !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
3035	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
3036		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
3037	}
3038#endif
3039	/* stop the timer */
3040	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
3041	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
3042	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
3043	/* free the TCB */
3044	SCTPDBG(SCTP_DEBUG_INPUT2,
3045	    "sctp_handle_shutdown_complete: calls free-asoc\n");
3046	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
3047	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
3048	return;
3049}
3050
3051static int
3052process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
3053    struct sctp_nets *net, uint8_t flg)
3054{
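	/* Handle one chunk from a packet-drop report based on its type. */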
3055	switch (desc->chunk_type) {
3056	case SCTP_DATA:
3057	case SCTP_IDATA:
3058		/* find the tsn to resend (possibly) */
3059		{
3060			uint32_t tsn;
3061			struct sctp_tmit_chunk *tp1;
3062
3063			tsn = ntohl(desc->tsn_ifany);
3064			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3065				if (tp1->rec.data.tsn == tsn) {
3066					/* found it */
3067					break;
3068				}
3069				if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
3070					/* not found */
3071					tp1 = NULL;
3072					break;
3073				}
3074			}
3075			if (tp1 == NULL) {
				/*
				 * Do it the other way, i.e., without paying
				 * attention to queue sequence order.
				 */
3080				SCTP_STAT_INCR(sctps_pdrpdnfnd);
3081				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3082					if (tp1->rec.data.tsn == tsn) {
3083						/* found it */
3084						break;
3085					}
3086				}
3087			}
3088			if (tp1 == NULL) {
3089				SCTP_STAT_INCR(sctps_pdrptsnnf);
3090			}
3091			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
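				/*
				 * Only act on reports that indicate a bad
				 * CRC or come from a middle box, and never
				 * retransmit into a closed peer window.
				 */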
3092				if (((flg & SCTP_BADCRC) == 0) &&
3093				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3094					return (0);
3095				}
3096				if ((stcb->asoc.peers_rwnd == 0) &&
3097				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3098					SCTP_STAT_INCR(sctps_pdrpdiwnp);
3099					return (0);
3100				}
3101				if (stcb->asoc.peers_rwnd == 0 &&
3102				    (flg & SCTP_FROM_MIDDLE_BOX)) {
3103					SCTP_STAT_INCR(sctps_pdrpdizrw);
3104					return (0);
3105				}
3106				if ((uint32_t)SCTP_BUF_LEN(tp1->data) <
3107				    SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) {
3108					/* Payload not matching. */
3109					SCTP_STAT_INCR(sctps_pdrpbadd);
3110					return (-1);
3111				}
3112				if (memcmp(mtod(tp1->data, caddr_t)+SCTP_DATA_CHUNK_OVERHEAD(stcb),
3113				    desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) {
3114					/* Payload not matching. */
3115					SCTP_STAT_INCR(sctps_pdrpbadd);
3116					return (-1);
3117				}
3118				if (tp1->do_rtt) {
					/*
					 * This chunk had an RTO calculation
					 * pending on it; cancel it.
					 */
3123					if (tp1->whoTo->rto_needed == 0) {
3124						tp1->whoTo->rto_needed = 1;
3125					}
3126					tp1->do_rtt = 0;
3127				}
3128				SCTP_STAT_INCR(sctps_pdrpmark);
3129				if (tp1->sent != SCTP_DATAGRAM_RESEND)
3130					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3131				/*
				 * mark it as if we were doing an FR, since
3133				 * we will be getting gap ack reports behind
3134				 * the info from the router.
3135				 */
3136				tp1->rec.data.doing_fast_retransmit = 1;
3137				/*
3138				 * mark the tsn with what sequences can
3139				 * cause a new FR.
3140				 */
3141				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
3142					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
3143				} else {
3144					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
3145				}
3146
3147				/* restart the timer */
3148				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3149				    stcb, tp1->whoTo,
3150				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3151				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3152				    stcb, tp1->whoTo);
3153
3154				/* fix counts and things */
3155				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3156					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
3157					    tp1->whoTo->flight_size,
3158					    tp1->book_size,
3159					    (uint32_t)(uintptr_t)stcb,
3160					    tp1->rec.data.tsn);
3161				}
3162				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3163					sctp_flight_size_decrease(tp1);
3164					sctp_total_flight_decrease(stcb, tp1);
3165				}
3166				tp1->sent = SCTP_DATAGRAM_RESEND;
3167			} {
3168				/* audit code */
3169				unsigned int audit;
3170
3171				audit = 0;
3172				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3173					if (tp1->sent == SCTP_DATAGRAM_RESEND)
3174						audit++;
3175				}
3176				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
3177				    sctp_next) {
3178					if (tp1->sent == SCTP_DATAGRAM_RESEND)
3179						audit++;
3180				}
3181				if (audit != stcb->asoc.sent_queue_retran_cnt) {
3182					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3183					    audit, stcb->asoc.sent_queue_retran_cnt);
3184#ifndef SCTP_AUDITING_ENABLED
3185					stcb->asoc.sent_queue_retran_cnt = audit;
3186#endif
3187				}
3188			}
3189		}
3190		break;
3191	case SCTP_ASCONF:
3192		{
3193			struct sctp_tmit_chunk *asconf;
3194
3195			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3196			    sctp_next) {
3197				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3198					break;
3199				}
3200			}
3201			if (asconf) {
3202				if (asconf->sent != SCTP_DATAGRAM_RESEND)
3203					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3204				asconf->sent = SCTP_DATAGRAM_RESEND;
3205				asconf->snd_count--;
3206			}
3207		}
3208		break;
3209	case SCTP_INITIATION:
3210		/* resend the INIT */
3211		stcb->asoc.dropped_special_cnt++;
3212		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3213			/*
			 * If we can get it in within a few attempts, we do
			 * this; otherwise we let the timer fire.
3216			 */
3217			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3218			    stcb, net,
3219			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
3220			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3221		}
3222		break;
3223	case SCTP_SELECTIVE_ACK:
3224	case SCTP_NR_SELECTIVE_ACK:
3225		/* resend the sack */
3226		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
3227		break;
3228	case SCTP_HEARTBEAT_REQUEST:
3229		/* resend a demand HB */
3230		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3231			/*
			 * Only retransmit if we KNOW we won't destroy the
			 * tcb.
3234			 */
3235			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
3236		}
3237		break;
3238	case SCTP_SHUTDOWN:
3239		sctp_send_shutdown(stcb, net);
3240		break;
3241	case SCTP_SHUTDOWN_ACK:
3242		sctp_send_shutdown_ack(stcb, net);
3243		break;
3244	case SCTP_COOKIE_ECHO:
3245		{
3246			struct sctp_tmit_chunk *cookie;
3247
3248			cookie = NULL;
3249			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3250			    sctp_next) {
3251				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3252					break;
3253				}
3254			}
3255			if (cookie) {
3256				if (cookie->sent != SCTP_DATAGRAM_RESEND)
3257					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3258				cookie->sent = SCTP_DATAGRAM_RESEND;
3259				sctp_stop_all_cookie_timers(stcb);
3260			}
3261		}
3262		break;
3263	case SCTP_COOKIE_ACK:
3264		sctp_send_cookie_ack(stcb);
3265		break;
3266	case SCTP_ASCONF_ACK:
3267		/* resend last asconf ack */
3268		sctp_send_asconf_ack(stcb);
3269		break;
3270	case SCTP_IFORWARD_CUM_TSN:
3271	case SCTP_FORWARD_CUM_TSN:
3272		send_forward_tsn(stcb, &stcb->asoc);
3273		break;
3274		/* can't do anything with these */
3275	case SCTP_PACKET_DROPPED:
3276	case SCTP_INITIATION_ACK:	/* this should not happen */
3277	case SCTP_HEARTBEAT_ACK:
3278	case SCTP_ABORT_ASSOCIATION:
3279	case SCTP_OPERATION_ERROR:
3280	case SCTP_SHUTDOWN_COMPLETE:
3281	case SCTP_ECN_ECHO:
3282	case SCTP_ECN_CWR:
3283	default:
3284		break;
3285	}
3286	return (0);
3287}
3288
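/*
 * Reset the last delivered MID for the listed incoming streams (or all of
 * them if no list is given) and notify the ULP of the received reset.
 */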
3289void
3290sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3291{
3292	uint32_t i;
3293	uint16_t temp;
3294
3295	/*
	 * We set things to 0xffffffff since this is the last delivered
	 * sequence and the peer will start sending at 0 after the reset.
3298	 */
3299
3300	if (number_entries) {
3301		for (i = 0; i < number_entries; i++) {
3302			temp = ntohs(list[i]);
3303			if (temp >= stcb->asoc.streamincnt) {
3304				continue;
3305			}
3306			stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3307		}
3308	} else {
3309		list = NULL;
3310		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3311			stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3312		}
3313	}
3314	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3315}
3316
3317static void
3318sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3319{
3320	uint32_t i;
3321	uint16_t temp;
3322
3323	if (number_entries > 0) {
3324		for (i = 0; i < number_entries; i++) {
3325			temp = ntohs(list[i]);
3326			if (temp >= stcb->asoc.streamoutcnt) {
3327				/* no such stream */
3328				continue;
3329			}
3330			stcb->asoc.strmout[temp].next_mid_ordered = 0;
3331			stcb->asoc.strmout[temp].next_mid_unordered = 0;
3332		}
3333	} else {
3334		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3335			stcb->asoc.strmout[i].next_mid_ordered = 0;
3336			stcb->asoc.strmout[i].next_mid_unordered = 0;
3337		}
3338	}
3339	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3340}
3341
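/*
 * Clear any pending reset state on the listed outgoing streams (or all of
 * them) by marking them SCTP_STREAM_OPEN again.
 */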
3342static void
3343sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3344{
3345	uint32_t i;
3346	uint16_t temp;
3347
3348	if (number_entries > 0) {
3349		for (i = 0; i < number_entries; i++) {
3350			temp = ntohs(list[i]);
3351			if (temp >= stcb->asoc.streamoutcnt) {
3352				/* no such stream */
3353				continue;
3354			}
3355			stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3356		}
3357	} else {
3358		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3359			stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3360		}
3361	}
3362}
3363
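/*
 * Look up the outstanding stream reset request with sequence number seq in
 * the queued STREAM RESET chunk (a single chunk can carry at most two
 * requests). Optionally hands back the chunk itself via bchk.
 */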
3364struct sctp_stream_reset_request *
3365sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3366{
3367	struct sctp_association *asoc;
3368	struct sctp_chunkhdr *ch;
3369	struct sctp_stream_reset_request *r;
3370	struct sctp_tmit_chunk *chk;
3371	int len, clen;
3372
3373	asoc = &stcb->asoc;
3374	chk = asoc->str_reset;
3375	if (TAILQ_EMPTY(&asoc->control_send_queue) ||
3376	    (chk == NULL)) {
3377		asoc->stream_reset_outstanding = 0;
3378		return (NULL);
3379	}
3380	if (chk->data == NULL) {
3381		return (NULL);
3382	}
3383	if (bchk != NULL) {
		/* the caller wants a copy of the chk pointer */
3385		*bchk = chk;
3386	}
3387	clen = chk->send_size;
3388	ch = mtod(chk->data, struct sctp_chunkhdr *);
3389	r = (struct sctp_stream_reset_request *)(ch + 1);
3390	if (ntohl(r->request_seq) == seq) {
3391		/* found it */
3392		return (r);
3393	}
3394	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3395	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one; there can be at most two requests */
3397		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
3398		if (ntohl(r->request_seq) == seq) {
3399			return (r);
3400		}
3401	}
3402	/* that seq is not here */
3403	return (NULL);
3404}
3405
3406static void
3407sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3408{
3409	struct sctp_association *asoc;
3410	struct sctp_tmit_chunk *chk;
3411
3412	asoc = &stcb->asoc;
3413	chk = asoc->str_reset;
3414	if (chk == NULL) {
3415		return;
3416	}
3417	asoc->str_reset = NULL;
3418	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
3419	    NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
3420	TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3421	asoc->ctrl_queue_cnt--;
3422	if (chk->data) {
3423		sctp_m_freem(chk->data);
3424		chk->data = NULL;
3425	}
3426	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3427}
3428
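/*
 * Handle the peer's response to one of our outstanding stream reset
 * requests: apply, deny, or keep retrying the request depending on the
 * reported action, and clean up the request chunk once nothing is left
 * outstanding. Returns 1 if processing the response aborted the association.
 */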
3429static int
3430sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3431    uint32_t seq, uint32_t action,
3432    struct sctp_stream_reset_response *respin)
3433{
3434	uint16_t type;
3435	int lparam_len;
3436	struct sctp_association *asoc = &stcb->asoc;
3437	struct sctp_tmit_chunk *chk;
3438	struct sctp_stream_reset_request *req_param;
3439	struct sctp_stream_reset_out_request *req_out_param;
3440	struct sctp_stream_reset_in_request *req_in_param;
3441	uint32_t number_entries;
3442
3443	if (asoc->stream_reset_outstanding == 0) {
3444		/* duplicate */
3445		return (0);
3446	}
3447	if (seq == stcb->asoc.str_reset_seq_out) {
3448		req_param = sctp_find_stream_reset(stcb, seq, &chk);
3449		if (req_param != NULL) {
3450			stcb->asoc.str_reset_seq_out++;
3451			type = ntohs(req_param->ph.param_type);
3452			lparam_len = ntohs(req_param->ph.param_length);
3453			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3454				int no_clear = 0;
3455
3456				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
3457				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3458				asoc->stream_reset_out_is_outstanding = 0;
3459				if (asoc->stream_reset_outstanding)
3460					asoc->stream_reset_outstanding--;
3461				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3462					/* do it */
3463					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
3464				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3465					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3466				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
3467					/*
3468					 * Set it up so we don't stop
3469					 * retransmitting
3470					 */
3471					asoc->stream_reset_outstanding++;
3472					stcb->asoc.str_reset_seq_out--;
3473					asoc->stream_reset_out_is_outstanding = 1;
3474					no_clear = 1;
3475				} else {
3476					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3477				}
3478				if (no_clear == 0) {
3479					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
3480				}
3481			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3482				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
3483				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3484				if (asoc->stream_reset_outstanding)
3485					asoc->stream_reset_outstanding--;
3486				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3487					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3488					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3489				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3490					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3491					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3492				}
3493			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3494				/* Ok we now may have more streams */
3495				int num_stream;
3496
3497				num_stream = stcb->asoc.strm_pending_add_size;
3498				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3499					/* TSNH */
3500					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3501				}
3502				stcb->asoc.strm_pending_add_size = 0;
3503				if (asoc->stream_reset_outstanding)
3504					asoc->stream_reset_outstanding--;
3505				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3506					/* Put the new streams into effect */
3507					int i;
3508
3509					for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
3510						asoc->strmout[i].state = SCTP_STREAM_OPEN;
3511					}
3512					asoc->streamoutcnt += num_stream;
3513					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3514				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3515					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED);
3516				} else {
3517					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_FAILED, NULL, SCTP_SO_NOT_LOCKED);
3518				}
3519			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3520				if (asoc->stream_reset_outstanding)
3521					asoc->stream_reset_outstanding--;
3522				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3523					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED);
3524				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3525					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED);
3526				}
3527			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3528				/**
				 * a) Adopt the new incoming TSN.
				 * b) Reset the mapping array.
				 * c) Adopt the new outgoing TSN.
3532				 */
3533				struct sctp_stream_reset_response_tsn *resp;
3534				struct sctp_forward_tsn_chunk fwdtsn;
3535				int abort_flag = 0;
3536
3537				if (respin == NULL) {
3538					/* huh ? */
3539					return (0);
3540				}
3541				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
3542					return (0);
3543				}
3544				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3545					resp = (struct sctp_stream_reset_response_tsn *)respin;
3546					asoc->stream_reset_outstanding--;
3547					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3548					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3549					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3550					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3551					if (abort_flag) {
3552						return (1);
3553					}
3554					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3555					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3556						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3557					}
3558
3559					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3560					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3561					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3562
3563					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3564					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
3565
3566					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3567					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3568
3569					sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL);
3570					sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL);
3571					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3572				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3573					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, SCTP_ASSOC_RESET_DENIED, NULL, SCTP_SO_NOT_LOCKED);
3574				} else {
3575					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, SCTP_ASSOC_RESET_FAILED, NULL, SCTP_SO_NOT_LOCKED);
3576				}
3577			}
3578			/* get rid of the request and get the request flags */
3579			if (asoc->stream_reset_outstanding == 0) {
3580				sctp_clean_up_stream_reset(stcb);
3581			}
3582		}
3583	}
3584	if (asoc->stream_reset_outstanding == 0) {
3585		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
3586	}
3587	return (0);
3588}
3589
3590static void
3591sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3592    struct sctp_tmit_chunk *chk,
3593    struct sctp_stream_reset_in_request *req, int trunc)
3594{
3595	uint32_t seq;
3596	int len, i;
3597	int number_entries;
3598	uint16_t temp;
3599
3600	/*
	 * The peer wants us to send a str-reset for our outgoing streams,
	 * provided the request sequence number (seq_in) is right.
3603	 */
3604	struct sctp_association *asoc = &stcb->asoc;
3605
3606	seq = ntohl(req->request_seq);
3607	if (asoc->str_reset_seq_in == seq) {
3608		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3609		if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) {
3610			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3611		} else if (trunc) {
3612			/* Can't do it, since they exceeded our buffer size  */
3613			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3614		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3615			len = ntohs(req->ph.param_length);
3616			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3617			if (number_entries) {
3618				for (i = 0; i < number_entries; i++) {
3619					temp = ntohs(req->list_of_streams[i]);
3620					if (temp >= stcb->asoc.streamoutcnt) {
3621						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3622						goto bad_boy;
3623					}
3624					req->list_of_streams[i] = temp;
3625				}
3626				for (i = 0; i < number_entries; i++) {
3627					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
3628						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
3629					}
3630				}
3631			} else {
				/* It's all streams */
3633				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3634					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
3635						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
3636				}
3637			}
3638			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3639		} else {
3640			/* Can't do it, since we have sent one out */
3641			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
3642		}
3643bad_boy:
3644		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3645		asoc->str_reset_seq_in++;
3646	} else if (asoc->str_reset_seq_in - 1 == seq) {
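		/*
		 * One seq back: a retransmitted request, so just echo back
		 * the cached result of the previous action.
		 */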
3647		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3648	} else if (asoc->str_reset_seq_in - 2 == seq) {
3649		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3650	} else {
3651		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3652	}
3653	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
3654}
3655
3656static int
3657sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
3658    struct sctp_tmit_chunk *chk,
3659    struct sctp_stream_reset_tsn_request *req)
3660{
3661	/* reset all in and out and update the tsn */
3662	/*
	 * A) Reset my stream sequence numbers, in and out. B) Select a
	 * receive-next and set the cum-ack to it; also process this selected
	 * number as a fwd-tsn. C) Set my next sending seq in the response.
3666	 */
3667	struct sctp_forward_tsn_chunk fwdtsn;
3668	struct sctp_association *asoc = &stcb->asoc;
3669	int abort_flag = 0;
3670	uint32_t seq;
3671
3672	seq = ntohl(req->request_seq);
3673	if (asoc->str_reset_seq_in == seq) {
3674		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
3675		if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
3676			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3677		} else {
3678			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3679			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3680			fwdtsn.ch.chunk_flags = 0;
3681			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
3682			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3683			if (abort_flag) {
3684				return (1);
3685			}
3686			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
3687			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3688				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3689			}
3690			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
3691			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
3692			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
3693			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
3694			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
3695			atomic_add_int(&asoc->sending_seq, 1);
3696			/* save off historical data for retrans */
3697			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
3698			asoc->last_sending_seq[0] = asoc->sending_seq;
3699			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
3700			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
3701			sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL);
3702			sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL);
3703			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3704			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3705		}
3706		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3707		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
3708		asoc->str_reset_seq_in++;
3709	} else if (asoc->str_reset_seq_in - 1 == seq) {
3710		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3711		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
3712	} else if (asoc->str_reset_seq_in - 2 == seq) {
3713		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
3714		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
3715	} else {
3716		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3717	}
3718	return (0);
3719}
3720
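/*
 * The peer is resetting its outgoing streams (our incoming streams). Either
 * perform the reset right away, or, if TSNs up to send_reset_at_tsn are
 * still outstanding, queue the request until they have arrived.
 */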
3721static void
3722sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3723    struct sctp_tmit_chunk *chk,
3724    struct sctp_stream_reset_out_request *req, int trunc)
3725{
3726	uint32_t seq, tsn;
3727	int number_entries, len;
3728	struct sctp_association *asoc = &stcb->asoc;
3729
3730	seq = ntohl(req->request_seq);
3731
	/* now if it's not a duplicate we process it */
3733	if (asoc->str_reset_seq_in == seq) {
3734		len = ntohs(req->ph.param_length);
3735		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3736		/*
		 * The sender is resetting; handle the stream list. We must:
		 * a) verify whether we can do the reset now; if so, no problem;
		 * b) if we can't do the reset, copy the request;
		 * c) queue it, set up the data-input processing to trigger it
		 * when needed, and dequeue all the queued data.
3742		 */
3743		tsn = ntohl(req->send_reset_at_tsn);
3744
3745		/* move the reset action back one */
3746		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3747		if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) {
3748			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3749		} else if (trunc) {
3750			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3751		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
3752			/* we can do it now */
3753			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3754			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3755		} else {
3756			/*
3757			 * we must queue it up and thus wait for the TSN's
3758			 * to arrive that are at or before tsn
3759			 */
3760			struct sctp_stream_reset_list *liste;
3761			int siz;
3762
3763			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3764			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3765			    siz, SCTP_M_STRESET);
3766			if (liste == NULL) {
				/* out of memory */
3768				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3769				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3770				return;
3771			}
3772			liste->seq = seq;
3773			liste->tsn = tsn;
3774			liste->number_entries = number_entries;
3775			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
3776			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3777			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
3778		}
3779		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3780		asoc->str_reset_seq_in++;
3781	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3782		/*
3783		 * one seq back, just echo back last action since my
3784		 * response was lost.
3785		 */
3786		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3787	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3788		/*
3789		 * two seq back, just echo back last action since my
3790		 * response was lost.
3791		 */
3792		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3793	} else {
3794		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3795	}
3796}
3797
3798static void
3799sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3800    struct sctp_stream_reset_add_strm *str_add)
3801{
3802	/*
	 * Peer is requesting to add more streams. If it's within our
	 * max-streams limit we will allow it.
3805	 */
3806	uint32_t num_stream, i;
3807	uint32_t seq;
3808	struct sctp_association *asoc = &stcb->asoc;
3809	struct sctp_queued_to_read *ctl, *nctl;
3810
3811	/* Get the number. */
3812	seq = ntohl(str_add->request_seq);
3813	num_stream = ntohs(str_add->number_of_streams);
3814	/* Now what would be the new total? */
3815	if (asoc->str_reset_seq_in == seq) {
3816		num_stream += stcb->asoc.streamincnt;
3817		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3818		if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
3819			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3820		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
3821		    (num_stream > 0xffff)) {
			/* We must reject it; they ask for too many */
3823	denied:
3824			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3825		} else {
3826			/* Ok, we can do that :-) */
3827			struct sctp_stream_in *oldstrm;
3828
3829			/* save off the old */
3830			oldstrm = stcb->asoc.strmin;
3831			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3832			    (num_stream * sizeof(struct sctp_stream_in)),
3833			    SCTP_M_STRMI);
3834			if (stcb->asoc.strmin == NULL) {
3835				stcb->asoc.strmin = oldstrm;
3836				goto denied;
3837			}
3838			/* copy off the old data */
3839			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3840				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3841				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
3842				stcb->asoc.strmin[i].sid = i;
3843				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
3844				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3845				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
3846				/* now anything on those queues? */
3847				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
3848					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
3849					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
3850				}
3851				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
3852					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
3853					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
3854				}
3855			}
3856			/* Init the new streams */
3857			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3858				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3859				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
3860				stcb->asoc.strmin[i].sid = i;
3861				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3862				stcb->asoc.strmin[i].pd_api_started = 0;
3863				stcb->asoc.strmin[i].delivery_started = 0;
3864			}
3865			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3866			/* update the size */
3867			stcb->asoc.streamincnt = num_stream;
3868			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3869			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3870		}
3871		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3872		asoc->str_reset_seq_in++;
3873	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3874		/*
3875		 * one seq back, just echo back last action since my
3876		 * response was lost.
3877		 */
3878		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3879	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3880		/*
3881		 * two seq back, just echo back last action since my
3882		 * response was lost.
3883		 */
3884		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3885	} else {
3886		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3887	}
3888}
3889
3890static void
3891sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3892    struct sctp_stream_reset_add_strm *str_add)
3893{
3894	/*
	 * Peer is requesting to add more streams. If it's within our
	 * max-streams limit we will allow it.
3897	 */
3898	uint16_t num_stream;
3899	uint32_t seq;
3900	struct sctp_association *asoc = &stcb->asoc;
3901
3902	/* Get the number. */
3903	seq = ntohl(str_add->request_seq);
3904	num_stream = ntohs(str_add->number_of_streams);
3905	/* Now what would be the new total? */
3906	if (asoc->str_reset_seq_in == seq) {
3907		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3908		if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
3909			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3910		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it; we have something pending */
3912			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
3913		} else {
3914			/* Ok, we can do that :-) */
3915			int mychk;
3916
3917			mychk = stcb->asoc.streamoutcnt;
3918			mychk += num_stream;
3919			if (mychk < 0x10000) {
3920				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3921				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
3922					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3923				}
3924			} else {
3925				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3926			}
3927		}
3928		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
3929		asoc->str_reset_seq_in++;
3930	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3931		/*
3932		 * one seq back, just echo back last action since my
3933		 * response was lost.
3934		 */
3935		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3936	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3937		/*
3938		 * two seq back, just echo back last action since my
3939		 * response was lost.
3940		 */
3941		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3942	} else {
3943		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3944	}
3945}
3946
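/*
 * Walk all parameters of an incoming STREAM RESET chunk, dispatch each
 * request or response to its handler, and queue a response chunk on the
 * control send queue if any requests were handled. Returns non-zero if the
 * association was aborted while processing an embedded response.
 */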
3947#ifdef __GNUC__
3948__attribute__((noinline))
3949#endif
3950static int
3951sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3952    struct sctp_chunkhdr *ch_req)
3953{
3954	uint16_t remaining_length, param_len, ptype;
3955	struct sctp_paramhdr pstore;
3956	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3957	uint32_t seq = 0;
3958	int num_req = 0;
3959	int trunc = 0;
3960	struct sctp_tmit_chunk *chk;
3961	struct sctp_chunkhdr *ch;
3962	struct sctp_paramhdr *ph;
3963	int ret_code = 0;
3964	int num_param = 0;
3965
3966	/* now it may be a reset or a reset-response */
3967	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);
3968
3969	/* setup for adding the response */
3970	sctp_alloc_a_chunk(stcb, chk);
3971	if (chk == NULL) {
3972		return (ret_code);
3973	}
3974	chk->copy_by_ref = 0;
3975	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3976	chk->rec.chunk_id.can_take_data = 0;
3977	chk->flags = 0;
3978	chk->asoc = &stcb->asoc;
3979	chk->no_fr_allowed = 0;
3980	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3981	chk->book_size_scale = 0;
3982	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3983	if (chk->data == NULL) {
3984strres_nochunk:
3985		if (chk->data) {
3986			sctp_m_freem(chk->data);
3987			chk->data = NULL;
3988		}
3989		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3990		return (ret_code);
3991	}
3992	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3993
3994	/* setup chunk parameters */
3995	chk->sent = SCTP_DATAGRAM_UNSENT;
3996	chk->snd_count = 0;
3997	chk->whoTo = NULL;
3998
3999	ch = mtod(chk->data, struct sctp_chunkhdr *);
4000	ch->chunk_type = SCTP_STREAM_RESET;
4001	ch->chunk_flags = 0;
4002	ch->chunk_length = htons(chk->send_size);
4003	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
4004	offset += sizeof(struct sctp_chunkhdr);
4005	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
4006		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
4007		if (ph == NULL) {
4008			/* TSNH */
4009			break;
4010		}
4011		param_len = ntohs(ph->param_length);
4012		if ((param_len > remaining_length) ||
4013		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
4014			/* bad parameter length */
4015			break;
4016		}
4017		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
4018		    (uint8_t *)&cstore);
4019		if (ph == NULL) {
4020			/* TSNH */
4021			break;
4022		}
4023		ptype = ntohs(ph->param_type);
4024		num_param++;
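		/*
		 * A parameter larger than our scratch buffer can only be
		 * partially copied; flag it as truncated so the request
		 * handlers can deny it.
		 */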
4025		if (param_len > sizeof(cstore)) {
4026			trunc = 1;
4027		} else {
4028			trunc = 0;
4029		}
4030		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max number of parameters already, sorry */
4032			break;
4033		}
4034		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
4035			struct sctp_stream_reset_out_request *req_out;
4036
4037			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
4038				break;
4039			}
4040			req_out = (struct sctp_stream_reset_out_request *)ph;
4041			num_req++;
4042			if (stcb->asoc.stream_reset_outstanding) {
4043				seq = ntohl(req_out->response_seq);
4044				if (seq == stcb->asoc.str_reset_seq_out) {
4045					/* implicit ack */
4046					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
4047				}
4048			}
4049			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
4050		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
4051			struct sctp_stream_reset_add_strm *str_add;
4052
4053			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
4054				break;
4055			}
4056			str_add = (struct sctp_stream_reset_add_strm *)ph;
4057			num_req++;
4058			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
4059		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
4060			struct sctp_stream_reset_add_strm *str_add;
4061
4062			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
4063				break;
4064			}
4065			str_add = (struct sctp_stream_reset_add_strm *)ph;
4066			num_req++;
4067			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
4068		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
4069			struct sctp_stream_reset_in_request *req_in;
4070
4071			num_req++;
4072			req_in = (struct sctp_stream_reset_in_request *)ph;
4073			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
4074		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
4075			struct sctp_stream_reset_tsn_request *req_tsn;
4076
4077			num_req++;
4078			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
4079			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
4080				ret_code = 1;
4081				goto strres_nochunk;
4082			}
4083			/* no more */
4084			break;
4085		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
4086			struct sctp_stream_reset_response *resp;
4087			uint32_t result;
4088
4089			if (param_len < sizeof(struct sctp_stream_reset_response)) {
4090				break;
4091			}
4092			resp = (struct sctp_stream_reset_response *)ph;
4093			seq = ntohl(resp->response_seq);
4094			result = ntohl(resp->result);
4095			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
4096				ret_code = 1;
4097				goto strres_nochunk;
4098			}
4099		} else {
4100			break;
4101		}
4102		offset += SCTP_SIZE32(param_len);
4103		if (remaining_length >= SCTP_SIZE32(param_len)) {
4104			remaining_length -= SCTP_SIZE32(param_len);
4105		} else {
4106			remaining_length = 0;
4107		}
4108	}
4109	if (num_req == 0) {
		/* we have no response to send, so free the stuff */
4111		goto strres_nochunk;
4112	}
4113	/* ok we have a chunk to link in */
4114	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
4115	    chk,
4116	    sctp_next);
4117	stcb->asoc.ctrl_queue_cnt++;
4118	return (ret_code);
4119}
4120
4121/*
 * Handle a router's or endpoint's report of a packet loss. There are two
 * ways to handle this: either we get the whole packet and must dissect it
 * ourselves (possibly with truncation and/or corruption), or it is a
 * summary from a middle box that did the dissecting for us.
4126 */
4127static void
4128sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4129    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4130{
4131	struct sctp_chunk_desc desc;
4132	struct sctp_chunkhdr *chk_hdr;
4133	struct sctp_data_chunk *data_chunk;
4134	struct sctp_idata_chunk *idata_chunk;
4135	uint32_t bottle_bw, on_queue;
4136	uint32_t offset, chk_len;
4137	uint16_t pktdrp_len;
4138	uint8_t pktdrp_flags;
4139
4140	KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit,
4141	    ("PKTDROP chunk too small"));
4142	pktdrp_flags = cp->ch.chunk_flags;
4143	pktdrp_len = ntohs(cp->ch.chunk_length);
4144	KASSERT(limit <= pktdrp_len, ("Inconsistent limit"));
4145	if (pktdrp_flags & SCTP_PACKET_TRUNCATED) {
4146		if (ntohs(cp->trunc_len) <= pktdrp_len - sizeof(struct sctp_pktdrop_chunk)) {
4147			/* The peer plays games with us. */
4148			return;
4149		}
4150	}
4151	limit -= sizeof(struct sctp_pktdrop_chunk);
4152	offset = 0;
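	/* offset and limit now index into cp->data, the reflected packet. */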
4153	if (offset == limit) {
4154		if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4155			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4156		}
4157	} else if (offset + sizeof(struct sctphdr) > limit) {
4158		/* Only a partial SCTP common header. */
4159		SCTP_STAT_INCR(sctps_pdrpcrupt);
4160		offset = limit;
4161	} else {
4162		/* XXX: Check embedded SCTP common header. */
4163		offset += sizeof(struct sctphdr);
4164	}
4165	/* Now parse through the chunks themselves. */
4166	while (offset < limit) {
4167		if (offset + sizeof(struct sctp_chunkhdr) > limit) {
4168			SCTP_STAT_INCR(sctps_pdrpcrupt);
4169			break;
4170		}
4171		chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset);
4172		desc.chunk_type = chk_hdr->chunk_type;
4173		/* get amount we need to move */
4174		chk_len = (uint32_t)ntohs(chk_hdr->chunk_length);
4175		if (chk_len < sizeof(struct sctp_chunkhdr)) {
4176			/* Someone is lying... */
4177			break;
4178		}
4179		if (desc.chunk_type == SCTP_DATA) {
4180			if (stcb->asoc.idata_supported) {
				/* Someone is playing games with us. */
4182				break;
4183			}
4184			if (chk_len <= sizeof(struct sctp_data_chunk)) {
				/* Someone is playing games with us. */
4186				break;
4187			}
4188			if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) {
4189				/*
4190				 * Not enough data bytes available in the
4191				 * chunk.
4192				 */
4193				SCTP_STAT_INCR(sctps_pdrpnedat);
4194				goto next_chunk;
4195			}
4196			if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
4197				/* Not enough data in buffer. */
4198				break;
4199			}
4200			data_chunk = (struct sctp_data_chunk *)(cp->data + offset);
4201			memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
4202			desc.tsn_ifany = data_chunk->dp.tsn;
4203			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4204				SCTP_STAT_INCR(sctps_pdrpmbda);
4205			}
4206		} else if (desc.chunk_type == SCTP_IDATA) {
4207			if (!stcb->asoc.idata_supported) {
				/* Someone is playing games with us. */
4209				break;
4210			}
4211			if (chk_len <= sizeof(struct sctp_idata_chunk)) {
				/* Someone is playing games with us. */
4213				break;
4214			}
4215			if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) {
4216				/*
4217				 * Not enough data bytes available in the
4218				 * chunk.
4219				 */
4220				SCTP_STAT_INCR(sctps_pdrpnedat);
4221				goto next_chunk;
4222			}
4223			if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
4224				/* Not enough data in buffer. */
4225				break;
4226			}
4227			idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset);
4228			memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
4229			desc.tsn_ifany = idata_chunk->dp.tsn;
4230			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4231				SCTP_STAT_INCR(sctps_pdrpmbda);
4232			}
4233		} else {
4234			desc.tsn_ifany = htonl(0);
4235			memset(desc.data_bytes, 0, SCTP_NUM_DB_TO_VERIFY);
4236			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4237				SCTP_STAT_INCR(sctps_pdrpmbct);
4238			}
4239		}
4240		if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) {
4241			SCTP_STAT_INCR(sctps_pdrppdbrk);
4242			break;
4243		}
4244next_chunk:
4245		offset += SCTP_SIZE32(chk_len);
4246	}
	/* Now update the rwnd, possibly */
4248	if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4249		/* From a peer, we get a rwnd report */
4250		uint32_t a_rwnd;
4251
4252		SCTP_STAT_INCR(sctps_pdrpfehos);
4253
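		/*
		 * The report carries the peer's bottleneck bandwidth and the
		 * number of bytes on its queue; their difference serves as an
		 * advertised rwnd, from which our bytes in flight are
		 * subtracted (clamped at zero and by the SWS threshold).
		 */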
4254		bottle_bw = ntohl(cp->bottle_bw);
4255		on_queue = ntohl(cp->current_onq);
4256		if (bottle_bw && on_queue) {
4257			/* a rwnd report is in here */
4258			if (bottle_bw > on_queue)
4259				a_rwnd = bottle_bw - on_queue;
4260			else
4261				a_rwnd = 0;
4262
4263			if (a_rwnd == 0)
4264				stcb->asoc.peers_rwnd = 0;
4265			else {
4266				if (a_rwnd > stcb->asoc.total_flight) {
4267					stcb->asoc.peers_rwnd =
4268					    a_rwnd - stcb->asoc.total_flight;
4269				} else {
4270					stcb->asoc.peers_rwnd = 0;
4271				}
4272				if (stcb->asoc.peers_rwnd <
4273				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4274					/* SWS sender side engages */
4275					stcb->asoc.peers_rwnd = 0;
4276				}
4277			}
4278		}
4279	} else {
4280		SCTP_STAT_INCR(sctps_pdrpfmbox);
4281	}
4282
4283	/* now middle boxes in sat networks get a cwnd bump */
4284	if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) &&
4285	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4286	    (stcb->asoc.sat_network)) {
4287		/*
		 * This is debatable, but for sat networks it makes sense.
		 * Note that if a T3 timer has gone off, we will prohibit any
		 * changes to cwnd until we exit the T3 loss recovery.
4291		 */
4292		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4293		    net, cp, &bottle_bw, &on_queue);
4294	}
4295}
4296
4297/*
 * Handles all control chunks in a packet.
 * Inputs:
 *  - m: mbuf chain, assumed to still contain the IP/SCTP header
 *  - stcb: the tcb found for this packet
 *  - offset: offset into the mbuf chain to the first chunkhdr
 *  - length: the length of the complete packet
 * Outputs:
 *  - length: modified to the remaining length after control processing
 *  - netp: modified to the new sctp_nets after cookie-echo processing
 * Returns NULL to discard the packet (i.e. no asoc, bad packet, ...),
 * otherwise returns the tcb for this packet.
4305 */
4306#ifdef __GNUC__
4307__attribute__((noinline))
4308#endif
4309static struct sctp_tcb *
4310sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4311    struct sockaddr *src, struct sockaddr *dst,
4312    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4313    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4314    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4315    uint32_t vrf_id, uint16_t port)
4316{
4317	struct sctp_association *asoc;
4318	struct mbuf *op_err;
4319	char msg[SCTP_DIAG_INFO_LEN];
4320	uint32_t vtag_in;
4321	int num_chunks = 0;	/* number of control chunks processed */
4322	uint32_t chk_length, contiguous;
4323	int ret;
4324	int abort_no_unlock = 0;
4325	int ecne_seen = 0;
4326	int abort_flag;
4327
4328	/*
	 * How big should this be, and should it be alloc'd? Let's try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work...
	 * until we get into jumbograms and such.
4332	 */
4333	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4334	int got_auth = 0;
4335	uint32_t auth_offset = 0, auth_len = 0;
4336	int auth_skipped = 0;
4337	int asconf_cnt = 0;
4338
4339	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4340	    iphlen, *offset, length, (void *)stcb);
4341
4342	if (stcb) {
4343		SCTP_TCB_LOCK_ASSERT(stcb);
4344	}
4345	/* validate chunk header length... */
4346	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4347		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4348		    ntohs(ch->chunk_length));
4349		*offset = length;
4350		return (stcb);
4351	}
4352	/*
4353	 * validate the verification tag
4354	 */
4355	vtag_in = ntohl(sh->v_tag);
4356
4357	if (ch->chunk_type == SCTP_INITIATION) {
4358		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4359		    ntohs(ch->chunk_length), vtag_in);
4360		if (vtag_in != 0) {
4361			/* protocol error- silently discard... */
4362			SCTP_STAT_INCR(sctps_badvtag);
4363			if (stcb != NULL) {
4364				SCTP_TCB_UNLOCK(stcb);
4365			}
4366			return (NULL);
4367		}
4368	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4369		/*
		 * If there is no stcb, skip the AUTH chunk and process it
		 * later after a stcb is found (to validate that the lookup
		 * was valid).
4373		 */
4374		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4375		    (stcb == NULL) &&
4376		    (inp->auth_supported == 1)) {
4377			/* save this chunk for later processing */
4378			auth_skipped = 1;
4379			auth_offset = *offset;
4380			auth_len = ntohs(ch->chunk_length);
4381
4382			/* (temporarily) move past this chunk */
4383			*offset += SCTP_SIZE32(auth_len);
4384			if (*offset >= length) {
4385				/* no more data left in the mbuf chain */
4386				*offset = length;
4387				return (NULL);
4388			}
4389			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4390			    sizeof(struct sctp_chunkhdr), chunk_buf);
4391		}
4392		if (ch == NULL) {
4393			/* Help */
4394			*offset = length;
4395			return (stcb);
4396		}
4397		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4398			goto process_control_chunks;
4399		}
4400		/*
		 * First check if it's an ASCONF with an unknown src addr;
		 * we need to look inside to find the association.
4403		 */
4404		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4405			struct sctp_chunkhdr *asconf_ch = ch;
4406			uint32_t asconf_offset = 0, asconf_len = 0;
4407
4408			/* inp's refcount may be reduced */
4409			SCTP_INP_INCR_REF(inp);
4410
4411			asconf_offset = *offset;
4412			do {
4413				asconf_len = ntohs(asconf_ch->chunk_length);
4414				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4415					break;
4416				stcb = sctp_findassociation_ep_asconf(m,
4417				    *offset,
4418				    dst,
4419				    sh, &inp, netp, vrf_id);
4420				if (stcb != NULL)
4421					break;
4422				asconf_offset += SCTP_SIZE32(asconf_len);
4423				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4424				    sizeof(struct sctp_chunkhdr), chunk_buf);
4425			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4426			if (stcb == NULL) {
4427				/*
4428				 * reduce inp's refcount if not reduced in
4429				 * sctp_findassociation_ep_asconf().
4430				 */
4431				SCTP_INP_DECR_REF(inp);
4432			}
4433
4434			/* now go back and verify any auth chunk to be sure */
4435			if (auth_skipped && (stcb != NULL)) {
4436				struct sctp_auth_chunk *auth;
4437
4438				if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
4439					auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
4440					got_auth = 1;
4441					auth_skipped = 0;
4442				} else {
4443					auth = NULL;
4444				}
4445				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4446				    auth_offset)) {
4447					/* auth HMAC failed so dump it */
4448					*offset = length;
4449					return (stcb);
4450				} else {
4451					/* remaining chunks are HMAC checked */
4452					stcb->asoc.authenticated = 1;
4453				}
4454			}
4455		}
4456		if (stcb == NULL) {
4457			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4458			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4459			    msg);
4460			/* no association, so it's out of the blue... */
4461			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4462			    mflowtype, mflowid, inp->fibnum,
4463			    vrf_id, port);
4464			*offset = length;
4465			return (NULL);
4466		}
4467		asoc = &stcb->asoc;
4468		/* ABORT and SHUTDOWN can use either v_tag... */
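		/*
		 * With the T-bit set the packet was sent with the peer's
		 * (reflected) tag, so match against peer_vtag; otherwise it
		 * must carry our own my_vtag.
		 */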
4469		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4470		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4471		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4472			/* Take the T-bit always into account. */
4473			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4474			    (vtag_in == asoc->my_vtag)) ||
4475			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4476			    (asoc->peer_vtag != htonl(0)) &&
4477			    (vtag_in == asoc->peer_vtag))) {
4478				/* this is valid */
4479			} else {
4480				/* drop this packet... */
4481				SCTP_STAT_INCR(sctps_badvtag);
4482				if (stcb != NULL) {
4483					SCTP_TCB_UNLOCK(stcb);
4484				}
4485				return (NULL);
4486			}
4487		} else {
4488			/* for all other chunks, vtag must match */
4489			if (vtag_in != asoc->my_vtag) {
4490				/* invalid vtag... */
4491				SCTPDBG(SCTP_DEBUG_INPUT3,
4492				    "invalid vtag: %xh, expect %xh\n",
4493				    vtag_in, asoc->my_vtag);
4494				SCTP_STAT_INCR(sctps_badvtag);
4495				if (stcb != NULL) {
4496					SCTP_TCB_UNLOCK(stcb);
4497				}
4498				*offset = length;
4499				return (NULL);
4500			}
4501		}
4502	}			/* end if !SCTP_COOKIE_ECHO */
4503	/*
4504	 * process all control chunks...
4505	 */
4506	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4507	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4508	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4509	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack; we must have lost the ack */
4511		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4512		    *netp);
4513	}
4514
4515process_control_chunks:
4516	while (IS_SCTP_CONTROL(ch)) {
4517		/* validate chunk length */
4518		chk_length = ntohs(ch->chunk_length);
4519		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4520		    ch->chunk_type, chk_length);
4521		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4522		if (chk_length < sizeof(*ch) ||
4523		    (*offset + (int)chk_length) > length) {
4524			*offset = length;
4525			return (stcb);
4526		}
4527		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4528		/*
		 * INIT and INIT-ACK only get the fixed "header" portion,
		 * because we don't have to process the peer's COOKIE.
4531		 * All others get a complete chunk.
4532		 */
4533		switch (ch->chunk_type) {
4534		case SCTP_INITIATION:
4535			contiguous = sizeof(struct sctp_init_chunk);
4536			break;
4537		case SCTP_INITIATION_ACK:
4538			contiguous = sizeof(struct sctp_init_ack_chunk);
4539			break;
4540		default:
4541			contiguous = min(chk_length, sizeof(chunk_buf));
4542			break;
4543		}
4544		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4545		    contiguous,
4546		    chunk_buf);
4547		if (ch == NULL) {
4548			*offset = length;
4549			return (stcb);
4550		}
4551
4552		num_chunks++;
4553		/* Save off the last place we got a control from */
4554		if (stcb != NULL) {
4555			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4556				/*
4557				 * allow last_control to be NULL if
4558				 * ASCONF... ASCONF processing will find the
4559				 * right net later
4560				 */
4561				if ((netp != NULL) && (*netp != NULL))
4562					stcb->asoc.last_control_chunk_from = *netp;
4563			}
4564		}
4565#ifdef SCTP_AUDITING_ENABLED
4566		sctp_audit_log(0xB0, ch->chunk_type);
4567#endif
4568
		/* check to see if this chunk requires auth, but isn't authenticated */
4570		if ((stcb != NULL) &&
4571		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4572		    !stcb->asoc.authenticated) {
4573			/* "silently" ignore */
4574			SCTP_STAT_INCR(sctps_recvauthmissing);
4575			goto next_chunk;
4576		}
4577		switch (ch->chunk_type) {
4578		case SCTP_INITIATION:
4579			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4580			/* The INIT chunk must be the only chunk. */
4581			if ((num_chunks > 1) ||
4582			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4583				/*
4584				 * RFC 4960bis requires stopping the
4585				 * processing of the packet.
4586				 */
4587				*offset = length;
4588				return (stcb);
4589			}
4590			/* Honor our resource limit. */
4591			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
4592				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
4593				sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
4594				    mflowtype, mflowid, inp->fibnum,
4595				    vrf_id, port);
4596				*offset = length;
4597				if (stcb != NULL) {
4598					SCTP_TCB_UNLOCK(stcb);
4599				}
4600				return (NULL);
4601			}
4602			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
4603			    (struct sctp_init_chunk *)ch, inp,
4604			    stcb, *netp,
4605			    mflowtype, mflowid,
4606			    vrf_id, port);
4607			*offset = length;
4608			if (stcb != NULL) {
4609				SCTP_TCB_UNLOCK(stcb);
4610			}
4611			return (NULL);
4612			break;
4613		case SCTP_PAD_CHUNK:
4614			break;
4615		case SCTP_INITIATION_ACK:
4616			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
4617			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4618				/* We are not interested anymore */
4619				if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
4620					;
4621				} else {
4622					*offset = length;
4623					if (stcb != NULL) {
4624						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4625						    SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4626					}
4627					return (NULL);
4628				}
4629			}
4630			/* The INIT-ACK chunk must be the only chunk. */
4631			if ((num_chunks > 1) ||
4632			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4633				*offset = length;
4634				return (stcb);
4635			}
4636			if ((netp != NULL) && (*netp != NULL)) {
4637				ret = sctp_handle_init_ack(m, iphlen, *offset,
4638				    src, dst, sh,
4639				    (struct sctp_init_ack_chunk *)ch,
4640				    stcb, *netp,
4641				    &abort_no_unlock,
4642				    mflowtype, mflowid,
4643				    vrf_id);
4644			} else {
4645				ret = -1;
4646			}
4647			*offset = length;
4648			if (abort_no_unlock) {
4649				return (NULL);
4650			}
4651			/*
4652			 * Special case, I must call the output routine to
4653			 * get the cookie echoed
4654			 */
4655			if ((stcb != NULL) && (ret == 0)) {
4656				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4657			}
4658			return (stcb);
4659			break;
4660		case SCTP_SELECTIVE_ACK:
4661		case SCTP_NR_SELECTIVE_ACK:
4662			{
4663				int abort_now = 0;
4664				uint32_t a_rwnd, cum_ack;
4665				uint16_t num_seg, num_nr_seg, num_dup;
4666				uint8_t flags;
4667				int offset_seg, offset_dup;
4668
4669				SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
4670				    ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
4671				SCTP_STAT_INCR(sctps_recvsacks);
4672				if (stcb == NULL) {
4673					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
4674					    (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
4675					break;
4676				}
4677				if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
4678					if (chk_length < sizeof(struct sctp_sack_chunk)) {
4679						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4680						break;
4681					}
4682				} else {
4683					if (stcb->asoc.nrsack_supported == 0) {
4684						goto unknown_chunk;
4685					}
4686					if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4687						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
4688						break;
4689					}
4690				}
4691				if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4692					/*-
4693					 * If we have sent a shutdown-ack, we will pay no
4694					 * attention to a sack sent in to us since
4695					 * we don't care anymore.
4696					 */
4697					break;
4698				}
4699				flags = ch->chunk_flags;
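				/*
				 * Pull the cumulative TSN ack, gap-ack block
				 * counts, duplicate TSN count and advertised
				 * receiver window out of the (NR-)SACK and
				 * verify that the chunk length matches these
				 * counts exactly.
				 */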
4700				if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
4701					struct sctp_sack_chunk *sack;
4702
4703					sack = (struct sctp_sack_chunk *)ch;
4704					cum_ack = ntohl(sack->sack.cum_tsn_ack);
4705					num_seg = ntohs(sack->sack.num_gap_ack_blks);
4706					num_nr_seg = 0;
4707					num_dup = ntohs(sack->sack.num_dup_tsns);
4708					a_rwnd = ntohl(sack->sack.a_rwnd);
4709					if (sizeof(struct sctp_sack_chunk) +
4710					    num_seg * sizeof(struct sctp_gap_ack_block) +
4711					    num_dup * sizeof(uint32_t) != chk_length) {
4712						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4713						break;
4714					}
4715					offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4716					offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4717				} else {
4718					struct sctp_nr_sack_chunk *nr_sack;
4719
4720					nr_sack = (struct sctp_nr_sack_chunk *)ch;
4721					cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4722					num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4723					num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4724					num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4725					a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
4726					if (sizeof(struct sctp_nr_sack_chunk) +
4727					    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4728					    num_dup * sizeof(uint32_t) != chk_length) {
4729						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4730						break;
4731					}
4732					offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4733					offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
4734				}
4735				SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4736				    (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
4737				    cum_ack, num_seg, a_rwnd);
4738				stcb->asoc.seen_a_sack_this_pkt = 1;
4739				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4740				    (num_seg == 0) && (num_nr_seg == 0) &&
4741				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4742				    (stcb->asoc.saw_sack_with_frags == 0) &&
4743				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4744				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4745					/*
4746					 * This is a simple SACK with no
4747					 * gap-ack blocks and with data on
4748					 * the sent queue to be acked, so
4749					 * use the faster SACK processing
4750					 * path. Window-update SACKs with
4751					 * no missing segments also go
4752					 * this way.
4753					 */
4754					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
4755					    &abort_now, ecne_seen);
4756				} else {
4757					if ((netp != NULL) && (*netp != NULL)) {
4758						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
4759						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4760						    cum_ack, a_rwnd, ecne_seen);
4761					}
4762				}
4763				if (abort_now) {
4764					/* ABORT signal from sack processing */
4765					*offset = length;
4766					return (NULL);
4767				}
4768				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4769				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4770				    (stcb->asoc.stream_queue_cnt == 0)) {
4771					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4772				}
4773				break;
4774			}
4775		case SCTP_HEARTBEAT_REQUEST:
4776			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4777			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
4778				SCTP_STAT_INCR(sctps_recvheartbeat);
4779				sctp_send_heartbeat_ack(stcb, m, *offset,
4780				    chk_length, *netp);
4781			}
4782			break;
4783		case SCTP_HEARTBEAT_ACK:
4784			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
4785			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4786				/* It's not ours */
4787				break;
4788			}
4789			SCTP_STAT_INCR(sctps_recvheartbeatack);
4790			if ((netp != NULL) && (*netp != NULL)) {
4791				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4792				    stcb, *netp);
4793			}
4794			break;
4795		case SCTP_ABORT_ASSOCIATION:
4796			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4797			    (void *)stcb);
4798			*offset = length;
4799			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
4800				if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
4801					return (NULL);
4802				} else {
4803					return (stcb);
4804				}
4805			} else {
4806				return (NULL);
4807			}
4809		case SCTP_SHUTDOWN:
4810			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4811			    (void *)stcb);
4812			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4813				break;
4814			}
4815			if ((netp != NULL) && (*netp != NULL)) {
4816				abort_flag = 0;
4817				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4818				    stcb, *netp, &abort_flag);
4819				if (abort_flag) {
4820					*offset = length;
4821					return (NULL);
4822				}
4823			}
4824			break;
4825		case SCTP_SHUTDOWN_ACK:
4826			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
4827			if ((chk_length == sizeof(struct sctp_shutdown_ack_chunk)) &&
4828			    (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
4829				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4830				*offset = length;
4831				return (NULL);
4832			}
4833			break;
4834		case SCTP_OPERATION_ERROR:
4835			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
4836			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
4837			    sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
4838				*offset = length;
4839				return (NULL);
4840			}
4841			break;
4842		case SCTP_COOKIE_ECHO:
4843			SCTPDBG(SCTP_DEBUG_INPUT3,
4844			    "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
4845			if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
4846				;
4847			} else {
4848				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4849					/* We are not interested anymore */
4850			abend:
4851					if (stcb != NULL) {
4852						SCTP_TCB_UNLOCK(stcb);
4853					}
4854					*offset = length;
4855					return (NULL);
4856				}
4857			}
4858			/*-
4859			 * First are we accepting? We do this again here
4860			 * since it is possible that a previous endpoint WAS
4861			 * listening, responded to an INIT-ACK, and then
4862			 * closed. We opened and bound... and are now no
4863			 * longer listening.
4864			 *
4865			 * XXXGL: notes on checking listen queue length.
4866			 * 1) SCTP_IS_LISTENING() doesn't necessarily mean
4867			 *    SOLISTENING(), because a listening "UDP type"
4868			 *    socket isn't listening in terms of the socket
4869			 *    layer.  It is a normal data flow socket, that
4870			 *    can fork off new connections.  Thus, we should
4871			 *    look into sol_qlen only in case we are !UDP.
4872			 * 2) Checking sol_qlen in general requires locking
4873			 *    the socket, and this code lacks that.
4874			 */
4875			if ((stcb == NULL) &&
4876			    (!SCTP_IS_LISTENING(inp) ||
4877			    (((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) &&
4878			    inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
4879				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4880				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4881					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
4882					sctp_abort_association(inp, stcb, m, iphlen,
4883					    src, dst, sh, op_err,
4884					    mflowtype, mflowid,
4885					    vrf_id, port);
4886				}
4887				*offset = length;
4888				return (NULL);
4889			} else {
4890				struct mbuf *ret_buf;
4891				struct sctp_inpcb *linp;
4892				struct sctp_tmit_chunk *chk;
4893
4894				if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
4895				    SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4896					goto abend;
4897				}
4898
4899				if (stcb) {
4900					linp = NULL;
4901				} else {
4902					linp = inp;
4903				}
4904
4905				if (linp != NULL) {
4906					SCTP_ASOC_CREATE_LOCK(linp);
4907				}
4908
4909				if (netp != NULL) {
4910					struct sctp_tcb *locked_stcb;
4911
4912					locked_stcb = stcb;
4913					ret_buf =
4914					    sctp_handle_cookie_echo(m, iphlen,
4915					    *offset,
4916					    src, dst,
4917					    sh,
4918					    (struct sctp_cookie_echo_chunk *)ch,
4919					    &inp, &stcb, netp,
4920					    auth_skipped,
4921					    auth_offset,
4922					    auth_len,
4923					    &locked_stcb,
4924					    mflowtype,
4925					    mflowid,
4926					    vrf_id,
4927					    port);
4928					if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
4929						SCTP_TCB_UNLOCK(locked_stcb);
4930					}
4931					if (stcb != NULL) {
4932						SCTP_TCB_LOCK_ASSERT(stcb);
4933					}
4934				} else {
4935					ret_buf = NULL;
4936				}
4937				if (linp != NULL) {
4938					SCTP_ASOC_CREATE_UNLOCK(linp);
4939				}
4940				if (ret_buf == NULL) {
4941					if (stcb != NULL) {
4942						SCTP_TCB_UNLOCK(stcb);
4943					}
4944					SCTPDBG(SCTP_DEBUG_INPUT3,
4945					    "GAK, null buffer\n");
4946					*offset = length;
4947					return (NULL);
4948				}
4949				/* if AUTH skipped, see if it verified... */
4950				if (auth_skipped) {
4951					got_auth = 1;
4952					auth_skipped = 0;
4953				}
4954				/* Restart the timer if we have pending data */
4955				TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
4956					if (chk->whoTo != NULL) {
4957						break;
4958					}
4959				}
4960				if (chk != NULL) {
4961					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
4962				}
4963			}
4964			break;
4965		case SCTP_COOKIE_ACK:
4966			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
4967			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4968				break;
4969			}
4970			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4971				/* We are not interested anymore */
4972				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4973					;
4974				} else if (stcb) {
4975					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4976					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
4977					*offset = length;
4978					return (NULL);
4979				}
4980			}
4981			if ((netp != NULL) && (*netp != NULL)) {
4982				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4983			}
4984			break;
4985		case SCTP_ECN_ECHO:
4986			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
4987			if (stcb == NULL) {
4988				break;
4989			}
4990			if (stcb->asoc.ecn_supported == 0) {
4991				goto unknown_chunk;
4992			}
4993			if ((chk_length != sizeof(struct sctp_ecne_chunk)) &&
4994			    (chk_length != sizeof(struct old_sctp_ecne_chunk))) {
4995				break;
4996			}
4997			sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
4998			ecne_seen = 1;
4999			break;
5000		case SCTP_ECN_CWR:
5001			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
5002			if (stcb == NULL) {
5003				break;
5004			}
5005			if (stcb->asoc.ecn_supported == 0) {
5006				goto unknown_chunk;
5007			}
5008			if (chk_length != sizeof(struct sctp_cwr_chunk)) {
5009				break;
5010			}
5011			sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5012			break;
5013		case SCTP_SHUTDOWN_COMPLETE:
5014			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
5015			/* must be first and only chunk */
5016			if ((num_chunks > 1) ||
5017			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5018				*offset = length;
5019				return (stcb);
5020			}
5021			if ((chk_length == sizeof(struct sctp_shutdown_complete_chunk)) &&
5022			    (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5023				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5024				    stcb, *netp);
5025				*offset = length;
5026				return (NULL);
5027			}
5028			break;
5029		case SCTP_ASCONF:
5030			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5031			if (stcb != NULL) {
5032				if (stcb->asoc.asconf_supported == 0) {
5033					goto unknown_chunk;
5034				}
5035				sctp_handle_asconf(m, *offset, src,
5036				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5037				asconf_cnt++;
5038			}
5039			break;
5040		case SCTP_ASCONF_ACK:
5041			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
5042			if (stcb == NULL) {
5043				break;
5044			}
5045			if (stcb->asoc.asconf_supported == 0) {
5046				goto unknown_chunk;
5047			}
5048			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5049				break;
5050			}
5051			if ((netp != NULL) && (*netp != NULL)) {
5052				/* He's alive so give him credit */
5053				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5054					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5055					    stcb->asoc.overall_error_count,
5056					    0,
5057					    SCTP_FROM_SCTP_INPUT,
5058					    __LINE__);
5059				}
5060				stcb->asoc.overall_error_count = 0;
5061				sctp_handle_asconf_ack(m, *offset,
5062				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5063				if (abort_no_unlock)
5064					return (NULL);
5065			}
5066			break;
5067		case SCTP_FORWARD_CUM_TSN:
5068		case SCTP_IFORWARD_CUM_TSN:
5069			SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
5070			    ch->chunk_type == SCTP_FORWARD_CUM_TSN ? "FORWARD_TSN" : "I_FORWARD_TSN");
5071			if (stcb == NULL) {
5072				break;
5073			}
5074			if (stcb->asoc.prsctp_supported == 0) {
5075				goto unknown_chunk;
5076			}
5077			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5078				break;
5079			}
5080			if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) ||
5081			    ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) {
5082				if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) {
5083					SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated");
5084				} else {
5085					SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated");
5086				}
5087				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5088				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5089				*offset = length;
5090				return (NULL);
5091			}
5092			*fwd_tsn_seen = 1;
5093			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5094				/* We are not interested anymore */
5095				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5096				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5097				*offset = length;
5098				return (NULL);
5099			}
5100			/*
5101			 * For the purpose of sending a SACK, this looks like a DATA chunk.
5102			 */
5103			stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5104			abort_flag = 0;
5105			sctp_handle_forward_tsn(stcb,
5106			    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5107			if (abort_flag) {
5108				*offset = length;
5109				return (NULL);
5110			}
5111			break;
5112		case SCTP_STREAM_RESET:
5113			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5114			if (stcb == NULL) {
5115				break;
5116			}
5117			if (stcb->asoc.reconfig_supported == 0) {
5118				goto unknown_chunk;
5119			}
5120			if (chk_length < sizeof(struct sctp_stream_reset_tsn_req)) {
5121				break;
5122			}
5123			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5124				/* stop processing */
5125				*offset = length;
5126				return (NULL);
5127			}
5128			break;
5129		case SCTP_PACKET_DROPPED:
5130			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5131			if (stcb == NULL) {
5132				break;
5133			}
5134			if (stcb->asoc.pktdrop_supported == 0) {
5135				goto unknown_chunk;
5136			}
5137			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5138				break;
5139			}
5140			if ((netp != NULL) && (*netp != NULL)) {
5141				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5142				    stcb, *netp,
5143				    min(chk_length, contiguous));
5144			}
5145			break;
5146		case SCTP_AUTHENTICATION:
5147			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5148			if (stcb == NULL) {
5149				/* save the first AUTH for later processing */
5150				if (auth_skipped == 0) {
5151					auth_offset = *offset;
5152					auth_len = chk_length;
5153					auth_skipped = 1;
5154				}
5155				/* skip this chunk (temporarily) */
5156				break;
5157			}
5158			if (stcb->asoc.auth_supported == 0) {
5159				goto unknown_chunk;
5160			}
5161			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5162			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5163			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5164				/* It's not ours */
5165				*offset = length;
5166				return (stcb);
5167			}
5168			if (got_auth == 1) {
5169				/* skip this chunk... it's already auth'd */
5170				break;
5171			}
5172			got_auth = 1;
5173			if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) {
5174				/* auth HMAC failed so dump the packet */
5175				*offset = length;
5176				return (stcb);
5177			} else {
5178				/* remaining chunks are HMAC checked */
5179				stcb->asoc.authenticated = 1;
5180			}
5181			break;
5182
5183		default:
5184	unknown_chunk:
5185			/* it's an unknown chunk! */
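			/*
			 * The two high-order bits of the chunk type control
			 * the handling of unrecognized chunks (RFC 4960,
			 * Section 3.2): if 0x40 is set, report the chunk in
			 * an "Unrecognized Chunk Type" error cause; if 0x80
			 * is set, skip the chunk and keep processing the
			 * packet, otherwise stop processing the packet.
			 */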
5186			if ((ch->chunk_type & 0x40) &&
5187			    (stcb != NULL) &&
5188			    (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) &&
5189			    (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) &&
5190			    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5191				struct sctp_gen_error_cause *cause;
5192				int len;
5193
5194				op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5195				    0, M_NOWAIT, 1, MT_DATA);
5196				if (op_err != NULL) {
5197					len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5198					cause = mtod(op_err, struct sctp_gen_error_cause *);
5199					cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5200					cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5201					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5202					SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5203					if (SCTP_BUF_NEXT(op_err) != NULL) {
5204#ifdef SCTP_MBUF_LOGGING
5205						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5206							sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5207						}
5208#endif
5209						sctp_queue_op_err(stcb, op_err);
5210					} else {
5211						sctp_m_freem(op_err);
5212					}
5213				}
5214			}
5215			if ((ch->chunk_type & 0x80) == 0) {
5216				/* discard this packet */
5217				*offset = length;
5218				return (stcb);
5219			}	/* else skip this bad chunk and continue... */
5220			break;
5221		}		/* switch (ch->chunk_type) */
5222
5223next_chunk:
5224		/* get the next chunk */
5225		*offset += SCTP_SIZE32(chk_length);
5226		if (*offset >= length) {
5227			/* no more data left in the mbuf chain */
5228			break;
5229		}
5230		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5231		    sizeof(struct sctp_chunkhdr), chunk_buf);
5232		if (ch == NULL) {
5233			*offset = length;
5234			return (stcb);
5235		}
5236	}			/* while */
5237
5238	if ((asconf_cnt > 0) && (stcb != NULL)) {
5239		sctp_send_asconf_ack(stcb);
5240	}
5241	return (stcb);
5242}
5243
5244/*
5245 * common input chunk processing (v4 and v6)
5246 */
5247void
5248sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5249    struct sockaddr *src, struct sockaddr *dst,
5250    struct sctphdr *sh, struct sctp_chunkhdr *ch,
5251    uint8_t compute_crc,
5252    uint8_t ecn_bits,
5253    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
5254    uint32_t vrf_id, uint16_t port)
5255{
5256	char msg[SCTP_DIAG_INFO_LEN];
5257	struct mbuf *m = *mm, *op_err;
5258	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5259	struct sctp_tcb *stcb = NULL;
5260	struct sctp_nets *net = NULL;
5261	uint32_t high_tsn;
5262	uint32_t cksum_in_hdr;
5263	int un_sent;
5264	int cnt_ctrl_ready = 0;
5265	int fwd_tsn_seen = 0, data_processed = 0;
5266	bool cksum_validated, stcb_looked_up;
5267
5268	SCTP_STAT_INCR(sctps_recvdatagrams);
5269#ifdef SCTP_AUDITING_ENABLED
5270	sctp_audit_log(0xE0, 1);
5271	sctp_auditing(0, inp, stcb, net);
5272#endif
5273
5274	stcb_looked_up = false;
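	/*
	 * Checksum handling: a non-zero checksum in the common header is
	 * verified as CRC32c right away.  A zero checksum is only accepted
	 * if the association has negotiated lower-layer error detection
	 * (rcv_edmid, e.g. DTLS below SCTP) or, for packets not matching
	 * any endpoint, if the sctp_ootb_with_zero_cksum sysctl allows it;
	 * otherwise the zero checksum is treated as a checksum error.
	 */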
5275	if (compute_crc != 0) {
5276		cksum_validated = false;
5277		cksum_in_hdr = sh->checksum;
5278		if (cksum_in_hdr != htonl(0)) {
5279			uint32_t cksum_calculated;
5280
5281	validate_cksum:
5282			sh->checksum = 0;
5283			cksum_calculated = sctp_calculate_cksum(m, iphlen);
5284			sh->checksum = cksum_in_hdr;
5285			if (cksum_calculated != cksum_in_hdr) {
5286				if (stcb_looked_up) {
5287					/*
5288					 * The packet has a zero checksum,
5289					 * which is not the correct CRC.
5290					 * Either no stcb has been found,
5291					 * or one has been found for which
5292					 * an incorrect zero checksum is not acceptable.
5293					 */
5294					KASSERT(cksum_in_hdr == htonl(0),
5295					    ("cksum in header not zero: %x",
5296					    ntohl(cksum_in_hdr)));
5297					if ((inp == NULL) &&
5298					    (SCTP_BASE_SYSCTL(sctp_ootb_with_zero_cksum) == 1)) {
5299						/*
5300						 * This is an OOTB packet;
5301						 * depending on the sysctl
5302						 * variable, pretend that
5303						 * the checksum is
5304						 * acceptable, to allow an
5305						 * appropriate response
5306						 * (an ABORT, for example)
5307						 * to be sent.
5308						 */
5309						KASSERT(stcb == NULL,
5310						    ("stcb is %p", stcb));
5311						SCTP_STAT_INCR(sctps_recvzerocrc);
5312						goto cksum_validated;
5313					}
5314				} else {
5315					stcb = sctp_findassociation_addr(m, offset, src, dst,
5316					    sh, ch, &inp, &net, vrf_id);
5317				}
5318				SCTPDBG(SCTP_DEBUG_INPUT1, "Bad cksum in SCTP packet:%x calculated:%x m:%p mlen:%d iphlen:%d\n",
5319				    ntohl(cksum_in_hdr), ntohl(cksum_calculated), (void *)m, length, iphlen);
5320#if defined(INET) || defined(INET6)
5321				if ((ch->chunk_type != SCTP_INITIATION) &&
5322				    (net != NULL) && (net->port != port)) {
5323					if (net->port == 0) {
5324						/*
5325						 * UDP encapsulation turned
5326						 * on.
5327						 */
5328						net->mtu -= sizeof(struct udphdr);
5329						if (stcb->asoc.smallest_mtu > net->mtu) {
5330							sctp_pathmtu_adjustment(stcb, net->mtu, true);
5331						}
5332					} else if (port == 0) {
5333						/*
5334						 * UDP encapsulation turned
5335						 * off.
5336						 */
5337						net->mtu += sizeof(struct udphdr);
5338						/* XXX Update smallest_mtu */
5339					}
5340					net->port = port;
5341				}
5342#endif
5343				if (net != NULL) {
5344					net->flowtype = mflowtype;
5345					net->flowid = mflowid;
5346				}
5347				SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5348				if ((inp != NULL) && (stcb != NULL)) {
5349					if (stcb->asoc.pktdrop_supported) {
5350						sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5351						sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5352					}
5353				} else if ((inp != NULL) && (stcb == NULL)) {
5354					inp_decr = inp;
5355				}
5356				SCTP_STAT_INCR(sctps_badsum);
5357				SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5358				goto out;
5359			} else {
5360				cksum_validated = true;
5361			}
5362		}
5363		KASSERT(cksum_validated || cksum_in_hdr == htonl(0),
5364		    ("cksum 0x%08x not zero and not validated", ntohl(cksum_in_hdr)));
5365		if (!cksum_validated) {
5366			stcb = sctp_findassociation_addr(m, offset, src, dst,
5367			    sh, ch, &inp, &net, vrf_id);
5368			stcb_looked_up = true;
5369			if (stcb == NULL) {
5370				goto validate_cksum;
5371			}
5372			if (stcb->asoc.rcv_edmid == SCTP_EDMID_NONE) {
5373				goto validate_cksum;
5374			}
5375			KASSERT(stcb->asoc.rcv_edmid == SCTP_EDMID_LOWER_LAYER_DTLS,
5376			    ("Unexpected EDMID %u", stcb->asoc.rcv_edmid));
5377			SCTP_STAT_INCR(sctps_recvzerocrc);
5378		}
5379	}
5380cksum_validated:
5381	/* Destination port of 0 is illegal, based on RFC 4960. */
5382	if (sh->dest_port == htons(0)) {
5383		SCTP_STAT_INCR(sctps_hdrops);
5384		if ((stcb == NULL) && (inp != NULL)) {
5385			inp_decr = inp;
5386		}
5387		goto out;
5388	}
5389	if (!stcb_looked_up) {
5390		stcb = sctp_findassociation_addr(m, offset, src, dst,
5391		    sh, ch, &inp, &net, vrf_id);
5392	}
5393#if defined(INET) || defined(INET6)
5394	if ((ch->chunk_type != SCTP_INITIATION) &&
5395	    (net != NULL) && (net->port != port)) {
5396		if (net->port == 0) {
5397			/* UDP encapsulation turned on. */
5398			net->mtu -= sizeof(struct udphdr);
5399			if (stcb->asoc.smallest_mtu > net->mtu) {
5400				sctp_pathmtu_adjustment(stcb, net->mtu, true);
5401			}
5402		} else if (port == 0) {
5403			/* UDP encapsulation turned off. */
5404			net->mtu += sizeof(struct udphdr);
5405			/* XXX Update smallest_mtu */
5406		}
5407		net->port = port;
5408	}
5409#endif
5410	if (net != NULL) {
5411		net->flowtype = mflowtype;
5412		net->flowid = mflowid;
5413	}
5414	if (inp == NULL) {
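		/*
		 * No endpoint owns the destination port, so this is an
		 * out-of-the-blue packet (RFC 4960, Section 8.4).  Subject
		 * to rate limiting, a SHUTDOWN ACK is answered with a
		 * SHUTDOWN COMPLETE, a SHUTDOWN COMPLETE or ABORT is dropped
		 * silently, and other chunks trigger an ABORT unless the
		 * blackhole sysctl suppresses the response.
		 */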
5415		SCTP_STAT_INCR(sctps_noport);
5416		SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5417		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5418			goto out;
5419		}
5420		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5421			SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
5422			sctp_send_shutdown_complete2(src, dst, sh,
5423			    mflowtype, mflowid, fibnum,
5424			    vrf_id, port);
5425			goto out;
5426		}
5427		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5428			SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
5429			goto out;
5430		}
5431		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5432			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5433			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5434			    (ch->chunk_type != SCTP_INIT))) {
5435				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5436				    "Out of the blue");
5437				sctp_send_abort(m, iphlen, src, dst,
5438				    sh, 0, op_err,
5439				    mflowtype, mflowid, fibnum,
5440				    vrf_id, port);
5441			}
5442		}
5443		goto out;
5444	} else if (stcb == NULL) {
5445		inp_decr = inp;
5446	}
5447	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5448	    (void *)m, iphlen, offset, length, (void *)stcb);
5449	if (stcb) {
5450		/* always clear this before beginning a packet */
5451		stcb->asoc.authenticated = 0;
5452		stcb->asoc.seen_a_sack_this_pkt = 0;
5453		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5454		    (void *)stcb, stcb->asoc.state);
5455
5456		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5457		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5458			/*-
5459			 * If we hit here, we had a ref count
5460			 * up when the assoc was aborted and the
5461			 * timer is clearing out the assoc; we should
5462			 * NOT respond to any packet... it's OOTB.
5463			 */
5464			SCTP_TCB_UNLOCK(stcb);
5465			stcb = NULL;
5466			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5467			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5468			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5469			    msg);
5470			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5471			    mflowtype, mflowid, inp->fibnum,
5472			    vrf_id, port);
5473			goto out;
5474		}
5475	}
5476	if (IS_SCTP_CONTROL(ch)) {
5477		/* process the control portion of the SCTP packet */
5478		/* sa_ignore NO_NULL_CHK */
5479		stcb = sctp_process_control(m, iphlen, &offset, length,
5480		    src, dst, sh, ch,
5481		    inp, stcb, &net, &fwd_tsn_seen,
5482		    mflowtype, mflowid, fibnum,
5483		    vrf_id, port);
5484		if (stcb) {
5485			/*
5486			 * This covers us if the cookie-echo was there and
5487			 * it changes our INP.
5488			 */
5489			inp = stcb->sctp_ep;
5490#if defined(INET) || defined(INET6)
5491			if ((ch->chunk_type != SCTP_INITIATION) &&
5492			    (net != NULL) && (net->port != port)) {
5493				if (net->port == 0) {
5494					/* UDP encapsulation turned on. */
5495					net->mtu -= sizeof(struct udphdr);
5496					if (stcb->asoc.smallest_mtu > net->mtu) {
5497						sctp_pathmtu_adjustment(stcb, net->mtu, true);
5498					}
5499				} else if (port == 0) {
5500					/* UDP encapsulation turned off. */
5501					net->mtu += sizeof(struct udphdr);
5502					/* XXX Update smallest_mtu */
5503				}
5504				net->port = port;
5505			}
5506#endif
5507		}
5508	} else {
5509		/*
5510		 * no control chunks, so pre-process DATA chunks (these
5511		 * checks are taken care of by control processing)
5512		 */
5513
5514		/*
5515		 * If this is a DATA-only packet and AUTH is required, then
5516		 * punt; the packet cannot have been authenticated without
5517		 * any AUTH (control) chunks.
5518		 */
5519		if ((stcb != NULL) &&
5520		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5521			/* "silently" ignore */
5522			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5523			SCTP_STAT_INCR(sctps_recvauthmissing);
5524			goto out;
5525		}
5526		if (stcb == NULL) {
5527			/* out of the blue DATA chunk */
5528			SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
5529			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5530			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5531			    msg);
5532			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5533			    mflowtype, mflowid, fibnum,
5534			    vrf_id, port);
5535			goto out;
5536		}
5537		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5538			/* v_tag mismatch! */
5539			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5540			SCTP_STAT_INCR(sctps_badvtag);
5541			goto out;
5542		}
5543	}
5544
5545	SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5546	if (stcb == NULL) {
5547		/*
5548		 * no valid TCB for this packet, or we found it's a bad
5549		 * packet while processing control, or we're done with this
5550		 * packet (done or skip rest of data), so we drop it...
5551		 */
5552		goto out;
5553	}
5554
5555	/*
5556	 * DATA chunk processing
5557	 */
5558	/* plow through the data chunks while length > offset */
5559
5560	/*
5561	 * Rest should be DATA only.  Check authentication state if AUTH for
5562	 * DATA is required.
5563	 */
5564	if ((length > offset) &&
5565	    (stcb != NULL) &&
5566	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5567	    !stcb->asoc.authenticated) {
5568		/* "silently" ignore */
5569		SCTP_STAT_INCR(sctps_recvauthmissing);
5570		SCTPDBG(SCTP_DEBUG_AUTH1,
5571		    "Data chunk requires AUTH, skipped\n");
5572		goto trigger_send;
5573	}
5574	if (length > offset) {
5575		int retval;
5576
5577		/*
5578		 * First check to make sure our state is correct. We would
5579		 * not get here unless we really did have a tag, so we don't
5580		 * abort if this happens, just dump the chunk silently.
5581		 */
5582		switch (SCTP_GET_STATE(stcb)) {
5583		case SCTP_STATE_COOKIE_ECHOED:
5584			/*
5585			 * We consider DATA with a valid tag in this state
5586			 * as showing us that the COOKIE-ACK was lost.
5587			 * Imply it was there.
5588			 */
5589			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5590			break;
5591		case SCTP_STATE_COOKIE_WAIT:
5592			/*
5593			 * We consider OOTB any data sent during asoc setup.
5594			 */
5595			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5596			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5597			    msg);
5598			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5599			    mflowtype, mflowid, inp->fibnum,
5600			    vrf_id, port);
5601			goto out;
5602			/* sa_ignore NOTREACHED */
5603			break;
5604		case SCTP_STATE_EMPTY:	/* should not happen */
5605		case SCTP_STATE_INUSE:	/* should not happen */
5606		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5607		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5608		default:
5609			goto out;
5610			/* sa_ignore NOTREACHED */
5611			break;
5612		case SCTP_STATE_OPEN:
5613		case SCTP_STATE_SHUTDOWN_SENT:
5614			break;
5615		}
5616		/* plow through the data chunks while length > offset */
5617		retval = sctp_process_data(mm, iphlen, &offset, length,
5618		    inp, stcb, net, &high_tsn);
5619		if (retval == 2) {
5620			/*
5621			 * The association aborted, NO UNLOCK needed since
5622			 * the association is destroyed.
5623			 */
5624			stcb = NULL;
5625			goto out;
5626		}
5627		if (retval == 0) {
5628			data_processed = 1;
5629		}
5630		/*
5631		 * Anything important needs to have been m_copy'ed in
5632		 * process_data
5633		 */
5634	}
5635
5636	/* take care of ecn */
5637	if ((data_processed == 1) &&
5638	    (stcb->asoc.ecn_supported == 1) &&
5639	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
5640		/* Yep, we need to add an ECNE */
5641		sctp_send_ecn_echo(stcb, net, high_tsn);
5642	}
5643
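	/*
	 * A FORWARD TSN can advance the cumulative TSN even when the packet
	 * carried no DATA, so make sure a SACK gets scheduled; when no DATA
	 * was processed, sctp_sack_check() takes care of it, using knowledge
	 * of whether a gap was open.
	 */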
5644	if ((data_processed == 0) && (fwd_tsn_seen)) {
5645		int was_a_gap;
5646		uint32_t highest_tsn;
5647
5648		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
5649			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5650		} else {
5651			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5652		}
5653		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
5654		stcb->asoc.send_sack = 1;
5655		sctp_sack_check(stcb, was_a_gap);
5656	} else if (fwd_tsn_seen) {
5657		stcb->asoc.send_sack = 1;
5658	}
5659	/* trigger send of any chunks in queue... */
5660trigger_send:
5661#ifdef SCTP_AUDITING_ENABLED
5662	sctp_audit_log(0xE0, 2);
5663	sctp_auditing(1, inp, stcb, net);
5664#endif
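	/*
	 * Kick the output path if there is anything to send: queued control
	 * chunks (other than pending ECN echoes), queued ASCONFs, a pending
	 * stream reset, or unsent data while the peer advertises a window or
	 * nothing is in flight.
	 */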
5665	SCTPDBG(SCTP_DEBUG_INPUT1,
5666	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5667	    stcb->asoc.peers_rwnd,
5668	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5669	    stcb->asoc.total_flight);
5670	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5671	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
5672		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
5673	}
5674	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
5675	    cnt_ctrl_ready ||
5676	    stcb->asoc.trigger_reset ||
5677	    ((un_sent > 0) &&
5678	    (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) {
5679		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5680		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5681		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5682	}
5683#ifdef SCTP_AUDITING_ENABLED
5684	sctp_audit_log(0xE0, 3);
5685	sctp_auditing(2, inp, stcb, net);
5686#endif
5687out:
5688	if (stcb != NULL) {
5689		SCTP_TCB_UNLOCK(stcb);
5690	}
5691	if (inp_decr != NULL) {
5692		/* reduce ref-count */
5693		SCTP_INP_WLOCK(inp_decr);
5694		SCTP_INP_DECR_REF(inp_decr);
5695		SCTP_INP_WUNLOCK(inp_decr);
5696	}
5697	return;
5698}
5699
5700#ifdef INET
5701void
5702sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
5703{
5704	struct mbuf *m;
5705	int iphlen;
5706	uint32_t vrf_id = 0;
5707	uint8_t ecn_bits;
5708	struct sockaddr_in src, dst;
5709	struct ip *ip;
5710	struct sctphdr *sh;
5711	struct sctp_chunkhdr *ch;
5712	int length, offset;
5713	uint8_t compute_crc;
5714	uint32_t mflowid;
5715	uint8_t mflowtype;
5716	uint16_t fibnum;
5717
5718	iphlen = off;
5719	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
5720		SCTP_RELEASE_PKT(i_pak);
5721		return;
5722	}
5723	m = SCTP_HEADER_TO_CHAIN(i_pak);
5724#ifdef SCTP_MBUF_LOGGING
5725	/* Log in any input mbufs */
5726	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5727		sctp_log_mbc(m, SCTP_MBUF_INPUT);
5728	}
5729#endif
5730#ifdef SCTP_PACKET_LOGGING
5731	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
5732		sctp_packet_log(m);
5733	}
5734#endif
5735	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
5736	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
5737	    m->m_pkthdr.len,
5738	    if_name(m->m_pkthdr.rcvif),
5739	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
5740	mflowid = m->m_pkthdr.flowid;
5741	mflowtype = M_HASHTYPE_GET(m);
5742	fibnum = M_GETFIB(m);
5743	SCTP_STAT_INCR(sctps_recvpackets);
5744	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
5745	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
5746	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5747	if (SCTP_BUF_LEN(m) < offset) {
5748		if ((m = m_pullup(m, offset)) == NULL) {
5749			SCTP_STAT_INCR(sctps_hdrops);
5750			return;
5751		}
5752	}
5753	ip = mtod(m, struct ip *);
5754	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
5755	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
5756	offset -= sizeof(struct sctp_chunkhdr);
5757	memset(&src, 0, sizeof(struct sockaddr_in));
5758	src.sin_family = AF_INET;
5759	src.sin_len = sizeof(struct sockaddr_in);
5760	src.sin_port = sh->src_port;
5761	src.sin_addr = ip->ip_src;
5762	memset(&dst, 0, sizeof(struct sockaddr_in));
5763	dst.sin_family = AF_INET;
5764	dst.sin_len = sizeof(struct sockaddr_in);
5765	dst.sin_port = sh->dest_port;
5766	dst.sin_addr = ip->ip_dst;
5767	length = ntohs(ip->ip_len);
5768	/* Validate mbuf chain length with IP payload length. */
5769	if (SCTP_HEADER_LEN(m) != length) {
5770		SCTPDBG(SCTP_DEBUG_INPUT1,
5771		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
5772		SCTP_STAT_INCR(sctps_hdrops);
5773		goto out;
5774	}
5775	/* SCTP does not allow broadcasts or multicasts */
5776	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
5777		goto out;
5778	}
5779	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
5780		goto out;
5781	}
5782	ecn_bits = ip->ip_tos;
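	/*
	 * If the NIC has already verified the CRC32c (CSUM_SCTP_VALID is
	 * set), skip the software checksum computation.
	 */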
5783	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
5784		SCTP_STAT_INCR(sctps_recvhwcrc);
5785		compute_crc = 0;
5786	} else {
5787		SCTP_STAT_INCR(sctps_recvswcrc);
5788		compute_crc = 1;
5789	}
5790	sctp_common_input_processing(&m, iphlen, offset, length,
5791	    (struct sockaddr *)&src,
5792	    (struct sockaddr *)&dst,
5793	    sh, ch,
5794	    compute_crc,
5795	    ecn_bits,
5796	    mflowtype, mflowid, fibnum,
5797	    vrf_id, port);
5798out:
5799	if (m) {
5800		sctp_m_freem(m);
5801	}
5802	return;
5803}
5804
5805#if defined(SCTP_MCORE_INPUT) && defined(SMP)
5806extern int *sctp_cpuarry;
5807#endif
5808
5809int
5810sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
5811{
5812	struct mbuf *m;
5813	int off;
5814
5815	m = *mp;
5816	off = *offp;
5817#if defined(SCTP_MCORE_INPUT) && defined(SMP)
5818	if (mp_ncpus > 1) {
5819		struct ip *ip;
5820		struct sctphdr *sh;
5821		int offset;
5822		int cpu_to_use;
5823		uint32_t flowid, tag;
5824
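		/*
		 * Dispatch the packet to a CPU keyed on the mbuf flow id so
		 * that all packets of an association are handled on the same
		 * core; if the lower layers did not supply a flow id, derive
		 * one from the verification tag and the port numbers.
		 */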
5825		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
5826			flowid = m->m_pkthdr.flowid;
5827		} else {
5828			/*
5829			 * No flow id was built by the lower layers, so
5830			 * create one here.
5831			 */
5832			offset = off + sizeof(struct sctphdr);
5833			if (SCTP_BUF_LEN(m) < offset) {
5834				if ((m = m_pullup(m, offset)) == NULL) {
5835					SCTP_STAT_INCR(sctps_hdrops);
5836					return (IPPROTO_DONE);
5837				}
5838			}
5839			ip = mtod(m, struct ip *);
5840			sh = (struct sctphdr *)((caddr_t)ip + off);
5841			tag = htonl(sh->v_tag);
5842			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
5843			m->m_pkthdr.flowid = flowid;
5844			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
5845		}
5846		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
5847		sctp_queue_to_mcore(m, off, cpu_to_use);
5848		return (IPPROTO_DONE);
5849	}
5850#endif
5851	sctp_input_with_port(m, off, 0);
5852	return (IPPROTO_DONE);
5853}
5854#endif
5855