/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/sctp_indata.c 283822 2015-05-31 12:46:40Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things, I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and what we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. This keeps silly window syndrome (SWS)
	 * avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
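
/*
 * Worked example of the window arithmetic above (the numbers are
 * illustrative assumptions, not values from a running system): with a
 * 65536-byte receive buffer limit, 4096 bytes in 4 chunks on the
 * reassembly queue, 2048 bytes in 2 chunks on the stream queues, and
 * MSIZE == 256, the window before control overhead is
 *
 *   65536 - (4096 + 4 * 256) - (2048 + 2 * 256) = 57856
 *
 * Each queued chunk is charged MSIZE on top of its payload so that
 * mbuf overhead cannot let the peer pin down more kernel memory than
 * the advertised window suggests.
 */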



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
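
/*
 * A minimal userland sketch of consuming the ancillary data built
 * above, assuming the application enabled SCTP_RECVRCVINFO with
 * setsockopt() beforehand; illustrative only (it would live in an
 * application, not this file) and kept out of the build.
 */
#if 0
static void
example_read_rcvinfo(int sd)
{
	struct msghdr mhdr;
	struct cmsghdr *scmh;
	struct sctp_rcvinfo rinfo;
	char cbuf[CMSG_SPACE(sizeof(rinfo))];
	char dbuf[2048];
	struct iovec iov = { dbuf, sizeof(dbuf) };

	memset(&mhdr, 0, sizeof(mhdr));
	mhdr.msg_iov = &iov;
	mhdr.msg_iovlen = 1;
	mhdr.msg_control = cbuf;
	mhdr.msg_controllen = sizeof(cbuf);
	if (recvmsg(sd, &mhdr, 0) < 0)
		return;
	for (scmh = CMSG_FIRSTHDR(&mhdr); scmh != NULL;
	    scmh = CMSG_NXTHDR(&mhdr, scmh)) {
		if ((scmh->cmsg_level == IPPROTO_SCTP) &&
		    (scmh->cmsg_type == SCTP_RCVINFO)) {
			memcpy(&rinfo, CMSG_DATA(scmh), sizeof(rinfo));
			/* rcv_sid/rcv_ssn/rcv_tsn now describe the message */
		}
	}
}
#endif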


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This TSN is behind the cumulative ack, so we don't need
		 * to worry about moving it from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
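
/*
 * Worked example of the gap computed above (values chosen to show the
 * wrap case): with mapping_array_base_tsn == 0xfffffff0 and
 * tsn == 0x00000005, the TSN space has wrapped, and the gap is the
 * distance of the TSN from the base in sequence space: 15 steps to
 * reach 0xffffffff, one to wrap to 0, and five more, so gap == 21 and
 * bit 21 is the chunk's offset in both mapping arrays.
 */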


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* the socket above is long gone or going away.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
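
/*
 * Example of the loop above (illustrative TSN/flag values): if
 * tsn_last_delivered == 4 and the reassembly queue holds TSNs 5 (FIRST),
 * 6 (MIDDLE) and 7 (LAST) of one ordered message, all three are handed
 * to the socket buffer in turn; the LAST fragment clears
 * fragmented_delivery_inprogress, and the trailing loop then flushes any
 * complete messages that were parked on the stream's in-queue behind the
 * partially delivered one.
 */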

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_rcv buffer, keep doing so until we hit an out-of-order
 * entry. One big question still remains: what to do when the socket buffer
 * is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we can always hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
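
/*
 * The SCTP_SSN_GT/GE comparisons used above are serial-number
 * arithmetic on the 16-bit SSN space (in the spirit of RFC 1982):
 * a is considered "after" b iff 0 < (uint16_t)(a - b) < 2^15. A quick
 * worked example of the wrap case: for a == 0x0001 and b == 0xffff,
 * (uint16_t)(0x0001 - 0xffff) == 2, so SSN 0x0001 correctly sorts
 * after SSN 0xffff even though it is numerically smaller.
 */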

/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and a 1 if all of
 * the message is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
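
/*
 * Example (illustrative values): a queue holding TSNs 10 (FIRST),
 * 11 (MIDDLE) and 12 (LAST) sets *t_size to the sum of the three
 * send_sizes and returns 1; if TSN 12 were missing, the walk would
 * stop at the gap and return 0 with *t_size covering only TSNs 10
 * and 11.
 */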

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's OK to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery, but it won't know to go
		 * back and call itself again. We do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Finished our fragmented delivery; could more be
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
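
/*
 * Worked example of the pd_point test above, with assumed numbers (the
 * real SCTP_PARTIAL_DELIVERY_SHIFT value lives in sctp_constants.h):
 * with a 64 KB SO_RCV limit, a shift of 8 and a configured
 * partial_delivery_point of 4096, pd_point = min(65536 >> 8, 4096) =
 * 256, so partial delivery starts as soon as at least 256 contiguous
 * bytes of the first fragmented message are queued.
 */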

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space, we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either a LAST or
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it is NOT the same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it is NOT the same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? Check. */
	sctp_deliver_reasm_check(stcb, asoc);
}
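
/*
 * Summary of the neighbor audits above (B = FIRST/begin bit, E =
 * LAST/end bit): a chunk placed directly after a FIRST or MIDDLE
 * fragment must be a MIDDLE or LAST of the same stream, the same
 * ordering and (for ordered data) the same SSN; a chunk placed directly
 * after a LAST must itself be a FIRST; and the mirror-image rules apply
 * against the chunk that follows the insertion point. Any violation is
 * treated as a protocol error and aborts the association.
 */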

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers; sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
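
/*
 * Example of the check above (illustrative TSNs): if the queue holds
 * TSN 100 flagged MIDDLE and an unfragmented chunk arrives with TSN
 * 101, the new chunk would sit right after a fragment that is not a
 * LAST, so the routine returns 1 and the caller treats the packet as
 * bogus; had TSN 100 been a LAST, the self-contained TSN 101 would be
 * fine and 0 is returned.
 */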

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping array at max size; toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would have caused a
	 * SACK to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * Wait a minute, this guy is gone; there is no longer a
		 * receiver. Send the peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope, not in the valid range; dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_NOWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just parameters, and this one
			 * has two back-to-back paramhdrs: one with the
			 * error type and size, the other with the stream
			 * id and a reserved field.
			 */
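			/*
			 * The resulting Invalid Stream Identifier cause is
			 * 8 bytes on the wire (RFC 4960, Section 3.3.10.1):
			 *
			 *  +----------------------+---------------------+
			 *  | Cause Code = 1       | Cause Length = 8    |
			 *  +----------------------+---------------------+
			 *  | Stream Identifier    | Reserved = 0        |
			 *  +----------------------+---------------------+
			 */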
1420			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1421			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1422			phdr->param_length =
1423			    htons(sizeof(struct sctp_paramhdr) * 2);
1424			phdr++;
1425			/* We insert the stream in the type field */
1426			phdr->param_type = ch->dp.stream_id;
1427			/* And set the length to 0 for the rsvd field */
1428			phdr->param_length = 0;
1429			sctp_queue_op_err(stcb, mb);
1430		}
1431		SCTP_STAT_INCR(sctps_badsid);
1432		SCTP_TCB_LOCK_ASSERT(stcb);
1433		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1434		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1435			asoc->highest_tsn_inside_nr_map = tsn;
1436		}
1437		if (tsn == (asoc->cumulative_tsn + 1)) {
1438			/* Update cum-ack */
1439			asoc->cumulative_tsn = tsn;
1440		}
1441		return (0);
1442	}
1443	/*
1444	 * Before we continue lets validate that we are not being fooled by
1445	 * an evil attacker. We can only have 4k chunks based on our TSN
1446	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1447	 * way our stream sequence numbers could have wrapped. We of course
1448	 * only validate the FIRST fragment so the bit must be set.
1449	 */
1450	strmseq = ntohs(ch->dp.stream_sequence);
1451#ifdef SCTP_ASOCLOG_OF_TSNS
1452	SCTP_TCB_LOCK_ASSERT(stcb);
1453	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1454		asoc->tsn_in_at = 0;
1455		asoc->tsn_in_wrapped = 1;
1456	}
1457	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1458	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1459	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1460	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1461	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1462	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1463	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1464	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1465	asoc->tsn_in_at++;
1466#endif
1467	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1468	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1469	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1470	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1471		/* The incoming sseq is behind where we last delivered? */
1472		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1473		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1474
1475		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1476		    asoc->strmin[strmno].last_sequence_delivered,
1477		    tsn, strmno, strmseq);
1478		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1479		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1480		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1481		*abort_flag = 1;
1482		return (0);
1483	}
1484	/************************************
1485	 * From here down we may find ch-> invalid
1486	 * so its a good idea NOT to use it.
1487	 *************************************/
1488
1489	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1490	if (last_chunk == 0) {
1491		dmbuf = SCTP_M_COPYM(*m,
1492		    (offset + sizeof(struct sctp_data_chunk)),
1493		    the_len, M_NOWAIT);
1494#ifdef SCTP_MBUF_LOGGING
1495		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1496			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1497		}
1498#endif
1499	} else {
1500		/* We can steal the last chunk */
1501		int l_len;
1502
1503		dmbuf = *m;
1504		/* lop off the top part */
1505		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1506		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1507			l_len = SCTP_BUF_LEN(dmbuf);
1508		} else {
1509			/*
1510			 * need to count up the size; hopefully we do not
1511			 * hit this too often :-0
1512			 */
1513			struct mbuf *lat;
1514
1515			l_len = 0;
1516			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1517				l_len += SCTP_BUF_LEN(lat);
1518			}
1519		}
1520		if (l_len > the_len) {
1521			/* Trim the rounding bytes off the end too */
1522			m_adj(dmbuf, -(l_len - the_len));
1523		}
1524	}
1525	if (dmbuf == NULL) {
1526		SCTP_STAT_INCR(sctps_nomem);
1527		return (0);
1528	}
1529	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1530	    asoc->fragmented_delivery_inprogress == 0 &&
1531	    TAILQ_EMPTY(&asoc->resetHead) &&
1532	    ((ordered == 0) ||
1533	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1534	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1535		/* Candidate for express delivery */
1536		/*
1537		 * It's not fragmented, no PD-API is up, nothing is in the
1538		 * delivery queue, it's un-ordered OR ordered and the next to
1539		 * deliver AND nothing else is stuck on the stream queue,
1540		 * and there is room for it in the socket buffer. Let's just
1541		 * stuff it up the buffer....
1542		 */
1543
1544		/* It would be nice to avoid this copy if we could :< */
1545		sctp_alloc_a_readq(stcb, control);
1546		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1547		    protocol_id,
1548		    strmno, strmseq,
1549		    chunk_flags,
1550		    dmbuf);
1551		if (control == NULL) {
1552			goto failed_express_del;
1553		}
1554		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1555		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1556			asoc->highest_tsn_inside_nr_map = tsn;
1557		}
1558		sctp_add_to_readq(stcb->sctp_ep, stcb,
1559		    control, &stcb->sctp_socket->so_rcv,
1560		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1561
1562		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1563			/* for ordered, bump what we delivered */
1564			asoc->strmin[strmno].last_sequence_delivered++;
1565		}
1566		SCTP_STAT_INCR(sctps_recvexpress);
1567		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1568			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1569			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1570		}
1571		control = NULL;
1572
1573		goto finish_express_del;
1574	}
1575failed_express_del:
1576	/* If we reach here this is a new chunk */
1577	chk = NULL;
1578	control = NULL;
1579	/* Express for fragmented delivery? */
1580	if ((asoc->fragmented_delivery_inprogress) &&
1581	    (stcb->asoc.control_pdapi) &&
1582	    (asoc->str_of_pdapi == strmno) &&
1583	    (asoc->ssn_of_pdapi == strmseq)
1584	    ) {
1585		control = stcb->asoc.control_pdapi;
1586		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1587			/* Can't be another FIRST fragment mid-message */
1588			goto failed_pdapi_express_del;
1589		}
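		/*
		 * Express append only works if this chunk is the immediate
		 * successor of the last TSN placed into the in-progress
		 * message.
		 */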
1590		if (tsn == (control->sinfo_tsn + 1)) {
1591			/* Yep, we can add it on */
1592			int end = 0;
1593
1594			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1595				end = 1;
1596			}
1597			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1598			    tsn,
1599			    &stcb->sctp_socket->so_rcv)) {
1600				SCTP_PRINTF("Append fails end:%d\n", end);
1601				goto failed_pdapi_express_del;
1602			}
1603			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605				asoc->highest_tsn_inside_nr_map = tsn;
1606			}
1607			SCTP_STAT_INCR(sctps_recvexpressm);
1608			asoc->tsn_last_delivered = tsn;
1609			asoc->fragment_flags = chunk_flags;
1610			asoc->tsn_of_pdapi_last_delivered = tsn;
1611			asoc->last_flags_delivered = chunk_flags;
1612			asoc->last_strm_seq_delivered = strmseq;
1613			asoc->last_strm_no_delivered = strmno;
1614			if (end) {
1615				/* clean up the flags and such */
1616				asoc->fragmented_delivery_inprogress = 0;
1617				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1618					asoc->strmin[strmno].last_sequence_delivered++;
1619				}
1620				stcb->asoc.control_pdapi = NULL;
1621				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1622					/*
1623					 * There could be another message
1624					 * ready
1625					 */
1626					need_reasm_check = 1;
1627				}
1628			}
1629			control = NULL;
1630			goto finish_express_del;
1631		}
1632	}
1633failed_pdapi_express_del:
1634	control = NULL;
1635	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1636		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1637		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1638			asoc->highest_tsn_inside_nr_map = tsn;
1639		}
1640	} else {
1641		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1642		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1643			asoc->highest_tsn_inside_map = tsn;
1644		}
1645	}
1646	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1647		sctp_alloc_a_chunk(stcb, chk);
1648		if (chk == NULL) {
1649			/* No memory so we drop the chunk */
1650			SCTP_STAT_INCR(sctps_nomem);
1651			if (last_chunk == 0) {
1652				/* we copied it, free the copy */
1653				sctp_m_freem(dmbuf);
1654			}
1655			return (0);
1656		}
1657		chk->rec.data.TSN_seq = tsn;
1658		chk->no_fr_allowed = 0;
1659		chk->rec.data.stream_seq = strmseq;
1660		chk->rec.data.stream_number = strmno;
1661		chk->rec.data.payloadtype = protocol_id;
1662		chk->rec.data.context = stcb->asoc.context;
1663		chk->rec.data.doing_fast_retransmit = 0;
1664		chk->rec.data.rcv_flags = chunk_flags;
1665		chk->asoc = asoc;
1666		chk->send_size = the_len;
1667		chk->whoTo = net;
1668		atomic_add_int(&net->ref_count, 1);
1669		chk->data = dmbuf;
1670	} else {
1671		sctp_alloc_a_readq(stcb, control);
1672		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1673		    protocol_id,
1674		    strmno, strmseq,
1675		    chunk_flags,
1676		    dmbuf);
1677		if (control == NULL) {
1678			/* No memory so we drop the chunk */
1679			SCTP_STAT_INCR(sctps_nomem);
1680			if (last_chunk == 0) {
1681				/* we copied it, free the copy */
1682				sctp_m_freem(dmbuf);
1683			}
1684			return (0);
1685		}
1686		control->length = the_len;
1687	}
1688
1689	/* Mark it as received */
1690	/* Now queue it where it belongs */
1691	if (control != NULL) {
1692		/* First a sanity check */
1693		if (asoc->fragmented_delivery_inprogress) {
1694			/*
1695			 * Ok, we have a fragmented delivery in progress. If
1696			 * this chunk is next to deliver, OR belongs in our
1697			 * view to the reassembly, the peer is evil or
1698			 * broken.
1699			 */
1700			uint32_t estimate_tsn;
1701
1702			estimate_tsn = asoc->tsn_last_delivered + 1;
1703			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1704			    (estimate_tsn == control->sinfo_tsn)) {
1705				/* Evil/Broke peer */
1706				sctp_m_freem(control->data);
1707				control->data = NULL;
1708				if (control->whoFrom) {
1709					sctp_free_remote_addr(control->whoFrom);
1710					control->whoFrom = NULL;
1711				}
1712				sctp_free_a_readq(stcb, control);
1713				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1714				    tsn, strmno, strmseq);
1715				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1716				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1717				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1718				*abort_flag = 1;
1719				if (last_chunk) {
1720					*m = NULL;
1721				}
1722				return (0);
1723			} else {
1724				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1725					sctp_m_freem(control->data);
1726					control->data = NULL;
1727					if (control->whoFrom) {
1728						sctp_free_remote_addr(control->whoFrom);
1729						control->whoFrom = NULL;
1730					}
1731					sctp_free_a_readq(stcb, control);
1732					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1733					    tsn, strmno, strmseq);
1734					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1735					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1736					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1737					*abort_flag = 1;
1738					if (last_chunk) {
1739						*m = NULL;
1740					}
1741					return (0);
1742				}
1743			}
1744		} else {
1745			/* No PDAPI running */
1746			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1747				/*
1748				 * Reassembly queue is NOT empty; validate
1749				 * that this TSN does not need to be in the
1750				 * reassembly queue. If it does, then our peer
1751				 * is broken or evil.
1752				 */
1753				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1754					sctp_m_freem(control->data);
1755					control->data = NULL;
1756					if (control->whoFrom) {
1757						sctp_free_remote_addr(control->whoFrom);
1758						control->whoFrom = NULL;
1759					}
1760					sctp_free_a_readq(stcb, control);
1761					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1762					    tsn, strmno, strmseq);
1763					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1765					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1766					*abort_flag = 1;
1767					if (last_chunk) {
1768						*m = NULL;
1769					}
1770					return (0);
1771				}
1772			}
1773		}
1774		/* ok, if we reach here we have passed the sanity checks */
1775		if (chunk_flags & SCTP_DATA_UNORDERED) {
1776			/* queue directly into socket buffer */
1777			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1778			sctp_add_to_readq(stcb->sctp_ep, stcb,
1779			    control,
1780			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1781		} else {
1782			/*
1783			 * Special check for when streams are resetting. We
1784			 * could be smarter about this and check the actual
1785			 * stream to see if it is not being reset.. that way
1786			 * we would not create head-of-line blocking between
1787			 * streams being reset and those not being reset.
1788			 *
1789			 * We take complete messages that have a stream reset
1790			 * intervening (aka the TSN is after where our
1791			 * cum-ack needs to be) off and put them on a
1792			 * pending_reply_queue. The reassembly ones we do
1793			 * not have to worry about since they are all sorted
1794			 * and processed by TSN order. It is only the
1795			 * singletons I must worry about.
1796			 */
1797			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1798			    SCTP_TSN_GT(tsn, liste->tsn)) {
1799				/*
1800				 * yep, it's past where we need to reset... go
1801				 * ahead and queue it.
1802				 */
1803				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1804					/* first one on */
1805					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1806				} else {
1807					struct sctp_queued_to_read *ctlOn,
1808					                   *nctlOn;
1809					unsigned char inserted = 0;
1810
1811					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1812						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1813							continue;
1814						} else {
1815							/* found it */
1816							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1817							inserted = 1;
1818							break;
1819						}
1820					}
1821					if (inserted == 0) {
1822					/*
1823					 * Not inserted before any
1824					 * existing entry, so it
1825					 * must go at the end.
1826					 */
1827						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1828					}
1829				}
1830			} else {
1831				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1832				if (*abort_flag) {
1833					if (last_chunk) {
1834						*m = NULL;
1835					}
1836					return (0);
1837				}
1838			}
1839		}
1840	} else {
1841		/* Into the re-assembly queue */
1842		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1843		if (*abort_flag) {
1844			/*
1845			 * the assoc is now gone and chk was put onto the
1846			 * reasm queue, which has all been freed.
1847			 */
1848			if (last_chunk) {
1849				*m = NULL;
1850			}
1851			return (0);
1852		}
1853	}
1854finish_express_del:
1855	if (tsn == (asoc->cumulative_tsn + 1)) {
1856		/* Update cum-ack */
1857		asoc->cumulative_tsn = tsn;
1858	}
1859	if (last_chunk) {
1860		*m = NULL;
1861	}
1862	if (ordered) {
1863		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1864	} else {
1865		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1866	}
1867	SCTP_STAT_INCR(sctps_recvdata);
1868	/* Set it present please */
1869	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1870		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1871	}
1872	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1873		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1874		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1875	}
1876	/* check the special flag for stream resets */
1877	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1878	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1879		/*
1880		 * We have finished working through the backlogged TSNs; now
1881		 * it is time to reset streams. 1: call reset function. 2: free
1882		 * pending_reply space. 3: distribute any chunks in
1883		 * pending_reply_queue.
1884		 */
1885		struct sctp_queued_to_read *ctl, *nctl;
1886
1887		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1888		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1889		SCTP_FREE(liste, SCTP_M_STRESET);
1890		/* sa_ignore FREED_MEMORY */
1891		liste = TAILQ_FIRST(&asoc->resetHead);
1892		if (TAILQ_EMPTY(&asoc->resetHead)) {
1893			/* All can be removed */
1894			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1895				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1896				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1897				if (*abort_flag) {
1898					return (0);
1899				}
1900			}
1901		} else {
1902			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1903				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1904					break;
1905				}
1906				/*
1907				 * if ctl->sinfo_tsn is <= liste->tsn we can
1908				 * process it which is the NOT of
1909				 * ctl->sinfo_tsn > liste->tsn
1910				 */
1911				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1912				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1913				if (*abort_flag) {
1914					return (0);
1915				}
1916			}
1917		}
1918		/*
1919		 * Now service re-assembly to pick up anything that has been
1920		 * held on the reassembly queue.
1921		 */
1922		sctp_deliver_reasm_check(stcb, asoc);
1923		need_reasm_check = 0;
1924	}
1925	if (need_reasm_check) {
1926		/* Another one waits ? */
1927		sctp_deliver_reasm_check(stcb, asoc);
1928	}
1929	return (1);
1930}
1931
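/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val
 * starting at the least significant bit; it is used below to locate the
 * first missing TSN within a mapping array byte. For example:
 *
 *   sctp_map_lookup_tab[0x07] == 3   (binary 00000111)
 *   sctp_map_lookup_tab[0x17] == 3   (binary 00010111, bit 3 is clear)
 *   sctp_map_lookup_tab[0xff] == 8   (all eight TSNs present)
 */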
1932int8_t sctp_map_lookup_tab[256] = {
1933	0, 1, 0, 2, 0, 1, 0, 3,
1934	0, 1, 0, 2, 0, 1, 0, 4,
1935	0, 1, 0, 2, 0, 1, 0, 3,
1936	0, 1, 0, 2, 0, 1, 0, 5,
1937	0, 1, 0, 2, 0, 1, 0, 3,
1938	0, 1, 0, 2, 0, 1, 0, 4,
1939	0, 1, 0, 2, 0, 1, 0, 3,
1940	0, 1, 0, 2, 0, 1, 0, 6,
1941	0, 1, 0, 2, 0, 1, 0, 3,
1942	0, 1, 0, 2, 0, 1, 0, 4,
1943	0, 1, 0, 2, 0, 1, 0, 3,
1944	0, 1, 0, 2, 0, 1, 0, 5,
1945	0, 1, 0, 2, 0, 1, 0, 3,
1946	0, 1, 0, 2, 0, 1, 0, 4,
1947	0, 1, 0, 2, 0, 1, 0, 3,
1948	0, 1, 0, 2, 0, 1, 0, 7,
1949	0, 1, 0, 2, 0, 1, 0, 3,
1950	0, 1, 0, 2, 0, 1, 0, 4,
1951	0, 1, 0, 2, 0, 1, 0, 3,
1952	0, 1, 0, 2, 0, 1, 0, 5,
1953	0, 1, 0, 2, 0, 1, 0, 3,
1954	0, 1, 0, 2, 0, 1, 0, 4,
1955	0, 1, 0, 2, 0, 1, 0, 3,
1956	0, 1, 0, 2, 0, 1, 0, 6,
1957	0, 1, 0, 2, 0, 1, 0, 3,
1958	0, 1, 0, 2, 0, 1, 0, 4,
1959	0, 1, 0, 2, 0, 1, 0, 3,
1960	0, 1, 0, 2, 0, 1, 0, 5,
1961	0, 1, 0, 2, 0, 1, 0, 3,
1962	0, 1, 0, 2, 0, 1, 0, 4,
1963	0, 1, 0, 2, 0, 1, 0, 3,
1964	0, 1, 0, 2, 0, 1, 0, 8
1965};
1966
1967
1968void
1969sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1970{
1971	/*
1972	 * Now we also need to check the mapping array in a couple of ways.
1973	 * 1) Did we move the cum-ack point?
1974	 *
1975	 * When you first glance at this you might think that all entries that
1976	 * make up the position of the cum-ack would be in the nr-mapping
1977	 * array only.. i.e. things up to the cum-ack are always
1978	 * deliverable. That's true with one exception: when it's a fragmented
1979	 * message we may not deliver the data until some threshold (or all
1980	 * of it) is in place. So we must OR the nr_mapping_array and
1981	 * mapping_array to get a true picture of the cum-ack.
1982	 */
1983	struct sctp_association *asoc;
1984	int at;
1985	uint8_t val;
1986	int slide_from, slide_end, lgap, distance;
1987	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1988
1989	asoc = &stcb->asoc;
1990
1991	old_cumack = asoc->cumulative_tsn;
1992	old_base = asoc->mapping_array_base_tsn;
1993	old_highest = asoc->highest_tsn_inside_map;
1994	/*
1995	 * We could probably improve this a small bit by calculating the
1996	 * offset of the current cum-ack as the starting point.
1997	 */
1998	at = 0;
1999	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2000		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2001		if (val == 0xff) {
2002			at += 8;
2003		} else {
2004			/* there is a 0 bit */
2005			at += sctp_map_lookup_tab[val];
2006			break;
2007		}
2008	}
2009	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
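	/*
	 * Example: if the OR'd map bytes are ff ff 1f ..., the scan above
	 * adds 8 + 8 for the two 0xff bytes and then
	 * sctp_map_lookup_tab[0x1f] == 5, so at == 21 and the new
	 * cumulative TSN is base + 20, i.e. TSNs base..base+20 are all
	 * present.
	 */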
2010
2011	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2012	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2013#ifdef INVARIANTS
2014		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2015		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2016#else
2017		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2018		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2019		sctp_print_mapping_array(asoc);
2020		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2021			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2022		}
2023		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2024		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2025#endif
2026	}
2027	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2028		highest_tsn = asoc->highest_tsn_inside_nr_map;
2029	} else {
2030		highest_tsn = asoc->highest_tsn_inside_map;
2031	}
2032	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2033		/* The complete array is filled: everything up to the highest */
2034		/* TSN is present, so highest becomes the cum-ack */
2035		int clr;
2036
2037#ifdef INVARIANTS
2038		unsigned int i;
2039
2040#endif
2041
2042		/* clear the array */
2043		clr = ((at + 7) >> 3);
2044		if (clr > asoc->mapping_array_size) {
2045			clr = asoc->mapping_array_size;
2046		}
2047		memset(asoc->mapping_array, 0, clr);
2048		memset(asoc->nr_mapping_array, 0, clr);
2049#ifdef INVARIANTS
2050		for (i = 0; i < asoc->mapping_array_size; i++) {
2051			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2052				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2053				sctp_print_mapping_array(asoc);
2054			}
2055		}
2056#endif
2057		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2058		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2059	} else if (at >= 8) {
2060		/* we can slide the mapping array down */
2061		/* slide_from holds where we hit the first NON 0xff byte */
2062
2063		/*
2064		 * now calculate the ceiling of the move using our highest
2065		 * TSN value
2066		 */
2067		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2068		slide_end = (lgap >> 3);
2069		if (slide_end < slide_from) {
2070			sctp_print_mapping_array(asoc);
2071#ifdef INVARIANTS
2072			panic("impossible slide");
2073#else
2074			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2075			    lgap, slide_end, slide_from, at);
2076			return;
2077#endif
2078		}
2079		if (slide_end > asoc->mapping_array_size) {
2080#ifdef INVARIANTS
2081			panic("would overrun buffer");
2082#else
2083			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2084			    asoc->mapping_array_size, slide_end);
2085			slide_end = asoc->mapping_array_size;
2086#endif
2087		}
2088		distance = (slide_end - slide_from) + 1;
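		/*
		 * Example: if the first non-0xff byte is at index 3
		 * (slide_from == 3) and the highest TSN lives in byte 10
		 * (slide_end == 10), then distance == 8 bytes are copied
		 * down to the front of both arrays and the base TSN is
		 * advanced by slide_from * 8 == 24 TSNs.
		 */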
2089		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2090			sctp_log_map(old_base, old_cumack, old_highest,
2091			    SCTP_MAP_PREPARE_SLIDE);
2092			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2093			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2094		}
2095		if (distance + slide_from > asoc->mapping_array_size ||
2096		    distance < 0) {
2097			/*
2098			 * Here we do NOT slide the array forward, so that
2099			 * hopefully when more data comes in to fill it up
2100			 * we will be able to slide it forward. Really, I
2101			 * don't think this should happen :-0
2102			 */
2103
2104			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2105				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2106				    (uint32_t) asoc->mapping_array_size,
2107				    SCTP_MAP_SLIDE_NONE);
2108			}
2109		} else {
2110			int ii;
2111
2112			for (ii = 0; ii < distance; ii++) {
2113				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2114				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2115
2116			}
2117			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2118				asoc->mapping_array[ii] = 0;
2119				asoc->nr_mapping_array[ii] = 0;
2120			}
2121			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2122				asoc->highest_tsn_inside_map += (slide_from << 3);
2123			}
2124			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2125				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2126			}
2127			asoc->mapping_array_base_tsn += (slide_from << 3);
2128			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2129				sctp_log_map(asoc->mapping_array_base_tsn,
2130				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2131				    SCTP_MAP_SLIDE_RESULT);
2132			}
2133		}
2134	}
2135}
2136
2137void
2138sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2139{
2140	struct sctp_association *asoc;
2141	uint32_t highest_tsn;
2142
2143	asoc = &stcb->asoc;
2144	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2145		highest_tsn = asoc->highest_tsn_inside_nr_map;
2146	} else {
2147		highest_tsn = asoc->highest_tsn_inside_map;
2148	}
2149
2150	/*
2151	 * Now we need to see if we need to queue a sack or just start the
2152	 * timer (if allowed).
2153	 */
2154	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2155		/*
2156		 * Ok, special case: in the SHUTDOWN-SENT case we make
2157		 * sure the SACK timer is off and instead send a SHUTDOWN
2158		 * and a SACK.
2159		 */
2160		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2161			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2162			    stcb->sctp_ep, stcb, NULL,
2163			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2164		}
2165		sctp_send_shutdown(stcb,
2166		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2167		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2168	} else {
2169		int is_a_gap;
2170
2171		/* is there a gap now ? */
2172		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2173
2174		/*
2175		 * CMT DAC algorithm: increase number of packets received
2176		 * since last ack
2177		 */
2178		stcb->asoc.cmt_dac_pkts_rcvd++;
2179
2180		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2181							 * SACK */
2182		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2183							 * longer is one */
2184		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2185		    (is_a_gap) ||	/* is still a gap */
2186		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2187		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2188		    ) {
2189
2190			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2191			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2192			    (stcb->asoc.send_sack == 0) &&
2193			    (stcb->asoc.numduptsns == 0) &&
2194			    (stcb->asoc.delayed_ack) &&
2195			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2196
2197				/*
2198				 * CMT DAC algorithm: With CMT, delay acks
2199				 * even in the face of reordering.
2200				 *
2201				 * Therefore, acks that do not have to be
2202				 * sent because of the above reasons will be
2203				 * delayed. That is, acks that would have
2204				 * been sent due to gap reports will be
2205				 * delayed with DAC. Start the delayed ack
2206				 * timer.
2207				 */
2208				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2209				    stcb->sctp_ep, stcb, NULL);
2210			} else {
2211				/*
2212				 * Ok we must build a SACK since the timer
2213				 * is pending, we got our first packet OR
2214				 * there are gaps or duplicates.
2215				 */
2216				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2217				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2218			}
2219		} else {
2220			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2221				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2222				    stcb->sctp_ep, stcb, NULL);
2223			}
2224		}
2225	}
2226}
2227
2228void
2229sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2230{
2231	struct sctp_tmit_chunk *chk;
2232	uint32_t tsize, pd_point;
2233	uint16_t nxt_todel;
2234
2235	if (asoc->fragmented_delivery_inprogress) {
2236		sctp_service_reassembly(stcb, asoc);
2237	}
2238	/* Can we proceed further, i.e. the PD-API is complete */
2239	if (asoc->fragmented_delivery_inprogress) {
2240		/* no */
2241		return;
2242	}
2243	/*
2244	 * Now is there some other chunk I can deliver from the reassembly
2245	 * queue.
2246	 */
2247doit_again:
2248	chk = TAILQ_FIRST(&asoc->reasmqueue);
2249	if (chk == NULL) {
2250		asoc->size_on_reasm_queue = 0;
2251		asoc->cnt_on_reasm_queue = 0;
2252		return;
2253	}
2254	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2255	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2256	    ((nxt_todel == chk->rec.data.stream_seq) ||
2257	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2258		/*
2259		 * Yep, the first one is here. We set up to start reception
2260		 * by backing down the TSN, just in case we can't deliver.
2261		 */
2262
2263		/*
2264		 * Before we start, though, either all of the message
2265		 * should be here, or at least enough of it (the partial
2266		 * delivery point) so that something can be delivered.
2267		 */
2268		if (stcb->sctp_socket) {
2269			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2270			    stcb->sctp_ep->partial_delivery_point);
2271		} else {
2272			pd_point = stcb->sctp_ep->partial_delivery_point;
2273		}
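		/*
		 * pd_point is how many bytes must be on the reassembly
		 * queue before the partial delivery API engages: the
		 * smaller of a fraction of the receive socket buffer and
		 * the endpoint's configured partial_delivery_point. A
		 * complete message always qualifies regardless of size.
		 */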
2274		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2275			asoc->fragmented_delivery_inprogress = 1;
2276			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2277			asoc->str_of_pdapi = chk->rec.data.stream_number;
2278			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2279			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2280			asoc->fragment_flags = chk->rec.data.rcv_flags;
2281			sctp_service_reassembly(stcb, asoc);
2282			if (asoc->fragmented_delivery_inprogress == 0) {
2283				goto doit_again;
2284			}
2285		}
2286	}
2287}
2288
2289int
2290sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2291    struct sockaddr *src, struct sockaddr *dst,
2292    struct sctphdr *sh, struct sctp_inpcb *inp,
2293    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2294    uint8_t mflowtype, uint32_t mflowid,
2295    uint32_t vrf_id, uint16_t port)
2296{
2297	struct sctp_data_chunk *ch, chunk_buf;
2298	struct sctp_association *asoc;
2299	int num_chunks = 0;	/* number of control chunks processed */
2300	int stop_proc = 0;
2301	int chk_length, break_flag, last_chunk;
2302	int abort_flag = 0, was_a_gap;
2303	struct mbuf *m;
2304	uint32_t highest_tsn;
2305
2306	/* set the rwnd */
2307	sctp_set_rwnd(stcb, &stcb->asoc);
2308
2309	m = *mm;
2310	SCTP_TCB_LOCK_ASSERT(stcb);
2311	asoc = &stcb->asoc;
2312	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2313		highest_tsn = asoc->highest_tsn_inside_nr_map;
2314	} else {
2315		highest_tsn = asoc->highest_tsn_inside_map;
2316	}
2317	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2318	/*
2319	 * Set up where we got the last DATA packet from, for any SACK that
2320	 * may need to go out. Don't bump the net; that is done ONLY when a
2321	 * chunk is assigned.
2322	 */
2323	asoc->last_data_chunk_from = net;
2324
2325	/*-
2326	 * Now before we proceed we must figure out if this is a wasted
2327	 * cluster... i.e. it is a small packet sent in and yet the driver
2328	 * underneath allocated a full cluster for it. If so we must copy it
2329	 * to a smaller mbuf and free up the cluster mbuf. This will help
2330	 * with cluster starvation. Note for __Panda__ we don't do this
2331	 * since it has clusters all the way down to 64 bytes.
2332	 */
2333	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2334		/* we only handle mbufs that are singletons.. not chains */
2335		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2336		if (m) {
2337			/* ok, let's see if we can copy the data up */
2338			caddr_t *from, *to;
2339
2340			/* get the pointers and copy */
2341			to = mtod(m, caddr_t *);
2342			from = mtod((*mm), caddr_t *);
2343			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2344			/* copy the length and free up the old */
2345			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2346			sctp_m_freem(*mm);
2347			/* success, copy back */
2348			*mm = m;
2349		} else {
2350			/* We are in trouble in the mbuf world .. yikes */
2351			m = *mm;
2352		}
2353	}
2354	/* get pointer to the first chunk header */
2355	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2356	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2357	if (ch == NULL) {
2358		return (1);
2359	}
2360	/*
2361	 * process all DATA chunks...
2362	 */
2363	*high_tsn = asoc->cumulative_tsn;
2364	break_flag = 0;
2365	asoc->data_pkts_seen++;
2366	while (stop_proc == 0) {
2367		/* validate chunk length */
2368		chk_length = ntohs(ch->ch.chunk_length);
2369		if (length - *offset < chk_length) {
2370			/* all done, mutilated chunk */
2371			stop_proc = 1;
2372			continue;
2373		}
2374		if (ch->ch.chunk_type == SCTP_DATA) {
2375			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2376				/*
2377				 * Need to send an abort since we had an
2378				 * invalid data chunk.
2379				 */
2380				struct mbuf *op_err;
2381				char msg[SCTP_DIAG_INFO_LEN];
2382
2383				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2384				    chk_length);
2385				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2386				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2387				sctp_abort_association(inp, stcb, m, iphlen,
2388				    src, dst, sh, op_err,
2389				    mflowtype, mflowid,
2390				    vrf_id, port);
2391				return (2);
2392			}
2393			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2394				/*
2395				 * Need to send an abort since we had an
2396				 * empty data chunk.
2397				 */
2398				struct mbuf *op_err;
2399
2400				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2401				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2402				sctp_abort_association(inp, stcb, m, iphlen,
2403				    src, dst, sh, op_err,
2404				    mflowtype, mflowid,
2405				    vrf_id, port);
2406				return (2);
2407			}
2408#ifdef SCTP_AUDITING_ENABLED
2409			sctp_audit_log(0xB1, 0);
2410#endif
2411			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2412				last_chunk = 1;
2413			} else {
2414				last_chunk = 0;
2415			}
2416			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2417			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2418			    last_chunk)) {
2419				num_chunks++;
2420			}
2421			if (abort_flag)
2422				return (2);
2423
2424			if (break_flag) {
2425				/*
2426				 * Set because we are out of rwnd space and
2427				 * have no drop report space left.
2428				 */
2429				stop_proc = 1;
2430				continue;
2431			}
2432		} else {
2433			/* not a data chunk in the data region */
2434			switch (ch->ch.chunk_type) {
2435			case SCTP_INITIATION:
2436			case SCTP_INITIATION_ACK:
2437			case SCTP_SELECTIVE_ACK:
2438			case SCTP_NR_SELECTIVE_ACK:
2439			case SCTP_HEARTBEAT_REQUEST:
2440			case SCTP_HEARTBEAT_ACK:
2441			case SCTP_ABORT_ASSOCIATION:
2442			case SCTP_SHUTDOWN:
2443			case SCTP_SHUTDOWN_ACK:
2444			case SCTP_OPERATION_ERROR:
2445			case SCTP_COOKIE_ECHO:
2446			case SCTP_COOKIE_ACK:
2447			case SCTP_ECN_ECHO:
2448			case SCTP_ECN_CWR:
2449			case SCTP_SHUTDOWN_COMPLETE:
2450			case SCTP_AUTHENTICATION:
2451			case SCTP_ASCONF_ACK:
2452			case SCTP_PACKET_DROPPED:
2453			case SCTP_STREAM_RESET:
2454			case SCTP_FORWARD_CUM_TSN:
2455			case SCTP_ASCONF:
2456				/*
2457				 * Now, what do we do with KNOWN chunks that
2458				 * are NOT in the right place?
2459				 *
2460				 * For now, I do nothing but ignore them. We
2461				 * may later want to add sysctl stuff to
2462				 * switch out and do either an ABORT() or
2463				 * possibly process them.
2464				 */
2465				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2466					struct mbuf *op_err;
2467
2468					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2469					sctp_abort_association(inp, stcb,
2470					    m, iphlen,
2471					    src, dst,
2472					    sh, op_err,
2473					    mflowtype, mflowid,
2474					    vrf_id, port);
2475					return (2);
2476				}
2477				break;
2478			default:
2479				/* unknown chunk type, use bit rules */
2480				if (ch->ch.chunk_type & 0x40) {
2481					/* Add a error report to the queue */
2482					struct mbuf *merr;
2483					struct sctp_paramhdr *phd;
2484
2485					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2486					if (merr) {
2487						phd = mtod(merr, struct sctp_paramhdr *);
2488						/*
2489						 * We cheat and use param
2490						 * type since we did not
2491						 * bother to define an error
2492						 * cause struct. They are
2493						 * the same basic format
2494						 * with different names.
2495						 */
2496						phd->param_type =
2497						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2498						phd->param_length =
2499						    htons(chk_length + sizeof(*phd));
2500						SCTP_BUF_LEN(merr) = sizeof(*phd);
2501						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2502						if (SCTP_BUF_NEXT(merr)) {
2503							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
2504								sctp_m_freem(merr);
2505							} else {
2506								sctp_queue_op_err(stcb, merr);
2507							}
2508						} else {
2509							sctp_m_freem(merr);
2510						}
2511					}
2512				}
2513				if ((ch->ch.chunk_type & 0x80) == 0) {
2514					/* discard the rest of this packet */
2515					stop_proc = 1;
2516				}	/* else skip this bad chunk and
2517					 * continue... */
2518				break;
2519			}	/* switch of chunk type */
2520		}
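		/*
		 * Chunks are padded to a 4-byte boundary on the wire, so
		 * advance by the 32-bit rounded length, not chk_length
		 * itself.
		 */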
2521		*offset += SCTP_SIZE32(chk_length);
2522		if ((*offset >= length) || stop_proc) {
2523			/* no more data left in the mbuf chain */
2524			stop_proc = 1;
2525			continue;
2526		}
2527		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2528		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2529		if (ch == NULL) {
2530			*offset = length;
2531			stop_proc = 1;
2532			continue;
2533		}
2534	}
2535	if (break_flag) {
2536		/*
2537		 * we need to report rwnd overrun drops.
2538		 */
2539		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2540	}
2541	if (num_chunks) {
2542		/*
2543		 * Did we get data? If so, update the time for auto-close and
2544		 * give the peer credit for being alive.
2545		 */
2546		SCTP_STAT_INCR(sctps_recvpktwithdata);
2547		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2548			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2549			    stcb->asoc.overall_error_count,
2550			    0,
2551			    SCTP_FROM_SCTP_INDATA,
2552			    __LINE__);
2553		}
2554		stcb->asoc.overall_error_count = 0;
2555		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2556	}
2557	/* now service all of the reassm queue if needed */
2558	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2559		sctp_service_queues(stcb, asoc);
2560
2561	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2562		/* Assure that we ack right away */
2563		stcb->asoc.send_sack = 1;
2564	}
2565	/* Start a sack timer or QUEUE a SACK for sending */
2566	sctp_sack_check(stcb, was_a_gap);
2567	return (0);
2568}
2569
2570static int
2571sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2572    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2573    int *num_frs,
2574    uint32_t * biggest_newly_acked_tsn,
2575    uint32_t * this_sack_lowest_newack,
2576    int *rto_ok)
2577{
2578	struct sctp_tmit_chunk *tp1;
2579	unsigned int theTSN;
2580	int j, wake_him = 0, circled = 0;
2581
2582	/* Recover the tp1 we last saw */
2583	tp1 = *p_tp1;
2584	if (tp1 == NULL) {
2585		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2586	}
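	/*
	 * Gap ack block boundaries are offsets relative to the SACK's
	 * cumulative TSN, so each in-range TSN is last_tsn + j.
	 */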
2587	for (j = frag_strt; j <= frag_end; j++) {
2588		theTSN = j + last_tsn;
2589		while (tp1) {
2590			if (tp1->rec.data.doing_fast_retransmit)
2591				(*num_frs) += 1;
2592
2593			/*-
2594			 * CMT: CUCv2 algorithm. For each TSN being
2595			 * processed from the sent queue, track the
2596			 * next expected pseudo-cumack, or
2597			 * rtx_pseudo_cumack, if required. Separate
2598			 * cumack trackers for first transmissions,
2599			 * and retransmissions.
2600			 */
2601			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2602			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2603			    (tp1->snd_count == 1)) {
2604				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2605				tp1->whoTo->find_pseudo_cumack = 0;
2606			}
2607			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2608			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2609			    (tp1->snd_count > 1)) {
2610				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2611				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2612			}
2613			if (tp1->rec.data.TSN_seq == theTSN) {
2614				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2615					/*-
2616					 * must be held until
2617					 * cum-ack passes
2618					 */
2619					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2620						/*-
2621						 * If it is less than RESEND, it is
2622						 * now no longer in flight.
2623						 * Higher values may already be set
2624						 * via previous Gap Ack Blocks...
2625						 * i.e. ACKED or RESEND.
2626						 */
2627						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2628						    *biggest_newly_acked_tsn)) {
2629							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2630						}
2631						/*-
2632						 * CMT: SFR algo (and HTNA) - set
2633						 * saw_newack to 1 for dest being
2634						 * newly acked. update
2635						 * this_sack_highest_newack if
2636						 * appropriate.
2637						 */
2638						if (tp1->rec.data.chunk_was_revoked == 0)
2639							tp1->whoTo->saw_newack = 1;
2640
2641						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2642						    tp1->whoTo->this_sack_highest_newack)) {
2643							tp1->whoTo->this_sack_highest_newack =
2644							    tp1->rec.data.TSN_seq;
2645						}
2646						/*-
2647						 * CMT DAC algo: also update
2648						 * this_sack_lowest_newack
2649						 */
2650						if (*this_sack_lowest_newack == 0) {
2651							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2652								sctp_log_sack(*this_sack_lowest_newack,
2653								    last_tsn,
2654								    tp1->rec.data.TSN_seq,
2655								    0,
2656								    0,
2657								    SCTP_LOG_TSN_ACKED);
2658							}
2659							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2660						}
2661						/*-
2662						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2663						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2664						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2665						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2666						 * Separate pseudo_cumack trackers for first transmissions and
2667						 * retransmissions.
2668						 */
2669						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2670							if (tp1->rec.data.chunk_was_revoked == 0) {
2671								tp1->whoTo->new_pseudo_cumack = 1;
2672							}
2673							tp1->whoTo->find_pseudo_cumack = 1;
2674						}
2675						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2676							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2677						}
2678						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2679							if (tp1->rec.data.chunk_was_revoked == 0) {
2680								tp1->whoTo->new_pseudo_cumack = 1;
2681							}
2682							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2683						}
2684						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2685							sctp_log_sack(*biggest_newly_acked_tsn,
2686							    last_tsn,
2687							    tp1->rec.data.TSN_seq,
2688							    frag_strt,
2689							    frag_end,
2690							    SCTP_LOG_TSN_ACKED);
2691						}
2692						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2693							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2694							    tp1->whoTo->flight_size,
2695							    tp1->book_size,
2696							    (uintptr_t) tp1->whoTo,
2697							    tp1->rec.data.TSN_seq);
2698						}
2699						sctp_flight_size_decrease(tp1);
2700						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2701							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2702							    tp1);
2703						}
2704						sctp_total_flight_decrease(stcb, tp1);
2705
2706						tp1->whoTo->net_ack += tp1->send_size;
2707						if (tp1->snd_count < 2) {
2708							/*-
2709							 * True non-retransmitted chunk
2710							 */
2711							tp1->whoTo->net_ack2 += tp1->send_size;
2712
2713							/*-
2714							 * update RTO too ?
2715							 */
2716							if (tp1->do_rtt) {
2717								if (*rto_ok) {
2718									tp1->whoTo->RTO =
2719									    sctp_calculate_rto(stcb,
2720									    &stcb->asoc,
2721									    tp1->whoTo,
2722									    &tp1->sent_rcv_time,
2723									    sctp_align_safe_nocopy,
2724									    SCTP_RTT_FROM_DATA);
2725									*rto_ok = 0;
2726								}
2727								if (tp1->whoTo->rto_needed == 0) {
2728									tp1->whoTo->rto_needed = 1;
2729								}
2730								tp1->do_rtt = 0;
2731							}
2732						}
2733					}
2734					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2735						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2736						    stcb->asoc.this_sack_highest_gap)) {
2737							stcb->asoc.this_sack_highest_gap =
2738							    tp1->rec.data.TSN_seq;
2739						}
2740						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2741							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2742#ifdef SCTP_AUDITING_ENABLED
2743							sctp_audit_log(0xB2,
2744							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2745#endif
2746						}
2747					}
2748					/*-
2749					 * All chunks NOT UNSENT fall through here and are marked
2750					 * (leave PR-SCTP ones that are to skip alone though)
2751					 */
2752					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2753					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2754						tp1->sent = SCTP_DATAGRAM_MARKED;
2755					}
2756					if (tp1->rec.data.chunk_was_revoked) {
2757						/* deflate the cwnd */
2758						tp1->whoTo->cwnd -= tp1->book_size;
2759						tp1->rec.data.chunk_was_revoked = 0;
2760					}
2761					/* NR Sack code here */
2762					if (nr_sacking &&
2763					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2764						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2765							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2766#ifdef INVARIANTS
2767						} else {
2768							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2769#endif
2770						}
2771						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2772						if (tp1->data) {
2773							/*
2774							 * sa_ignore
2775							 * NO_NULL_CHK
2776							 */
2777							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2778							sctp_m_freem(tp1->data);
2779							tp1->data = NULL;
2780						}
2781						wake_him++;
2782					}
2783				}
2784				break;
2785			}	/* if (tp1->TSN_seq == theTSN) */
2786			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2787				break;
2788			}
2789			tp1 = TAILQ_NEXT(tp1, sctp_next);
2790			if ((tp1 == NULL) && (circled == 0)) {
2791				circled++;
2792				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2793			}
2794		}		/* end while (tp1) */
2795		if (tp1 == NULL) {
2796			circled = 0;
2797			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2798		}
2799		/* In case the fragments were not in order we must restart from the head */
2800	}			/* end for (j = fragStart */
2801	*p_tp1 = tp1;
2802	return (wake_him);	/* Return value only used for nr-sack */
2803}
2804
2805
2806static int
2807sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2808    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2809    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2810    int num_seg, int num_nr_seg, int *rto_ok)
2811{
2812	struct sctp_gap_ack_block *frag, block;
2813	struct sctp_tmit_chunk *tp1;
2814	int i;
2815	int num_frs = 0;
2816	int chunk_freed;
2817	int non_revocable;
2818	uint16_t frag_strt, frag_end, prev_frag_end;
2819
2820	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2821	prev_frag_end = 0;
2822	chunk_freed = 0;
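	/*
	 * The first num_seg blocks are ordinary (revocable) gap reports;
	 * the remaining num_nr_seg blocks are NR gap reports, which are
	 * processed with nr_sacking set so the acked chunks can be freed
	 * immediately.
	 */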
2823
2824	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2825		if (i == num_seg) {
2826			prev_frag_end = 0;
2827			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2828		}
2829		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2830		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2831		*offset += sizeof(block);
2832		if (frag == NULL) {
2833			return (chunk_freed);
2834		}
2835		frag_strt = ntohs(frag->start);
2836		frag_end = ntohs(frag->end);
2837
2838		if (frag_strt > frag_end) {
2839			/* This gap report is malformed, skip it. */
2840			continue;
2841		}
2842		if (frag_strt <= prev_frag_end) {
2843			/* This gap report is not in order, so restart. */
2844			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2845		}
2846		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2847			*biggest_tsn_acked = last_tsn + frag_end;
2848		}
2849		if (i < num_seg) {
2850			non_revocable = 0;
2851		} else {
2852			non_revocable = 1;
2853		}
2854		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2855		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2856		    this_sack_lowest_newack, rto_ok)) {
2857			chunk_freed = 1;
2858		}
2859		prev_frag_end = frag_end;
2860	}
2861	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2862		if (num_frs)
2863			sctp_log_fr(*biggest_tsn_acked,
2864			    *biggest_newly_acked_tsn,
2865			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2866	}
2867	return (chunk_freed);
2868}
2869
2870static void
2871sctp_check_for_revoked(struct sctp_tcb *stcb,
2872    struct sctp_association *asoc, uint32_t cumack,
2873    uint32_t biggest_tsn_acked)
2874{
2875	struct sctp_tmit_chunk *tp1;
2876
2877	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2878		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2879			/*
2880			 * Ok, this guy is either ACKED or MARKED. If it is
2881			 * ACKED it was previously acked but not this
2882			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2883			 * again.
2884			 */
2885			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2886				break;
2887			}
2888			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2889				/* it has been revoked */
2890				tp1->sent = SCTP_DATAGRAM_SENT;
2891				tp1->rec.data.chunk_was_revoked = 1;
2892				/*
2893				 * We must add this stuff back in to assure
2894				 * timers and such get started.
2895				 */
2896				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2897					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2898					    tp1->whoTo->flight_size,
2899					    tp1->book_size,
2900					    (uintptr_t) tp1->whoTo,
2901					    tp1->rec.data.TSN_seq);
2902				}
2903				sctp_flight_size_increase(tp1);
2904				sctp_total_flight_increase(stcb, tp1);
2905				/*
2906				 * We inflate the cwnd to compensate for our
2907				 * artificial inflation of the flight_size.
2908				 */
2909				tp1->whoTo->cwnd += tp1->book_size;
2910				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2911					sctp_log_sack(asoc->last_acked_seq,
2912					    cumack,
2913					    tp1->rec.data.TSN_seq,
2914					    0,
2915					    0,
2916					    SCTP_LOG_TSN_REVOKED);
2917				}
2918			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2919				/* it has been re-acked in this SACK */
2920				tp1->sent = SCTP_DATAGRAM_ACKED;
2921			}
2922		}
2923		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2924			break;
2925	}
2926}
2927
2928
2929static void
2930sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2931    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2932{
2933	struct sctp_tmit_chunk *tp1;
2934	int strike_flag = 0;
2935	struct timeval now;
2936	int tot_retrans = 0;
2937	uint32_t sending_seq;
2938	struct sctp_nets *net;
2939	int num_dests_sacked = 0;
2940
2941	/*
2942	 * Select the sending_seq: this is either the next thing ready to be
2943	 * sent but not transmitted, OR the next seq we will assign.
2944	 */
2945	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2946	if (tp1 == NULL) {
2947		sending_seq = asoc->sending_seq;
2948	} else {
2949		sending_seq = tp1->rec.data.TSN_seq;
2950	}
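	/*
	 * Below, each strike increments tp1->sent; once it reaches
	 * SCTP_DATAGRAM_RESEND the chunk is scheduled for fast
	 * retransmission and pulled out of flight.
	 */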
2951
2952	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2953	if ((asoc->sctp_cmt_on_off > 0) &&
2954	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2955		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2956			if (net->saw_newack)
2957				num_dests_sacked++;
2958		}
2959	}
2960	if (stcb->asoc.prsctp_supported) {
2961		(void)SCTP_GETTIME_TIMEVAL(&now);
2962	}
2963	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2964		strike_flag = 0;
2965		if (tp1->no_fr_allowed) {
2966			/* this one had a timeout or something */
2967			continue;
2968		}
2969		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2970			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2971				sctp_log_fr(biggest_tsn_newly_acked,
2972				    tp1->rec.data.TSN_seq,
2973				    tp1->sent,
2974				    SCTP_FR_LOG_CHECK_STRIKE);
2975		}
2976		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2977		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2978			/* done */
2979			break;
2980		}
2981		if (stcb->asoc.prsctp_supported) {
2982			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2983				/* Is it expired? */
2984				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2985					/* Yes so drop it */
2986					if (tp1->data != NULL) {
2987						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2988						    SCTP_SO_NOT_LOCKED);
2989					}
2990					continue;
2991				}
2992			}
2993		}
2994		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2995			/* we are beyond the tsn in the sack  */
2996			break;
2997		}
2998		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2999			/* either a RESEND, ACKED, or MARKED */
3000			/* skip */
3001			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3002				/* Continue striking FWD-TSN chunks */
3003				tp1->rec.data.fwd_tsn_cnt++;
3004			}
3005			continue;
3006		}
3007		/*
3008		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3009		 */
3010		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3011			/*
3012			 * No new acks were received for data sent to this
3013			 * dest. Therefore, according to the SFR algo for
3014			 * CMT, no data sent to this dest can be marked for
3015			 * FR using this SACK.
3016			 */
3017			continue;
3018		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3019		    tp1->whoTo->this_sack_highest_newack)) {
3020			/*
3021			 * CMT: New acks were received for data sent to
3022			 * this dest. But no new acks were seen for data
3023			 * sent after tp1. Therefore, according to the SFR
3024			 * algo for CMT, tp1 cannot be marked for FR using
3025			 * this SACK. This step covers part of the DAC algo
3026			 * and the HTNA algo as well.
3027			 */
3028			continue;
3029		}
3030		/*
3031		 * Here we check to see whether we have already done a FR
3032		 * and, if so, whether the biggest TSN we saw in the sack is
3033		 * smaller than the recovery point. If so we don't strike
3034		 * the tsn... otherwise we CAN strike the TSN.
3035		 */
3036		/*
3037		 * @@@ JRI: Check for CMT if (accum_moved &&
3038		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3039		 * 0)) {
3040		 */
3041		if (accum_moved && asoc->fast_retran_loss_recovery) {
3042			/*
3043			 * Strike the TSN if in fast-recovery and cum-ack
3044			 * moved.
3045			 */
3046			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3047				sctp_log_fr(biggest_tsn_newly_acked,
3048				    tp1->rec.data.TSN_seq,
3049				    tp1->sent,
3050				    SCTP_FR_LOG_STRIKE_CHUNK);
3051			}
3052			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3053				tp1->sent++;
3054			}
3055			if ((asoc->sctp_cmt_on_off > 0) &&
3056			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3057				/*
3058				 * CMT DAC algorithm: If SACK flag is set to
3059				 * 0, then lowest_newack test will not pass
3060				 * because it would have been set to the
3061				 * cumack earlier. If not already to be
3062				 * rtx'd, If not a mixed sack and if tp1 is
3063				 * not between two sacked TSNs, then mark by
3064				 * one more. NOTE that we are marking by one
3065				 * additional time since the SACK DAC flag
3066				 * indicates that two packets have been
3067				 * received after this missing TSN.
3068				 */
3069				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3070				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3071					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3072						sctp_log_fr(16 + num_dests_sacked,
3073						    tp1->rec.data.TSN_seq,
3074						    tp1->sent,
3075						    SCTP_FR_LOG_STRIKE_CHUNK);
3076					}
3077					tp1->sent++;
3078				}
3079			}
3080		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3081		    (asoc->sctp_cmt_on_off == 0)) {
3082			/*
3083			 * For those that have done a FR we must take
3084			 * special consideration if we strike, i.e. the
3085			 * biggest_newly_acked must be higher than the
3086			 * sending_seq at the time we did the FR.
3087			 */
3088			if (
3089#ifdef SCTP_FR_TO_ALTERNATE
3090			/*
3091			 * If FR's go to new networks, then we must only do
3092			 * this for singly homed asoc's. However if the FR's
3093			 * go to the same network (Armando's work) then it's
3094			 * ok to FR multiple times.
3095			 */
3096			    (asoc->numnets < 2)
3097#else
3098			    (1)
3099#endif
3100			    ) {
3101
3102				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3103				    tp1->rec.data.fast_retran_tsn)) {
3104					/*
3105					 * Strike the TSN, since this ack is
3106					 * beyond where things were when we
3107					 * did a FR.
3108					 */
3109					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3110						sctp_log_fr(biggest_tsn_newly_acked,
3111						    tp1->rec.data.TSN_seq,
3112						    tp1->sent,
3113						    SCTP_FR_LOG_STRIKE_CHUNK);
3114					}
3115					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3116						tp1->sent++;
3117					}
3118					strike_flag = 1;
3119					if ((asoc->sctp_cmt_on_off > 0) &&
3120					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3121						/*
3122						 * CMT DAC algorithm: If
3123						 * SACK flag is set to 0,
3124						 * then lowest_newack test
3125						 * will not pass because it
3126						 * would have been set to
3127						 * the cumack earlier. If
3128						 * not already to be rtx'd,
3129						 * If not a mixed sack and
3130						 * if tp1 is not between two
3131						 * sacked TSNs, then mark by
3132						 * one more. NOTE that we
3133						 * are marking by one
3134						 * additional time since the
3135						 * SACK DAC flag indicates
3136						 * that two packets have
3137						 * been received after this
3138						 * missing TSN.
3139						 */
3140						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3141						    (num_dests_sacked == 1) &&
3142						    SCTP_TSN_GT(this_sack_lowest_newack,
3143						    tp1->rec.data.TSN_seq)) {
3144							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3145								sctp_log_fr(32 + num_dests_sacked,
3146								    tp1->rec.data.TSN_seq,
3147								    tp1->sent,
3148								    SCTP_FR_LOG_STRIKE_CHUNK);
3149							}
3150							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3151								tp1->sent++;
3152							}
3153						}
3154					}
3155				}
3156			}
3157			/*
3158			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3159			 * algo covers HTNA.
3160			 */
3161		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3162		    biggest_tsn_newly_acked)) {
3163			/*
3164			 * We don't strike these: this is the HTNA
3165			 * algorithm, i.e. we don't strike if our TSN is
3166			 * larger than the Highest TSN Newly Acked.
3167			 */
3168			;
3169		} else {
3170			/* Strike the TSN */
3171			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3172				sctp_log_fr(biggest_tsn_newly_acked,
3173				    tp1->rec.data.TSN_seq,
3174				    tp1->sent,
3175				    SCTP_FR_LOG_STRIKE_CHUNK);
3176			}
3177			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3178				tp1->sent++;
3179			}
3180			if ((asoc->sctp_cmt_on_off > 0) &&
3181			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3182				/*
3183				 * CMT DAC algorithm: If the SACK's DAC flag
3184				 * is set to 0, then the lowest_newack test
3185				 * will not pass, because it would have been
3186				 * set to the cumack earlier. If not already
3187				 * marked for rtx, if not a mixed SACK, and
3188				 * if tp1 is not between two SACKed TSNs,
3189				 * then mark it one more time. NOTE that we
3190				 * mark one additional time since the SACK
3191				 * DAC flag indicates that two packets have
3192				 * been received after this missing TSN.
3193				 */
3194				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3195				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3196					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3197						sctp_log_fr(48 + num_dests_sacked,
3198						    tp1->rec.data.TSN_seq,
3199						    tp1->sent,
3200						    SCTP_FR_LOG_STRIKE_CHUNK);
3201					}
3202					tp1->sent++;
3203				}
3204			}
3205		}
3206		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3207			struct sctp_nets *alt;
3208
3209			/* fix counts and things */
3210			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3211				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3212				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3213				    tp1->book_size,
3214				    (uintptr_t) tp1->whoTo,
3215				    tp1->rec.data.TSN_seq);
3216			}
3217			if (tp1->whoTo) {
3218				tp1->whoTo->net_ack++;
3219				sctp_flight_size_decrease(tp1);
3220				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3221					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3222					    tp1);
3223				}
3224			}
3225			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3226				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3227				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3228			}
3229			/* add back to the rwnd */
3230			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3231
3232			/* remove from the total flight */
3233			sctp_total_flight_decrease(stcb, tp1);
3234
3235			if ((stcb->asoc.prsctp_supported) &&
3236			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3237				/*
3238				 * Has it been retransmitted tv_sec times? -
3239				 * we store the retran count there.
3240				 */
3241				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3242					/* Yes, so drop it */
3243					if (tp1->data != NULL) {
3244						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3245						    SCTP_SO_NOT_LOCKED);
3246					}
3247					/* Make sure to flag we had a FR */
3248					tp1->whoTo->net_ack++;
3249					continue;
3250				}
3251			}
3252			/*
3253			 * SCTP_PRINTF("OK, we are now ready to FR this
3254			 * guy\n");
3255			 */
3256			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3257				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3258				    0, SCTP_FR_MARKED);
3259			}
3260			if (strike_flag) {
3261				/* This is a subsequent FR */
3262				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3263			}
3264			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3265			if (asoc->sctp_cmt_on_off > 0) {
3266				/*
3267				 * CMT: Using RTX_SSTHRESH policy for CMT.
3268				 * If CMT is being used, then pick dest with
3269				 * largest ssthresh for any retransmission.
3270				 */
3271				tp1->no_fr_allowed = 1;
3272				alt = tp1->whoTo;
3273				/* sa_ignore NO_NULL_CHK */
3274				if (asoc->sctp_cmt_pf > 0) {
3275					/*
3276					 * JRS 5/18/07 - If CMT PF is on,
3277					 * use the PF version of
3278					 * find_alt_net()
3279					 */
3280					alt = sctp_find_alternate_net(stcb, alt, 2);
3281				} else {
3282					/*
3283					 * JRS 5/18/07 - If only CMT is on,
3284					 * use the CMT version of
3285					 * find_alt_net()
3286					 */
3287					/* sa_ignore NO_NULL_CHK */
3288					alt = sctp_find_alternate_net(stcb, alt, 1);
3289				}
3290				if (alt == NULL) {
3291					alt = tp1->whoTo;
3292				}
3293				/*
3294				 * CUCv2: If a different dest is picked for
3295				 * the retransmission, then new
3296				 * (rtx-)pseudo_cumack needs to be tracked
3297				 * for orig dest. Let CUCv2 track new (rtx-)
3298				 * pseudo-cumack always.
3299				 */
3300				if (tp1->whoTo) {
3301					tp1->whoTo->find_pseudo_cumack = 1;
3302					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3303				}
3304			} else {/* CMT is OFF */
3305
3306#ifdef SCTP_FR_TO_ALTERNATE
3307				/* Can we find an alternate? */
3308				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3309#else
3310				/*
3311				 * default behavior is to NOT retransmit
3312				 * FR's to an alternate. Armando Caro's
3313				 * paper details why.
3314				 */
3315				alt = tp1->whoTo;
3316#endif
3317			}
3318
3319			tp1->rec.data.doing_fast_retransmit = 1;
3320			tot_retrans++;
3321			/* mark the sending seq for possible subsequent FR's */
3322			/*
3323			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3324			 * (uint32_t)tpi->rec.data.TSN_seq);
3325			 */
3326			if (TAILQ_EMPTY(&asoc->send_queue)) {
3327				/*
3328				 * If the send queue is empty, then sending_seq
3329				 * is the next sequence number that will be
3330				 * assigned, so it marks the point just past
3331				 * the one we last sent.
3332				 */
3333				tp1->rec.data.fast_retran_tsn = sending_seq;
3334			} else {
3335				/*
3336				 * If there are chunks on the send queue
3337				 * (unsent data that has made it from the
3338				 * stream queues but not out the door), we
3339				 * take the first one (which will have the
3340				 * lowest TSN) to mark the point just past
3341				 * the one we last sent.
3342				 */
3343				struct sctp_tmit_chunk *ttt;
3344
3345				ttt = TAILQ_FIRST(&asoc->send_queue);
3346				tp1->rec.data.fast_retran_tsn =
3347				    ttt->rec.data.TSN_seq;
3348			}
3349
3350			if (tp1->do_rtt) {
3351				/*
3352				 * This guy had an RTO calculation pending on
3353				 * it; cancel it.
3354				 */
3355				if ((tp1->whoTo != NULL) &&
3356				    (tp1->whoTo->rto_needed == 0)) {
3357					tp1->whoTo->rto_needed = 1;
3358				}
3359				tp1->do_rtt = 0;
3360			}
3361			if (alt != tp1->whoTo) {
3362				/* yes, there is an alternate. */
3363				sctp_free_remote_addr(tp1->whoTo);
3364				/* sa_ignore FREED_MEMORY */
3365				tp1->whoTo = alt;
3366				atomic_add_int(&alt->ref_count, 1);
3367			}
3368		}
3369	}
3370}
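/*
 * Editorial aside -- not part of sctp_indata.c. The strike logic above
 * depends entirely on SCTP_TSN_GT()/SCTP_TSN_GE(), which compare 32-bit
 * TSNs using serial-number arithmetic so the ordering stays correct when
 * the TSN space wraps. A minimal userland sketch of that comparison,
 * using hypothetical stand-in macros rather than the kernel's own
 * definitions:
 */
#include <stdint.h>
#include <stdio.h>

/* Stand-ins modelled on the common serial-number comparison idiom. */
#define TOY_TSN_GT(a, b) (((int32_t)((uint32_t)(a) - (uint32_t)(b))) > 0)
#define TOY_TSN_GE(a, b) (((int32_t)((uint32_t)(a) - (uint32_t)(b))) >= 0)

int
main(void)
{
	/* 0x00000001 comes "after" 0xfffffffe once the TSN space wraps. */
	printf("%d\n", TOY_TSN_GT(0x00000001U, 0xfffffffeU));	/* 1 */
	printf("%d\n", TOY_TSN_GT(0xfffffffeU, 0x00000001U));	/* 0 */
	printf("%d\n", TOY_TSN_GE(0x00000005U, 0x00000005U));	/* 1 */
	return (0);
}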
3371
3372struct sctp_tmit_chunk *
3373sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3374    struct sctp_association *asoc)
3375{
3376	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3377	struct timeval now;
3378	int now_filled = 0;
3379
3380	if (asoc->prsctp_supported == 0) {
3381		return (NULL);
3382	}
3383	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3384		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3385		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3386		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3387			/* no chance to advance, out of here */
3388			break;
3389		}
3390		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3391			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3392			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3393				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3394				    asoc->advanced_peer_ack_point,
3395				    tp1->rec.data.TSN_seq, 0, 0);
3396			}
3397		}
3398		if (!PR_SCTP_ENABLED(tp1->flags)) {
3399			/*
3400			 * We can't fwd-tsn past any that are reliable,
3401			 * i.e., will be retransmitted until the asoc fails.
3402			 */
3403			break;
3404		}
3405		if (!now_filled) {
3406			(void)SCTP_GETTIME_TIMEVAL(&now);
3407			now_filled = 1;
3408		}
3409		/*
3410		 * Now we have a chunk which is marked for another
3411		 * retransmission to a PR-stream, but which has perhaps
3412		 * already run out of chances, OR has now been marked to
3413		 * skip. Can we skip it if it is a resend?
3414		 */
3415		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3416		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3417			/*
3418			 * Now is this one marked for resend and its time is
3419			 * now up?
3420			 */
3421			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3422				/* Yes so drop it */
3423				if (tp1->data) {
3424					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3425					    1, SCTP_SO_NOT_LOCKED);
3426				}
3427			} else {
3428				/*
3429				 * No, we are done when we hit one marked for
3430				 * resend whose time has not expired.
3431				 */
3432				break;
3433			}
3434		}
3435		/*
3436		 * OK, if this chunk is marked to be dropped, we can clean
3437		 * it up, advance our peer ack point, and check the next
3438		 * chunk.
3439		 */
3440		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3441		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3442			/* the advanced PeerAckPoint goes forward */
3443			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3444				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3445				a_adv = tp1;
3446			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3447				/* No update but we do save the chk */
3448				a_adv = tp1;
3449			}
3450		} else {
3451			/*
3452			 * If it is still in RESEND we can advance no
3453			 * further
3454			 */
3455			break;
3456		}
3457	}
3458	return (a_adv);
3459}
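/*
 * Editorial aside -- not part of sctp_indata.c. The essence of
 * sctp_try_advance_peer_ack_point() is a scan over the in-order prefix
 * of the sent queue: the Advanced.Peer.Ack.Point moves across chunks
 * that may be abandoned (FORWARD_TSN_SKIP / NR_ACKED above) and stops
 * at the first chunk that must still be delivered reliably. A stripped
 * userland sketch with hypothetical toy types:
 */
#include <stddef.h>
#include <stdint.h>

struct toy_adv_chunk {
	uint32_t tsn;
	int abandoned;		/* may be skipped via FORWARD-TSN */
	struct toy_adv_chunk *next;
};

/* Walk the prefix of abandoned chunks; stop at the first reliable one. */
static uint32_t
toy_advance_ack_point(struct toy_adv_chunk *sent_queue, uint32_t ack_point)
{
	struct toy_adv_chunk *c;

	for (c = sent_queue; c != NULL; c = c->next) {
		if (!c->abandoned)
			break;	/* reliable chunk: no further advance */
		ack_point = c->tsn;
	}
	return (ack_point);
}

int
main(void)
{
	struct toy_adv_chunk c2 = { 12, 0, NULL };	/* reliable: blocks us */
	struct toy_adv_chunk c1 = { 11, 1, &c2 };
	struct toy_adv_chunk c0 = { 10, 1, &c1 };

	/* The ack point advances from 9 to 11, then stops before TSN 12. */
	return (toy_advance_ack_point(&c0, 9) == 11 ? 0 : 1);
}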
3460
3461static int
3462sctp_fs_audit(struct sctp_association *asoc)
3463{
3464	struct sctp_tmit_chunk *chk;
3465	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3466	int ret;
3467
3468#ifndef INVARIANTS
3469	int entry_flight, entry_cnt;
3470
3471#endif
3472
3473	ret = 0;
3474#ifndef INVARIANTS
3475	entry_flight = asoc->total_flight;
3476	entry_cnt = asoc->total_flight_count;
3477#endif
3478	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3479		return (0);
3480
3481	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3482		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3483			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3484			    chk->rec.data.TSN_seq,
3485			    chk->send_size,
3486			    chk->snd_count);
3487			inflight++;
3488		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3489			resend++;
3490		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3491			inbetween++;
3492		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3493			above++;
3494		} else {
3495			acked++;
3496		}
3497	}
3498
3499	if ((inflight > 0) || (inbetween > 0)) {
3500#ifdef INVARIANTS
3501		panic("Flight size-express incorrect? \n");
3502#else
3503		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3504		    entry_flight, entry_cnt);
3505
3506		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3507		    inflight, inbetween, resend, above, acked);
3508		ret = 1;
3509#endif
3510	}
3511	return (ret);
3512}
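/*
 * Editorial aside -- not part of sctp_indata.c. sctp_fs_audit() above
 * recounts the sent queue by per-chunk state and flags an inconsistency
 * if anything still looks "in flight" (below RESEND) or sits strictly
 * between RESEND and ACKED. A hypothetical userland restatement of the
 * invariant it checks:
 */
#include <stdio.h>

/* Toy values mirroring only the relative order of the kernel constants. */
enum toy_sent_state {
	TOY_SENT = 1,		/* < RESEND: counts as in flight */
	TOY_RESEND = 4,
	TOY_ACKED = 10,
	TOY_MARKED = 20		/* > ACKED: e.g. marked for FWD-TSN skip */
};

static int
toy_fs_audit(const enum toy_sent_state *st, int n)
{
	int i, inflight = 0, inbetween = 0;

	for (i = 0; i < n; i++) {
		if (st[i] < TOY_RESEND)
			inflight++;
		else if (st[i] > TOY_RESEND && st[i] < TOY_ACKED)
			inbetween++;
	}
	if (inflight || inbetween) {
		fprintf(stderr, "audit failed F:%d I:%d\n", inflight, inbetween);
		return (1);
	}
	return (0);
}

int
main(void)
{
	enum toy_sent_state ok[] = { TOY_RESEND, TOY_ACKED, TOY_MARKED };

	return (toy_fs_audit(ok, 3));	/* passes: nothing left in flight */
}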
3513
3514
3515static void
3516sctp_window_probe_recovery(struct sctp_tcb *stcb,
3517    struct sctp_association *asoc,
3518    struct sctp_tmit_chunk *tp1)
3519{
3520	tp1->window_probe = 0;
3521	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3522		/* Skipped or already-ACKed TSNs we do NOT move back. */
3523		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3524		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3525		    tp1->book_size,
3526		    (uintptr_t) tp1->whoTo,
3527		    tp1->rec.data.TSN_seq);
3528		return;
3529	}
3530	/* First set this up by shrinking the flight */
3531	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3532		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3533		    tp1);
3534	}
3535	sctp_flight_size_decrease(tp1);
3536	sctp_total_flight_decrease(stcb, tp1);
3537	/* Now mark for resend */
3538	tp1->sent = SCTP_DATAGRAM_RESEND;
3539	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3540
3541	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3542		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3543		    tp1->whoTo->flight_size,
3544		    tp1->book_size,
3545		    (uintptr_t) tp1->whoTo,
3546		    tp1->rec.data.TSN_seq);
3547	}
3548}
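/*
 * Editorial aside -- not part of sctp_indata.c. The helper above follows
 * a fixed pattern: unless the chunk was already ACKed (or has no data),
 * pull it out of the flight accounting first, then mark it for resend
 * and bump the retransmission count. A hypothetical userland
 * restatement of that control flow:
 */
#include <stddef.h>

struct toy_probe_chunk {
	int sent;		/* toy states: 1 = sent, 4 = resend, 10 = acked */
	int window_probe;
	int book_size;
	void *data;
};

static void
toy_probe_recovery(struct toy_probe_chunk *c, int *total_flight,
    int *retran_cnt)
{
	c->window_probe = 0;
	if (c->sent >= 10 || c->data == NULL)
		return;			/* skipped TSNs do NOT move back */
	*total_flight -= c->book_size;	/* first shrink the flight */
	c->sent = 4;			/* now mark for resend */
	(*retran_cnt)++;
}

int
main(void)
{
	int flight = 100, retran = 0, payload = 1;
	struct toy_probe_chunk c = { 1, 1, 40, &payload };

	toy_probe_recovery(&c, &flight, &retran);
	return (flight == 60 && retran == 1 && c.sent == 4 ? 0 : 1);
}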
3549
3550void
3551sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3552    uint32_t rwnd, int *abort_now, int ecne_seen)
3553{
3554	struct sctp_nets *net;
3555	struct sctp_association *asoc;
3556	struct sctp_tmit_chunk *tp1, *tp2;
3557	uint32_t old_rwnd;
3558	int win_probe_recovery = 0;
3559	int win_probe_recovered = 0;
3560	int j, done_once = 0;
3561	int rto_ok = 1;
3562
3563	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3564		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3565		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3566	}
3567	SCTP_TCB_LOCK_ASSERT(stcb);
3568#ifdef SCTP_ASOCLOG_OF_TSNS
3569	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3570	stcb->asoc.cumack_log_at++;
3571	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3572		stcb->asoc.cumack_log_at = 0;
3573	}
3574#endif
3575	asoc = &stcb->asoc;
3576	old_rwnd = asoc->peers_rwnd;
3577	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3578		/* old ack */
3579		return;
3580	} else if (asoc->last_acked_seq == cumack) {
3581		/* Window update sack */
3582		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3583		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3584		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3585			/* SWS sender side engages */
3586			asoc->peers_rwnd = 0;
3587		}
3588		if (asoc->peers_rwnd > old_rwnd) {
3589			goto again;
3590		}
3591		return;
3592	}
3593	/* First setup for CC stuff */
3594	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3595		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3596			/* Drag along the window_tsn for cwr's */
3597			net->cwr_window_tsn = cumack;
3598		}
3599		net->prev_cwnd = net->cwnd;
3600		net->net_ack = 0;
3601		net->net_ack2 = 0;
3602
3603		/*
3604		 * CMT: Reset CUC and Fast recovery algo variables before
3605		 * SACK processing
3606		 */
3607		net->new_pseudo_cumack = 0;
3608		net->will_exit_fast_recovery = 0;
3609		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3610			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3611		}
3612	}
3613	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3614		uint32_t send_s;
3615
3616		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3617			tp1 = TAILQ_LAST(&asoc->sent_queue,
3618			    sctpchunk_listhead);
3619			send_s = tp1->rec.data.TSN_seq + 1;
3620		} else {
3621			send_s = asoc->sending_seq;
3622		}
3623		if (SCTP_TSN_GE(cumack, send_s)) {
3624			struct mbuf *op_err;
3625			char msg[SCTP_DIAG_INFO_LEN];
3626
3627			*abort_now = 1;
3628			/* XXX */
3629			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3630			    cumack, send_s);
3631			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3632			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3633			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3634			return;
3635		}
3636	}
3637	asoc->this_sack_highest_gap = cumack;
3638	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3639		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3640		    stcb->asoc.overall_error_count,
3641		    0,
3642		    SCTP_FROM_SCTP_INDATA,
3643		    __LINE__);
3644	}
3645	stcb->asoc.overall_error_count = 0;
3646	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3647		/* process the new consecutive TSN first */
3648		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3649			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3650				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3651					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3652				}
3653				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3654					/*
3655					 * If it is less than ACKED, it is
3656					 * now no longer in flight. Higher
3657					 * values may occur during marking.
3658					 */
3659					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3660						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3661							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3662							    tp1->whoTo->flight_size,
3663							    tp1->book_size,
3664							    (uintptr_t) tp1->whoTo,
3665							    tp1->rec.data.TSN_seq);
3666						}
3667						sctp_flight_size_decrease(tp1);
3668						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3669							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3670							    tp1);
3671						}
3672						/* sa_ignore NO_NULL_CHK */
3673						sctp_total_flight_decrease(stcb, tp1);
3674					}
3675					tp1->whoTo->net_ack += tp1->send_size;
3676					if (tp1->snd_count < 2) {
3677						/*
3678						 * True non-retransmitted
3679						 * chunk
3680						 */
3681						tp1->whoTo->net_ack2 +=
3682						    tp1->send_size;
3683
3684						/* update RTO too? */
3685						if (tp1->do_rtt) {
3686							if (rto_ok) {
3687								tp1->whoTo->RTO =
3688								/*
3689								/* sa_ignore NO_NULL_CHK */
3694								    asoc, tp1->whoTo,
3695								    &tp1->sent_rcv_time,
3696								    sctp_align_safe_nocopy,
3697								    SCTP_RTT_FROM_DATA);
3698								rto_ok = 0;
3699							}
3700							if (tp1->whoTo->rto_needed == 0) {
3701								tp1->whoTo->rto_needed = 1;
3702							}
3703							tp1->do_rtt = 0;
3704						}
3705					}
3706					/*
3707					 * CMT: CUCv2 algorithm. From the
3708					 * cumack'd TSNs, for each TSN being
3709					 * acked for the first time, set the
3710					 * following variables for the
3711					 * corresp destination.
3712					 * new_pseudo_cumack will trigger a
3713					 * cwnd update.
3714					 * find_(rtx_)pseudo_cumack will
3715					 * trigger search for the next
3716					 * expected (rtx-)pseudo-cumack.
3717					 */
3718					tp1->whoTo->new_pseudo_cumack = 1;
3719					tp1->whoTo->find_pseudo_cumack = 1;
3720					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3721
3722					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3723						/* sa_ignore NO_NULL_CHK */
3724						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3725					}
3726				}
3727				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3728					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3729				}
3730				if (tp1->rec.data.chunk_was_revoked) {
3731					/* deflate the cwnd */
3732					tp1->whoTo->cwnd -= tp1->book_size;
3733					tp1->rec.data.chunk_was_revoked = 0;
3734				}
3735				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3736					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3737						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3738#ifdef INVARIANTS
3739					} else {
3740						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3741#endif
3742					}
3743				}
3744				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3745				if (tp1->data) {
3746					/* sa_ignore NO_NULL_CHK */
3747					sctp_free_bufspace(stcb, asoc, tp1, 1);
3748					sctp_m_freem(tp1->data);
3749					tp1->data = NULL;
3750				}
3751				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3752					sctp_log_sack(asoc->last_acked_seq,
3753					    cumack,
3754					    tp1->rec.data.TSN_seq,
3755					    0,
3756					    0,
3757					    SCTP_LOG_FREE_SENT);
3758				}
3759				asoc->sent_queue_cnt--;
3760				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3761			} else {
3762				break;
3763			}
3764		}
3765
3766	}
3767	/* sa_ignore NO_NULL_CHK */
3768	if (stcb->sctp_socket) {
3769#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3770		struct socket *so;
3771
3772#endif
3773		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3774		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3775			/* sa_ignore NO_NULL_CHK */
3776			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3777		}
3778#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3779		so = SCTP_INP_SO(stcb->sctp_ep);
3780		atomic_add_int(&stcb->asoc.refcnt, 1);
3781		SCTP_TCB_UNLOCK(stcb);
3782		SCTP_SOCKET_LOCK(so, 1);
3783		SCTP_TCB_LOCK(stcb);
3784		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3785		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3786			/* assoc was freed while we were unlocked */
3787			SCTP_SOCKET_UNLOCK(so, 1);
3788			return;
3789		}
3790#endif
3791		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3792#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3793		SCTP_SOCKET_UNLOCK(so, 1);
3794#endif
3795	} else {
3796		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3797			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3798		}
3799	}
3800
3801	/* JRS - Use the congestion control given in the CC module */
3802	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3803		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3804			if (net->net_ack2 > 0) {
3805				/*
3806				 * Karn's rule applies to clearing error
3807				 * count, this is optional.
3808				 */
3809				net->error_count = 0;
3810				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3811					/* addr came good */
3812					net->dest_state |= SCTP_ADDR_REACHABLE;
3813					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3814					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3815				}
3816				if (net == stcb->asoc.primary_destination) {
3817					if (stcb->asoc.alternate) {
3818						/*
3819						 * release the alternate,
3820						 * primary is good
3821						 */
3822						sctp_free_remote_addr(stcb->asoc.alternate);
3823						stcb->asoc.alternate = NULL;
3824					}
3825				}
3826				if (net->dest_state & SCTP_ADDR_PF) {
3827					net->dest_state &= ~SCTP_ADDR_PF;
3828					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3829					    stcb->sctp_ep, stcb, net,
3830					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3831					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3832					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3833					/* Done with this net */
3834					net->net_ack = 0;
3835				}
3836				/* restore any doubled timers */
3837				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3838				if (net->RTO < stcb->asoc.minrto) {
3839					net->RTO = stcb->asoc.minrto;
3840				}
3841				if (net->RTO > stcb->asoc.maxrto) {
3842					net->RTO = stcb->asoc.maxrto;
3843				}
3844			}
3845		}
3846		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3847	}
3848	asoc->last_acked_seq = cumack;
3849
3850	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3851		/* nothing left in-flight */
3852		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3853			net->flight_size = 0;
3854			net->partial_bytes_acked = 0;
3855		}
3856		asoc->total_flight = 0;
3857		asoc->total_flight_count = 0;
3858	}
3859	/* RWND update */
3860	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3861	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3862	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3863		/* SWS sender side engages */
3864		asoc->peers_rwnd = 0;
3865	}
3866	if (asoc->peers_rwnd > old_rwnd) {
3867		win_probe_recovery = 1;
3868	}
3869	/* Now assure a timer is running where data is queued */
3870again:
3871	j = 0;
3872	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3873		int to_ticks;
3874
3875		if (win_probe_recovery && (net->window_probe)) {
3876			win_probe_recovered = 1;
3877			/*
3878			 * Find the first chunk that was used with a window
3879			 * probe and clear the window-probe state on it
3880			 */
3881			/* sa_ignore FREED_MEMORY */
3882			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3883				if (tp1->window_probe) {
3884					/* move back to data send queue */
3885					sctp_window_probe_recovery(stcb, asoc, tp1);
3886					break;
3887				}
3888			}
3889		}
3890		if (net->RTO == 0) {
3891			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3892		} else {
3893			to_ticks = MSEC_TO_TICKS(net->RTO);
3894		}
3895		if (net->flight_size) {
3896			j++;
3897			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3898			    sctp_timeout_handler, &net->rxt_timer);
3899			if (net->window_probe) {
3900				net->window_probe = 0;
3901			}
3902		} else {
3903			if (net->window_probe) {
3904				/*
3905				 * In window probes we must assure a timer
3906				 * is still running there
3907				 */
3908				net->window_probe = 0;
3909				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3910					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3911					    sctp_timeout_handler, &net->rxt_timer);
3912				}
3913			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3914				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3915				    stcb, net,
3916				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3917			}
3918		}
3919	}
3920	if ((j == 0) &&
3921	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3922	    (asoc->sent_queue_retran_cnt == 0) &&
3923	    (win_probe_recovered == 0) &&
3924	    (done_once == 0)) {
3925		/*
3926		 * huh, this should not happen unless all packets are
3927		 * PR-SCTP and marked to skip of course.
3928		 */
3929		if (sctp_fs_audit(asoc)) {
3930			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3931				net->flight_size = 0;
3932			}
3933			asoc->total_flight = 0;
3934			asoc->total_flight_count = 0;
3935			asoc->sent_queue_retran_cnt = 0;
3936			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3937				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3938					sctp_flight_size_increase(tp1);
3939					sctp_total_flight_increase(stcb, tp1);
3940				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3941					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3942				}
3943			}
3944		}
3945		done_once = 1;
3946		goto again;
3947	}
3948	/**********************************/
3949	/* Now what about shutdown issues */
3950	/**********************************/
3951	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3952		/* nothing left on sendqueue.. consider done */
3953		/* clean up */
3954		if ((asoc->stream_queue_cnt == 1) &&
3955		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3956		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3957		    (asoc->locked_on_sending)
3958		    ) {
3959			struct sctp_stream_queue_pending *sp;
3960
3961			/*
3962			 * I may be in a state where we got it all across but
3963			 * cannot write more due to a shutdown... we abort
3964			 * since the user did not indicate EOR in this case.
3965			 * The sp will be cleaned during free of the asoc.
3966			 */
3967			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3968			    sctp_streamhead);
3969			if ((sp) && (sp->length == 0)) {
3970				/* Let cleanup code purge it */
3971				if (sp->msg_is_complete) {
3972					asoc->stream_queue_cnt--;
3973				} else {
3974					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3975					asoc->locked_on_sending = NULL;
3976					asoc->stream_queue_cnt--;
3977				}
3978			}
3979		}
3980		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3981		    (asoc->stream_queue_cnt == 0)) {
3982			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3983				/* Need to abort here */
3984				struct mbuf *op_err;
3985
3986		abort_out_now:
3987				*abort_now = 1;
3988				/* XXX */
3989				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3990				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
3991				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3992			} else {
3993				struct sctp_nets *netp;
3994
3995				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3996				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3997					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3998				}
3999				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4000				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4001				sctp_stop_timers_for_shutdown(stcb);
4002				if (asoc->alternate) {
4003					netp = asoc->alternate;
4004				} else {
4005					netp = asoc->primary_destination;
4006				}
4007				sctp_send_shutdown(stcb, netp);
4008				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4009				    stcb->sctp_ep, stcb, netp);
4010				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4011				    stcb->sctp_ep, stcb, netp);
4012			}
4013		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4014		    (asoc->stream_queue_cnt == 0)) {
4015			struct sctp_nets *netp;
4016
4017			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4018				goto abort_out_now;
4019			}
4020			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4021			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4022			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4023			sctp_stop_timers_for_shutdown(stcb);
4024			if (asoc->alternate) {
4025				netp = asoc->alternate;
4026			} else {
4027				netp = asoc->primary_destination;
4028			}
4029			sctp_send_shutdown_ack(stcb, netp);
4030			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4031			    stcb->sctp_ep, stcb, netp);
4032		}
4033	}
4034	/*********************************************/
4035	/* Here we perform PR-SCTP procedures        */
4036	/* (section 4.2)                             */
4037	/*********************************************/
4038	/* C1. update advancedPeerAckPoint */
4039	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4040		asoc->advanced_peer_ack_point = cumack;
4041	}
4042	/* PR-SCTP issues need to be addressed too */
4043	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4044		struct sctp_tmit_chunk *lchk;
4045		uint32_t old_adv_peer_ack_point;
4046
4047		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4048		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4049		/* C3. See if we need to send a Fwd-TSN */
4050		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4051			/*
4052			 * ISSUE with ECN, see FWD-TSN processing.
4053			 */
4054			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4055				send_forward_tsn(stcb, asoc);
4056			} else if (lchk) {
4057				/* try to FR fwd-tsn's that get lost too */
4058				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4059					send_forward_tsn(stcb, asoc);
4060				}
4061			}
4062		}
4063		if (lchk) {
4064			/* Assure a timer is up */
4065			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4066			    stcb->sctp_ep, stcb, lchk->whoTo);
4067		}
4068	}
4069	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4070		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4071		    rwnd,
4072		    stcb->asoc.peers_rwnd,
4073		    stcb->asoc.total_flight,
4074		    stcb->asoc.total_output_queue_size);
4075	}
4076}
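/*
 * Editorial aside -- not part of sctp_indata.c. Both SACK paths in this
 * file recompute the peer's usable rwnd the same way: advertised rwnd
 * minus bytes in flight minus a per-chunk overhead allowance, floored
 * at zero (as sctp_sbspace_sub() provides), and then clamped to zero
 * again below the sender-side silly-window threshold. A hypothetical
 * userland restatement:
 */
#include <stdint.h>

static uint32_t
toy_peer_rwnd(uint32_t advertised, uint32_t total_flight,
    uint32_t flight_count, uint32_t per_chunk_oh, uint32_t sws_sender)
{
	uint32_t consumed, rwnd;

	consumed = total_flight + flight_count * per_chunk_oh;
	rwnd = (advertised > consumed) ? advertised - consumed : 0;
	if (rwnd < sws_sender)
		rwnd = 0;	/* SWS sender side engages */
	return (rwnd);
}

int
main(void)
{
	/*
	 * 1000 advertised, 800 in flight plus 2 * 50 overhead leaves 100
	 * usable, which a 512-byte SWS threshold then clamps to 0.
	 */
	return (toy_peer_rwnd(1000, 800, 2, 50, 512) == 0 ? 0 : 1);
}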
4077
4078void
4079sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4080    struct sctp_tcb *stcb,
4081    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4082    int *abort_now, uint8_t flags,
4083    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4084{
4085	struct sctp_association *asoc;
4086	struct sctp_tmit_chunk *tp1, *tp2;
4087	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4088	uint16_t wake_him = 0;
4089	uint32_t send_s = 0;
4090	long j;
4091	int accum_moved = 0;
4092	int will_exit_fast_recovery = 0;
4093	uint32_t a_rwnd, old_rwnd;
4094	int win_probe_recovery = 0;
4095	int win_probe_recovered = 0;
4096	struct sctp_nets *net = NULL;
4097	int done_once;
4098	int rto_ok = 1;
4099	uint8_t reneged_all = 0;
4100	uint8_t cmt_dac_flag;
4101
4102	/*
4103	 * we take any chance we can to service our queues since we cannot
4104	 * get awoken when the socket is read from :<
4105	 */
4106	/*
4107	 * Now perform the actual SACK handling: 1) Verify that it is not an
4108	 * old sack, if so discard. 2) If there is nothing left in the send
4109	 * queue (cum-ack is equal to last acked) then you have a duplicate
4110	 * too, update any rwnd change and verify no timers are running.
4111	 * then return. 3) Process any new consequtive data i.e. cum-ack
4112	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4113	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4114	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4115	 * sync up flightsizes and things, stop all timers and also check
4116	 * for shutdown_pending state. If so then go ahead and send off the
4117	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4118	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4119	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4120	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4121	 * if in shutdown_recv state.
4122	 */
4123	SCTP_TCB_LOCK_ASSERT(stcb);
4124	/* CMT DAC algo */
4125	this_sack_lowest_newack = 0;
4126	SCTP_STAT_INCR(sctps_slowpath_sack);
4127	last_tsn = cum_ack;
4128	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4129#ifdef SCTP_ASOCLOG_OF_TSNS
4130	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4131	stcb->asoc.cumack_log_at++;
4132	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4133		stcb->asoc.cumack_log_at = 0;
4134	}
4135#endif
4136	a_rwnd = rwnd;
4137
4138	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4139		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4140		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4141	}
4142	old_rwnd = stcb->asoc.peers_rwnd;
4143	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4144		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4145		    stcb->asoc.overall_error_count,
4146		    0,
4147		    SCTP_FROM_SCTP_INDATA,
4148		    __LINE__);
4149	}
4150	stcb->asoc.overall_error_count = 0;
4151	asoc = &stcb->asoc;
4152	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4153		sctp_log_sack(asoc->last_acked_seq,
4154		    cum_ack,
4155		    0,
4156		    num_seg,
4157		    num_dup,
4158		    SCTP_LOG_NEW_SACK);
4159	}
4160	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4161		uint16_t i;
4162		uint32_t *dupdata, dblock;
4163
4164		for (i = 0; i < num_dup; i++) {
4165			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4166			    sizeof(uint32_t), (uint8_t *) & dblock);
4167			if (dupdata == NULL) {
4168				break;
4169			}
4170			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4171		}
4172	}
4173	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4174		/* reality check */
4175		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4176			tp1 = TAILQ_LAST(&asoc->sent_queue,
4177			    sctpchunk_listhead);
4178			send_s = tp1->rec.data.TSN_seq + 1;
4179		} else {
4180			tp1 = NULL;
4181			send_s = asoc->sending_seq;
4182		}
4183		if (SCTP_TSN_GE(cum_ack, send_s)) {
4184			struct mbuf *op_err;
4185			char msg[SCTP_DIAG_INFO_LEN];
4186
4187			/*
4188			 * no way, we have not even sent this TSN out yet.
4189			 * Peer is hopelessly messed up with us.
4190			 */
4191		SCTP_PRINTF("NEW cum_ack:%x is greater than or equal to send_s:%x\n",
4192			    cum_ack, send_s);
4193			if (tp1) {
4194				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4195				    tp1->rec.data.TSN_seq, (void *)tp1);
4196			}
4197	hopeless_peer:
4198			*abort_now = 1;
4199			/* XXX */
4200			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4201			    cum_ack, send_s);
4202			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4203			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4204			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4205			return;
4206		}
4207	}
4208	/**********************/
4209	/* 1) check the range */
4210	/**********************/
4211	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4212		/* acking something behind */
4213		return;
4214	}
4215	/* update the Rwnd of the peer */
4216	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4217	    TAILQ_EMPTY(&asoc->send_queue) &&
4218	    (asoc->stream_queue_cnt == 0)) {
4219		/* nothing left on send/sent and strmq */
4220		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4221			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4222			    asoc->peers_rwnd, 0, 0, a_rwnd);
4223		}
4224		asoc->peers_rwnd = a_rwnd;
4225		if (asoc->sent_queue_retran_cnt) {
4226			asoc->sent_queue_retran_cnt = 0;
4227		}
4228		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4229			/* SWS sender side engages */
4230			asoc->peers_rwnd = 0;
4231		}
4232		/* stop any timers */
4233		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4234			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4235			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4236			net->partial_bytes_acked = 0;
4237			net->flight_size = 0;
4238		}
4239		asoc->total_flight = 0;
4240		asoc->total_flight_count = 0;
4241		return;
4242	}
4243	/*
4244	 * We init net_ack and net_ack2 to 0. These are used to track two
4245	 * things. The total byte count acked is tracked in net_ack AND
4246	 * net_ack2 is used to track the total bytes acked that are
4247	 * unambiguous and were never retransmitted. We track these on a
4248	 * per-destination-address basis.
4249	 */
4250	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4251		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4252			/* Drag along the window_tsn for cwr's */
4253			net->cwr_window_tsn = cum_ack;
4254		}
4255		net->prev_cwnd = net->cwnd;
4256		net->net_ack = 0;
4257		net->net_ack2 = 0;
4258
4259		/*
4260		 * CMT: Reset CUC and Fast recovery algo variables before
4261		 * SACK processing
4262		 */
4263		net->new_pseudo_cumack = 0;
4264		net->will_exit_fast_recovery = 0;
4265		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4266			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4267		}
4268	}
4269	/* process the new consecutive TSN first */
4270	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4271		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4272			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4273				accum_moved = 1;
4274				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4275					/*
4276					 * If it is less than ACKED, it is
4277					 * now no longer in flight. Higher
4278					 * values may occur during marking.
4279					 */
4280					if ((tp1->whoTo->dest_state &
4281					    SCTP_ADDR_UNCONFIRMED) &&
4282					    (tp1->snd_count < 2)) {
4283						/*
4284						 * If there was no retran
4285						 * and the address is
4286						 * unconfirmed, and we sent
4287						 * there and are now
4288						 * SACKed: it's confirmed,
4289						 * so mark it.
4290						 */
4291						tp1->whoTo->dest_state &=
4292						    ~SCTP_ADDR_UNCONFIRMED;
4293					}
4294					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4295						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4296							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4297							    tp1->whoTo->flight_size,
4298							    tp1->book_size,
4299							    (uintptr_t) tp1->whoTo,
4300							    tp1->rec.data.TSN_seq);
4301						}
4302						sctp_flight_size_decrease(tp1);
4303						sctp_total_flight_decrease(stcb, tp1);
4304						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4305							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4306							    tp1);
4307						}
4308					}
4309					tp1->whoTo->net_ack += tp1->send_size;
4310
4311					/* CMT SFR and DAC algos */
4312					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4313					tp1->whoTo->saw_newack = 1;
4314
4315					if (tp1->snd_count < 2) {
4316						/*
4317						 * True non-retransmitted
4318						 * chunk
4319						 */
4320						tp1->whoTo->net_ack2 +=
4321						    tp1->send_size;
4322
4323						/* update RTO too? */
4324						if (tp1->do_rtt) {
4325							if (rto_ok) {
4326								tp1->whoTo->RTO =
4327								    sctp_calculate_rto(stcb,
4328								    asoc, tp1->whoTo,
4329								    &tp1->sent_rcv_time,
4330								    sctp_align_safe_nocopy,
4331								    SCTP_RTT_FROM_DATA);
4332								rto_ok = 0;
4333							}
4334							if (tp1->whoTo->rto_needed == 0) {
4335								tp1->whoTo->rto_needed = 1;
4336							}
4337							tp1->do_rtt = 0;
4338						}
4339					}
4340					/*
4341					 * CMT: CUCv2 algorithm. From the
4342					 * cumack'd TSNs, for each TSN being
4343					 * acked for the first time, set the
4344					 * following variables for the
4345					 * corresp destination.
4346					 * new_pseudo_cumack will trigger a
4347					 * cwnd update.
4348					 * find_(rtx_)pseudo_cumack will
4349					 * trigger search for the next
4350					 * expected (rtx-)pseudo-cumack.
4351					 */
4352					tp1->whoTo->new_pseudo_cumack = 1;
4353					tp1->whoTo->find_pseudo_cumack = 1;
4354					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4355
4356
4357					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4358						sctp_log_sack(asoc->last_acked_seq,
4359						    cum_ack,
4360						    tp1->rec.data.TSN_seq,
4361						    0,
4362						    0,
4363						    SCTP_LOG_TSN_ACKED);
4364					}
4365					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4366						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4367					}
4368				}
4369				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4370					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4371#ifdef SCTP_AUDITING_ENABLED
4372					sctp_audit_log(0xB3,
4373					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4374#endif
4375				}
4376				if (tp1->rec.data.chunk_was_revoked) {
4377					/* deflate the cwnd */
4378					tp1->whoTo->cwnd -= tp1->book_size;
4379					tp1->rec.data.chunk_was_revoked = 0;
4380				}
4381				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4382					tp1->sent = SCTP_DATAGRAM_ACKED;
4383				}
4384			}
4385		} else {
4386			break;
4387		}
4388	}
4389	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4390	/* always set this up to cum-ack */
4391	asoc->this_sack_highest_gap = last_tsn;
4392
4393	if ((num_seg > 0) || (num_nr_seg > 0)) {
4394
4395		/*
4396		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4397		 * to be greater than the cumack. Also reset saw_newack to 0
4398		 * for all dests.
4399		 */
4400		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4401			net->saw_newack = 0;
4402			net->this_sack_highest_newack = last_tsn;
4403		}
4404
4405		/*
4406		 * this_sack_highest_gap will increase while handling NEW
4407		 * segments; this_sack_highest_newack will increase while
4408		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4409		 * used for the CMT DAC algo. saw_newack will also change.
4410		 */
4411		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4412		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4413		    num_seg, num_nr_seg, &rto_ok)) {
4414			wake_him++;
4415		}
4416		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4417			/*
4418			 * validate the biggest_tsn_acked in the gap acks if
4419			 * strict adherence is wanted.
4420			 */
4421			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4422				/*
4423				 * peer is either confused or we are under
4424				 * attack. We must abort.
4425				 */
4426				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4427				    biggest_tsn_acked, send_s);
4428				goto hopeless_peer;
4429			}
4430		}
4431	}
4432	/*******************************************/
4433	/* cancel ALL T3-send timers if accum moved */
4434	/*******************************************/
4435	if (asoc->sctp_cmt_on_off > 0) {
4436		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4437			if (net->new_pseudo_cumack)
4438				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4439				    stcb, net,
4440				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4441
4442		}
4443	} else {
4444		if (accum_moved) {
4445			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4446				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4447				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4448			}
4449		}
4450	}
4451	/********************************************/
4452	/* drop the acked chunks from the sentqueue */
4453	/********************************************/
4454	asoc->last_acked_seq = cum_ack;
4455
4456	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4457		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4458			break;
4459		}
4460		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4461			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4462				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4463#ifdef INVARIANTS
4464			} else {
4465				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4466#endif
4467			}
4468		}
4469		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4470		if (PR_SCTP_ENABLED(tp1->flags)) {
4471			if (asoc->pr_sctp_cnt != 0)
4472				asoc->pr_sctp_cnt--;
4473		}
4474		asoc->sent_queue_cnt--;
4475		if (tp1->data) {
4476			/* sa_ignore NO_NULL_CHK */
4477			sctp_free_bufspace(stcb, asoc, tp1, 1);
4478			sctp_m_freem(tp1->data);
4479			tp1->data = NULL;
4480			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4481				asoc->sent_queue_cnt_removeable--;
4482			}
4483		}
4484		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4485			sctp_log_sack(asoc->last_acked_seq,
4486			    cum_ack,
4487			    tp1->rec.data.TSN_seq,
4488			    0,
4489			    0,
4490			    SCTP_LOG_FREE_SENT);
4491		}
4492		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4493		wake_him++;
4494	}
4495	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4496#ifdef INVARIANTS
4497		panic("Warning flight size is positive and should be 0");
4498#else
4499		SCTP_PRINTF("Warning: flight size incorrect, should be 0 but is %d\n",
4500		    asoc->total_flight);
4501#endif
4502		asoc->total_flight = 0;
4503	}
4504	/* sa_ignore NO_NULL_CHK */
4505	if ((wake_him) && (stcb->sctp_socket)) {
4506#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4507		struct socket *so;
4508
4509#endif
4510		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4511		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4512			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4513		}
4514#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4515		so = SCTP_INP_SO(stcb->sctp_ep);
4516		atomic_add_int(&stcb->asoc.refcnt, 1);
4517		SCTP_TCB_UNLOCK(stcb);
4518		SCTP_SOCKET_LOCK(so, 1);
4519		SCTP_TCB_LOCK(stcb);
4520		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4521		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4522			/* assoc was freed while we were unlocked */
4523			SCTP_SOCKET_UNLOCK(so, 1);
4524			return;
4525		}
4526#endif
4527		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4528#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4529		SCTP_SOCKET_UNLOCK(so, 1);
4530#endif
4531	} else {
4532		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4533			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4534		}
4535	}
4536
4537	if (asoc->fast_retran_loss_recovery && accum_moved) {
4538		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4539			/* Setup so we will exit RFC2582 fast recovery */
4540			will_exit_fast_recovery = 1;
4541		}
4542	}
4543	/*
4544	 * Check for revoked fragments:
4545	 *
4546	 * If the previous SACK had no frags, then nothing can have been
4547	 * revoked. If the previous SACK had frags, then: if we now have
4548	 * frags (num_seg > 0), call sctp_check_for_revoked() to tell if the
4549	 * peer revoked some of them; else the peer revoked ALL previously
4550	 * ACKED fragments, since we had some before and now we have NONE.
4551	 */
4552
4553	if (num_seg) {
4554		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4555		asoc->saw_sack_with_frags = 1;
4556	} else if (asoc->saw_sack_with_frags) {
4557		int cnt_revoked = 0;
4558
4559		/* Peer revoked all dg's marked or acked */
4560		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4561			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4562				tp1->sent = SCTP_DATAGRAM_SENT;
4563				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4564					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4565					    tp1->whoTo->flight_size,
4566					    tp1->book_size,
4567					    (uintptr_t) tp1->whoTo,
4568					    tp1->rec.data.TSN_seq);
4569				}
4570				sctp_flight_size_increase(tp1);
4571				sctp_total_flight_increase(stcb, tp1);
4572				tp1->rec.data.chunk_was_revoked = 1;
4573				/*
4574				 * To ensure that this increase in
4575				 * flightsize, which is artificial, does not
4576				 * throttle the sender, we also increase the
4577				 * cwnd artificially.
4578				 */
4579				tp1->whoTo->cwnd += tp1->book_size;
4580				cnt_revoked++;
4581			}
4582		}
4583		if (cnt_revoked) {
4584			reneged_all = 1;
4585		}
4586		asoc->saw_sack_with_frags = 0;
4587	}
4588	if (num_nr_seg > 0)
4589		asoc->saw_sack_with_nr_frags = 1;
4590	else
4591		asoc->saw_sack_with_nr_frags = 0;
4592
4593	/* JRS - Use the congestion control given in the CC module */
4594	if (ecne_seen == 0) {
4595		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4596			if (net->net_ack2 > 0) {
4597				/*
4598				 * Karn's rule applies to clearing error
4599				 * count, this is optional.
4600				 */
4601				net->error_count = 0;
4602				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4603					/* addr came good */
4604					net->dest_state |= SCTP_ADDR_REACHABLE;
4605					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4606					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4607				}
4608				if (net == stcb->asoc.primary_destination) {
4609					if (stcb->asoc.alternate) {
4610						/*
4611						 * release the alternate,
4612						 * primary is good
4613						 */
4614						sctp_free_remote_addr(stcb->asoc.alternate);
4615						stcb->asoc.alternate = NULL;
4616					}
4617				}
4618				if (net->dest_state & SCTP_ADDR_PF) {
4619					net->dest_state &= ~SCTP_ADDR_PF;
4620					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4621					    stcb->sctp_ep, stcb, net,
4622					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4623					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4624					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4625					/* Done with this net */
4626					net->net_ack = 0;
4627				}
4628				/* restore any doubled timers */
4629				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4630				if (net->RTO < stcb->asoc.minrto) {
4631					net->RTO = stcb->asoc.minrto;
4632				}
4633				if (net->RTO > stcb->asoc.maxrto) {
4634					net->RTO = stcb->asoc.maxrto;
4635				}
4636			}
4637		}
4638		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4639	}
4640	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4641		/* nothing left in-flight */
4642		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4643			/* stop all timers */
4644			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4645			    stcb, net,
4646			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4647			net->flight_size = 0;
4648			net->partial_bytes_acked = 0;
4649		}
4650		asoc->total_flight = 0;
4651		asoc->total_flight_count = 0;
4652	}
4653	/**********************************/
4654	/* Now what about shutdown issues */
4655	/**********************************/
4656	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4657		/* nothing left on sendqueue.. consider done */
4658		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4659			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4660			    asoc->peers_rwnd, 0, 0, a_rwnd);
4661		}
4662		asoc->peers_rwnd = a_rwnd;
4663		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4664			/* SWS sender side engages */
4665			asoc->peers_rwnd = 0;
4666		}
4667		/* clean up */
4668		if ((asoc->stream_queue_cnt == 1) &&
4669		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4670		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4671		    (asoc->locked_on_sending)
4672		    ) {
4673			struct sctp_stream_queue_pending *sp;
4674
4675			/*
4676			 * I may be in a state where we got it all across but
4677			 * cannot write more due to a shutdown... we abort
4678			 * since the user did not indicate EOR in this case.
4679			 */
4680			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4681			    sctp_streamhead);
4682			if ((sp) && (sp->length == 0)) {
4683				asoc->locked_on_sending = NULL;
4684				if (sp->msg_is_complete) {
4685					asoc->stream_queue_cnt--;
4686				} else {
4687					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4688					asoc->stream_queue_cnt--;
4689				}
4690			}
4691		}
4692		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4693		    (asoc->stream_queue_cnt == 0)) {
4694			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4695				/* Need to abort here */
4696				struct mbuf *op_err;
4697
4698		abort_out_now:
4699				*abort_now = 1;
4700				/* XXX */
4701				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4702				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4703				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4704				return;
4705			} else {
4706				struct sctp_nets *netp;
4707
4708				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4709				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4710					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4711				}
4712				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4713				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4714				sctp_stop_timers_for_shutdown(stcb);
4715				if (asoc->alternate) {
4716					netp = asoc->alternate;
4717				} else {
4718					netp = asoc->primary_destination;
4719				}
4720				sctp_send_shutdown(stcb, netp);
4721				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4722				    stcb->sctp_ep, stcb, netp);
4723				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4724				    stcb->sctp_ep, stcb, netp);
4725			}
4726			return;
4727		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4728		    (asoc->stream_queue_cnt == 0)) {
4729			struct sctp_nets *netp;
4730
4731			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4732				goto abort_out_now;
4733			}
4734			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4735			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4736			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4737			sctp_stop_timers_for_shutdown(stcb);
4738			if (asoc->alternate) {
4739				netp = asoc->alternate;
4740			} else {
4741				netp = asoc->primary_destination;
4742			}
4743			sctp_send_shutdown_ack(stcb, netp);
4744			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4745			    stcb->sctp_ep, stcb, netp);
4746			return;
4747		}
4748	}
4749	/*
4750	 * Now here we are going to recycle net_ack for a different use...
4751	 * HEADS UP.
4752	 */
4753	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4754		net->net_ack = 0;
4755	}
4756
4757	/*
4758	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4759	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4760	 * automatically ensure that.
4761	 */
4762	if ((asoc->sctp_cmt_on_off > 0) &&
4763	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4764	    (cmt_dac_flag == 0)) {
4765		this_sack_lowest_newack = cum_ack;
4766	}
4767	if ((num_seg > 0) || (num_nr_seg > 0)) {
4768		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4769		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4770	}
4771	/* JRS - Use the congestion control given in the CC module */
4772	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4773
4774	/* Now are we exiting loss recovery ? */
4775	if (will_exit_fast_recovery) {
4776		/* Ok, we must exit fast recovery */
4777		asoc->fast_retran_loss_recovery = 0;
4778	}
4779	if ((asoc->sat_t3_loss_recovery) &&
4780	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4781		/* end satellite t3 loss recovery */
4782		asoc->sat_t3_loss_recovery = 0;
4783	}
4784	/*
4785	 * CMT Fast recovery
4786	 */
4787	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4788		if (net->will_exit_fast_recovery) {
4789			/* Ok, we must exit fast recovery */
4790			net->fast_retran_loss_recovery = 0;
4791		}
4792	}
4793
4794	/* Adjust and set the new rwnd value */
4795	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4796		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4797		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4798	}
4799	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4800	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4801	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4802		/* SWS sender side engages */
4803		asoc->peers_rwnd = 0;
4804	}
4805	if (asoc->peers_rwnd > old_rwnd) {
4806		win_probe_recovery = 1;
4807	}
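	/*
	 * Worked example of the rwnd bookkeeping above, with hypothetical
	 * numbers: if the peer reports a_rwnd = 10000 while we have
	 * total_flight = 6000 in 3 chunks and sctp_peer_chunk_oh = 256, the
	 * usable peer window is 10000 - (6000 + 3 * 256) = 3232 bytes.
	 * sctp_sbspace_sub() clamps the result at 0 instead of wrapping,
	 * and anything below the SWS threshold is treated as a closed
	 * window.
	 */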
4808	/*
4809	 * Now set things up so that a timer is running for every net that
4810	 * still has outstanding data.
4811	 */
4812	done_once = 0;
4813again:
4814	j = 0;
4815	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4816		if (win_probe_recovery && (net->window_probe)) {
4817			win_probe_recovered = 1;
4818			/*-
4819			 * Find the first chunk that was used for a
4820			 * window probe and clear the probe flag. Put
4821			 * it back into the send queue as if it had
4822			 * not been sent.
4823			 */
4824			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4825				if (tp1->window_probe) {
4826					sctp_window_probe_recovery(stcb, asoc, tp1);
4827					break;
4828				}
4829			}
4830		}
4831		if (net->flight_size) {
4832			j++;
4833			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4834				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4835				    stcb->sctp_ep, stcb, net);
4836			}
4837			if (net->window_probe) {
4838				net->window_probe = 0;
4839			}
4840		} else {
4841			if (net->window_probe) {
4842				/*
4843				 * While a window probe is outstanding we
4844				 * must ensure a timer is still running here.
4845				 */
4846				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4847					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4848					    stcb->sctp_ep, stcb, net);
4849
4850				}
4851			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4852				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4853				    stcb, net,
4854				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4855			}
4856		}
4857	}
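	/*
	 * The per-net timer policy applied above, in table form (summary,
	 * not code):
	 *
	 *   flight_size > 0                 -> make sure a T3-rxt timer runs
	 *   flight_size == 0, window_probe  -> keep a T3-rxt timer running
	 *   flight_size == 0, no probe      -> stop any pending T3-rxt timer
	 */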
4858	if ((j == 0) &&
4859	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4860	    (asoc->sent_queue_retran_cnt == 0) &&
4861	    (win_probe_recovered == 0) &&
4862	    (done_once == 0)) {
4863		/*
4864		 * This should not happen unless every chunk on the sent
4865		 * queue is PR-SCTP and marked to be skipped.
4866		 */
4867		if (sctp_fs_audit(asoc)) {
4868			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4869				net->flight_size = 0;
4870			}
4871			asoc->total_flight = 0;
4872			asoc->total_flight_count = 0;
4873			asoc->sent_queue_retran_cnt = 0;
4874			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4875				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4876					sctp_flight_size_increase(tp1);
4877					sctp_total_flight_increase(stcb, tp1);
4878				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4879					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4880				}
4881			}
4882		}
4883		done_once = 1;
4884		goto again;
4885	}
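	/*
	 * Note on the retry above: when nothing counts as in flight yet the
	 * sent queue is not empty, we rerun the timer pass exactly once
	 * (guarded by done_once), after giving sctp_fs_audit() a chance to
	 * rebuild the flight counters from the sent queue.
	 */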
4886	/*********************************************/
4887	/* Here we perform PR-SCTP procedures        */
4888	/* (section 4.2)                             */
4889	/*********************************************/
4890	/* C1. update advancedPeerAckPoint */
4891	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4892		asoc->advanced_peer_ack_point = cum_ack;
4893	}
4894	/* C2. try to further move advancedPeerAckPoint ahead */
4895	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4896		struct sctp_tmit_chunk *lchk;
4897		uint32_t old_adv_peer_ack_point;
4898
4899		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4900		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4901		/* C3. See if we need to send a Fwd-TSN */
4902		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4903			/*
4904			 * ISSUE with ECN, see FWD-TSN processing.
4905			 */
4906			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4907				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4908				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4909				    old_adv_peer_ack_point);
4910			}
4911			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4912				send_forward_tsn(stcb, asoc);
4913			} else if (lchk) {
4914				/* fast-retransmit FORWARD-TSNs that appear lost as well */
4915				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4916					send_forward_tsn(stcb, asoc);
4917				}
4918			}
4919		}
4920		if (lchk) {
4921			/* Ensure a timer is running */
4922			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4923			    stcb->sctp_ep, stcb, lchk->whoTo);
4924		}
4925	}
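	/*
	 * Worked FORWARD-TSN example for C1-C3 above (hypothetical TSNs):
	 * with cum_ack = 100 and abandoned PR-SCTP chunks 101-103,
	 * sctp_try_advance_peer_ack_point() moves advanced_peer_ack_point
	 * to 103. Since 103 > 100 and the ack point moved, a FORWARD-TSN
	 * carrying new_cumulative_tsn = 103 is sent so the peer stops
	 * expecting those TSNs.
	 */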
4926	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4927		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4928		    a_rwnd,
4929		    stcb->asoc.peers_rwnd,
4930		    stcb->asoc.total_flight,
4931		    stcb->asoc.total_output_queue_size);
4932	}
4933}
4934
4935void
4936sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4937{
4938	/* Copy cum-ack */
4939	uint32_t cum_ack, a_rwnd;
4940
4941	cum_ack = ntohl(cp->cumulative_tsn_ack);
4942	/* Arrange so a_rwnd does NOT change */
4943	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4944
4945	/* Now call the express sack handling */
4946	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4947}
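/*
 * Quick check of the a_rwnd reconstruction above (hypothetical numbers,
 * ignoring the per-chunk overhead term): if peers_rwnd is 4000 and
 * total_flight is 2000, we pass a_rwnd = 6000. The SACK handler subtracts
 * whatever remains in flight, so if the cum-ack acks nothing new the
 * estimate stays 6000 - 2000 = 4000, and bytes it does ack simply move
 * from flight back into the window.
 */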
4948
4949static void
4950sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4951    struct sctp_stream_in *strmin)
4952{
4953	struct sctp_queued_to_read *ctl, *nctl;
4954	struct sctp_association *asoc;
4955	uint16_t tt;
4956
4957	asoc = &stcb->asoc;
4958	tt = strmin->last_sequence_delivered;
4959	/*
4960	 * First deliver anything at or before the stream sequence number
4961	 * that came in.
4962	 */
4963	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4964		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4965			/* this is deliverable now */
4966			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4967			/* subtract pending on streams */
4968			asoc->size_on_all_streams -= ctl->length;
4969			sctp_ucount_decr(asoc->cnt_on_all_streams);
4970			/* deliver it to at least the delivery-q */
4971			if (stcb->sctp_socket) {
4972				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4973				sctp_add_to_readq(stcb->sctp_ep, stcb,
4974				    ctl,
4975				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4976			}
4977		} else {
4978			/* no more delivery now. */
4979			break;
4980		}
4981	}
4982	/*
4983	 * Now deliver queued chunks the normal way, if any have become
4984	 * ready.
4985	 */
4986	tt = strmin->last_sequence_delivered + 1;
4987	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4988		if (tt == ctl->sinfo_ssn) {
4989			/* this is deliverable now */
4990			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4991			/* subtract pending on streams */
4992			asoc->size_on_all_streams -= ctl->length;
4993			sctp_ucount_decr(asoc->cnt_on_all_streams);
4994			/* deliver it to at least the delivery-q */
4995			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4996			if (stcb->sctp_socket) {
4997				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4998				sctp_add_to_readq(stcb->sctp_ep, stcb,
4999				    ctl,
5000				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5001
5002			}
5003			tt = strmin->last_sequence_delivered + 1;
5004		} else {
5005			break;
5006		}
5007	}
5008}
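/*
 * Illustrative run of the two passes above (hypothetical SSNs): suppose a
 * FWD-TSN moved last_sequence_delivered to 5 and the in-queue holds SSNs
 * {3, 4, 6, 8}. Pass one delivers 3 and 4 (at or below 5); pass two then
 * walks consecutively from 6, delivers it, and stops at the gap before 8.
 */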
5009
5010static void
5011sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5012    struct sctp_association *asoc,
5013    uint16_t stream, uint16_t seq)
5014{
5015	struct sctp_tmit_chunk *chk, *nchk;
5016
5017	/* For each entry on the queue, see if it needs to be tossed. */
5018	/*
5019	 * For now, complete large messages held on the reassembly queue
5020	 * are tossed as well. In theory we could do more work: spin
5021	 * through, stop after dumping one message (i.e., on seeing the
5022	 * start of a new message at the head) and call the delivery
5023	 * function to see if it can be delivered. For now we just dump
5024	 * everything matching on the queue.
5025	 */
5026	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5027		/*
5028		 * Do not toss it if it is on a different stream, or if it
5029		 * is marked for unordered delivery, in which case the
5030		 * stream sequence number has no meaning.
5031		 */
5032		if ((chk->rec.data.stream_number != stream) ||
5033		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5034			continue;
5035		}
5036		if (chk->rec.data.stream_seq == seq) {
5037			/* It needs to be tossed */
5038			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5039			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5040				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5041				asoc->str_of_pdapi = chk->rec.data.stream_number;
5042				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5043				asoc->fragment_flags = chk->rec.data.rcv_flags;
5044			}
5045			asoc->size_on_reasm_queue -= chk->send_size;
5046			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5047
5048			/* Clear up any stream problem */
5049			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5050			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5051				/*
5052				 * Since the skipped chunk is ordered, we
5053				 * must move this stream's sequence number
5054				 * forward past it. Note that if the peer
5055				 * does not include the last fragment in
5056				 * its FWD-TSN, we WILL have a problem:
5057				 * a partial chunk would then sit in the
5058				 * queue and might never be deliverable.
5059				 * Also, if a partial delivery has started,
5060				 * the user may get a partial chunk, with
5061				 * the next read returning a new message.
5062				 * Really ugly, but there is no obvious
5063				 * way around it. Maybe a notification?
5064				 */
5065				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5066			}
5067			if (chk->data) {
5068				sctp_m_freem(chk->data);
5069				chk->data = NULL;
5070			}
5071			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5072		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5073			/*
5074			 * If the stream_seq is > than the purging one, we
5075			 * are done
5076			 */
5077			break;
5078		}
5079	}
5080}
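/*
 * For example, if a FWD-TSN skips (stream 2, seq 7), the caller invokes
 * sctp_flush_reassm_for_str_seq(stcb, asoc, 2, 7) and every ordered
 * fragment of that one message is dropped from the reassembly queue;
 * a fragment with a later SSN on the stream ends the scan early.
 */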
5081
5082
5083void
5084sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5085    struct sctp_forward_tsn_chunk *fwd,
5086    int *abort_flag, struct mbuf *m, int offset)
5087{
5088	/* The PR-SCTP FORWARD-TSN */
5089	/*
5090	 * Here we perform all the data receiver side steps for processing
5091	 * FwdTSN, as required by the PR-SCTP draft.
5092	 *
5093	 * Assume we get FwdTSN(x):
5094	 * 1) update the local cumTSN to x,
5095	 * 2) try to further advance the cumTSN to x + others we have,
5096	 * 3) examine and update the re-ordering queues on pr-in-streams,
5097	 * 4) clean up the re-assembly queue,
5098	 * 5) send a SACK to report where we are.
5099	 */
5100	struct sctp_association *asoc;
5101	uint32_t new_cum_tsn, gap;
5102	unsigned int i, fwd_sz, m_size;
5103	uint32_t str_seq;
5104	struct sctp_stream_in *strm;
5105	struct sctp_tmit_chunk *chk, *nchk;
5106	struct sctp_queued_to_read *ctl, *sv;
5107
5108	asoc = &stcb->asoc;
5109	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5110		SCTPDBG(SCTP_DEBUG_INDATA1,
5111		    "FWD-TSN chunk too small\n");
5112		return;
5113	}
5114	m_size = (stcb->asoc.mapping_array_size << 3);
5115	/*************************************************************/
5116	/* 1. Here we update local cumTSN and shift the bitmap array */
5117	/*************************************************************/
5118	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5119
5120	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5121		/* Already got there ... */
5122		return;
5123	}
5124	/*
5125	 * Now we know the new TSN is more advanced; find the actual
5126	 * gap.
5127	 */
5128	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5129	asoc->cumulative_tsn = new_cum_tsn;
5130	if (gap >= m_size) {
5131		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5132			struct mbuf *op_err;
5133			char msg[SCTP_DIAG_INFO_LEN];
5134
5135			/*
5136			 * The new cum ack is out of range (beyond the rwnd of
5137			 * single-byte chunks we give out). This must be an
5138			 * attacker.
5139			 */
5139			*abort_flag = 1;
5140			snprintf(msg, sizeof(msg),
5141			    "New cum ack %8.8x too high, highest TSN %8.8x",
5142			    new_cum_tsn, asoc->highest_tsn_inside_map);
5143			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5144			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5145			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5146			return;
5147		}
5148		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5149
5150		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5151		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5152		asoc->highest_tsn_inside_map = new_cum_tsn;
5153
5154		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5155		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5156
5157		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5158			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5159		}
5160	} else {
5161		SCTP_TCB_LOCK_ASSERT(stcb);
5162		for (i = 0; i <= gap; i++) {
5163			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5164			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5165				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5166				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5167					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5168				}
5169			}
5170		}
5171	}
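	/*
	 * Worked example of the gap handling above (hypothetical TSNs):
	 * with mapping_array_base_tsn = 1000 and new_cum_tsn = 1010,
	 * gap = 10 and any of slots 0..10 not already marked are set in the
	 * nr-mapping array. Had new_cum_tsn fallen outside the bitmap
	 * (gap >= m_size), both maps would instead be reset to begin at
	 * new_cum_tsn + 1.
	 */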
5172	/*************************************************************/
5173	/* 2. Clear up re-assembly queue                             */
5174	/*************************************************************/
5175	/*
5176	 * First, if a partial delivery (PD-API) is in progress, service the
5177	 * reassembly queue in case we can move it forward.
5178	 */
5179	if (asoc->fragmented_delivery_inprogress) {
5180		sctp_service_reassembly(stcb, asoc);
5181	}
5182	/* For each entry on the queue, see if it needs to be tossed. */
5183	/*
5184	 * For now, complete large messages held on the reassembly queue
5185	 * are tossed as well. In theory we could do more work: spin
5186	 * through, stop after dumping one message (i.e., on seeing the
5187	 * start of a new message at the head) and call the delivery
5188	 * function to see if it can be delivered. For now we just dump
5189	 * everything matching on the queue.
5190	 */
5191	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5192		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5193			/* It needs to be tossed */
5194			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5195			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5196				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5197				asoc->str_of_pdapi = chk->rec.data.stream_number;
5198				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5199				asoc->fragment_flags = chk->rec.data.rcv_flags;
5200			}
5201			asoc->size_on_reasm_queue -= chk->send_size;
5202			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5203
5204			/* Clear up any stream problem */
5205			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5206			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5207				/*
5208				 * Since the skipped chunk is ordered, we
5209				 * must move this stream's sequence number
5210				 * forward past it. Note that if the peer
5211				 * does not include the last fragment in
5212				 * its FWD-TSN, we WILL have a problem:
5213				 * a partial chunk would then sit in the
5214				 * queue and might never be deliverable.
5215				 * Also, if a partial delivery has started,
5216				 * the user may get a partial chunk, with
5217				 * the next read returning a new message.
5218				 * Really ugly, but there is no obvious
5219				 * way around it. Maybe a notification?
5220				 */
5221				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5222			}
5223			if (chk->data) {
5224				sctp_m_freem(chk->data);
5225				chk->data = NULL;
5226			}
5227			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5228		} else {
5229			/*
5230			 * Ok we have gone beyond the end of the fwd-tsn's
5231			 * mark.
5232			 */
5233			break;
5234		}
5235	}
5236	/*******************************************************/
5237	/* 3. Update the PR-stream re-ordering queues and fix  */
5238	/*    delivery issues as needed.                        */
5239	/*******************************************************/
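	/*
	 * Wire layout consumed below (per RFC 3758): after the common chunk
	 * header and new_cumulative_tsn of struct sctp_forward_tsn_chunk,
	 * the remaining fwd_sz bytes are an array of struct sctp_strseq
	 * entries, each a 16-bit stream number followed by a 16-bit stream
	 * sequence number, both in network byte order.
	 */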
5240	fwd_sz -= sizeof(*fwd);
5241	if (m && fwd_sz) {
5242		/* New method. */
5243		unsigned int num_str;
5244		struct sctp_strseq *stseq, strseqbuf;
5245
5246		offset += sizeof(*fwd);
5247
5248		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5249		num_str = fwd_sz / sizeof(struct sctp_strseq);
5250		for (i = 0; i < num_str; i++) {
5253			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5254			    sizeof(struct sctp_strseq),
5255			    (uint8_t *) & strseqbuf);
5256			offset += sizeof(struct sctp_strseq);
5257			if (stseq == NULL) {
5258				break;
5259			}
5260			/* Convert to host byte order in place */
5261			stseq->stream = ntohs(stseq->stream);
5262			stseq->sequence = ntohs(stseq->sequence);
5265
5266			/* now process */
5267
5268			/*
5269			 * Now look for the stream/seq on the read queue
5270			 * where it is not fully delivered. If we find it,
5271			 * we transmute the read entry into a PDI_ABORTED.
5272			 */
5273			if (stseq->stream >= asoc->streamincnt) {
5274				/* screwed up streams, stop!  */
5275				break;
5276			}
5277			if ((asoc->str_of_pdapi == stseq->stream) &&
5278			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5279				/*
5280				 * If this is the one we were partially
5281				 * delivering now then we no longer are.
5282				 * Note this will change with the reassembly
5283				 * re-write.
5284				 */
5285				asoc->fragmented_delivery_inprogress = 0;
5286			}
5287			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5288			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5289				if ((ctl->sinfo_stream == stseq->stream) &&
5290				    (ctl->sinfo_ssn == stseq->sequence)) {
5291					str_seq = (stseq->stream << 16) | stseq->sequence;
5292					ctl->end_added = 1;
5293					ctl->pdapi_aborted = 1;
5294					sv = stcb->asoc.control_pdapi;
5295					stcb->asoc.control_pdapi = ctl;
5296					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5297					    stcb,
5298					    SCTP_PARTIAL_DELIVERY_ABORTED,
5299					    (void *)&str_seq,
5300					    SCTP_SO_NOT_LOCKED);
5301					stcb->asoc.control_pdapi = sv;
5302					break;
5303				} else if ((ctl->sinfo_stream == stseq->stream) &&
5304				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5305					/* We are past our victim SSN */
5306					break;
5307				}
5308			}
5309			strm = &asoc->strmin[stseq->stream];
5310			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5311				/* Update the sequence number */
5312				strm->last_sequence_delivered = stseq->sequence;
5313			}
5314			/* now kick the stream the new way */
5315			/* sa_ignore NO_NULL_CHK */
5316			sctp_kick_prsctp_reorder_queue(stcb, strm);
5317		}
5318		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5319	}
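	/*
	 * Example of the PD-API abort above (hypothetical values): if a
	 * partial delivery of (stream 2, seq 7) was in progress and the
	 * FWD-TSN skips that message, the matching read-queue entry is
	 * marked end_added/pdapi_aborted and the user receives a
	 * partial-delivery event flagged SCTP_PARTIAL_DELIVERY_ABORTED
	 * instead of more data.
	 */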
5320	/*
5321	 * Now slide the mapping arrays forward.
5322	 */
5323	sctp_slide_mapping_arrays(stcb);
5324
5325	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5326		/* now let's kick out and check for more fragmented delivery */
5327		/* sa_ignore NO_NULL_CHK */
5328		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5329	}
5330}
5331