/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
57163953Srrs */ 58163953Srrs 59170806Srrsvoid 60163953Srrssctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 61163953Srrs{ 62179783Srrs asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); 63163953Srrs} 64163953Srrs 65163953Srrs/* Calculate what the rwnd would be */ 66170806Srrsuint32_t 67163953Srrssctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 68163953Srrs{ 69179783Srrs uint32_t calc = 0; 70163953Srrs 71163953Srrs /* 72163953Srrs * This is really set wrong with respect to a 1-2-m socket. Since 73163953Srrs * the sb_cc is the count that everyone as put up. When we re-write 74163953Srrs * sctp_soreceive then we will fix this so that ONLY this 75163953Srrs * associations data is taken into account. 76163953Srrs */ 77163953Srrs if (stcb->sctp_socket == NULL) 78163953Srrs return (calc); 79163953Srrs 80163953Srrs if (stcb->asoc.sb_cc == 0 && 81163953Srrs asoc->size_on_reasm_queue == 0 && 82163953Srrs asoc->size_on_all_streams == 0) { 83163953Srrs /* Full rwnd granted */ 84179783Srrs calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); 85163953Srrs return (calc); 86163953Srrs } 87163953Srrs /* get actual space */ 88163953Srrs calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 89163953Srrs 90163953Srrs /* 91163953Srrs * take out what has NOT been put on socket queue and we yet hold 92163953Srrs * for putting up. 
93163953Srrs */ 94210599Srrs calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue + 95210599Srrs asoc->cnt_on_reasm_queue * MSIZE)); 96210599Srrs calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams + 97210599Srrs asoc->cnt_on_all_streams * MSIZE)); 98163953Srrs 99163953Srrs if (calc == 0) { 100163953Srrs /* out of space */ 101163953Srrs return (calc); 102163953Srrs } 103163953Srrs /* what is the overhead of all these rwnd's */ 104171990Srrs calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 105179783Srrs /* 106179783Srrs * If the window gets too small due to ctrl-stuff, reduce it to 1, 107179783Srrs * even it is 0. SWS engaged 108179783Srrs */ 109179783Srrs if (calc < stcb->asoc.my_rwnd_control_len) { 110179783Srrs calc = 1; 111163953Srrs } 112179783Srrs return (calc); 113163953Srrs} 114163953Srrs 115163953Srrs 116163953Srrs 117163953Srrs/* 118163953Srrs * Build out our readq entry based on the incoming packet. 119163953Srrs */ 120163953Srrsstruct sctp_queued_to_read * 121163953Srrssctp_build_readq_entry(struct sctp_tcb *stcb, 122163953Srrs struct sctp_nets *net, 123163953Srrs uint32_t tsn, uint32_t ppid, 124163953Srrs uint32_t context, uint16_t stream_no, 125163953Srrs uint16_t stream_seq, uint8_t flags, 126163953Srrs struct mbuf *dm) 127163953Srrs{ 128163953Srrs struct sctp_queued_to_read *read_queue_e = NULL; 129163953Srrs 130163953Srrs sctp_alloc_a_readq(stcb, read_queue_e); 131163953Srrs if (read_queue_e == NULL) { 132163953Srrs goto failed_build; 133163953Srrs } 134163953Srrs read_queue_e->sinfo_stream = stream_no; 135163953Srrs read_queue_e->sinfo_ssn = stream_seq; 136163953Srrs read_queue_e->sinfo_flags = (flags << 8); 137163953Srrs read_queue_e->sinfo_ppid = ppid; 138231038Stuexen read_queue_e->sinfo_context = context; 139163953Srrs read_queue_e->sinfo_timetolive = 0; 140163953Srrs read_queue_e->sinfo_tsn = tsn; 141163953Srrs read_queue_e->sinfo_cumtsn = tsn; 142163953Srrs read_queue_e->sinfo_assoc_id = 
sctp_get_associd(stcb); 143163953Srrs read_queue_e->whoFrom = net; 144163953Srrs read_queue_e->length = 0; 145163953Srrs atomic_add_int(&net->ref_count, 1); 146163953Srrs read_queue_e->data = dm; 147165647Srrs read_queue_e->spec_flags = 0; 148163953Srrs read_queue_e->tail_mbuf = NULL; 149169352Srrs read_queue_e->aux_data = NULL; 150163953Srrs read_queue_e->stcb = stcb; 151163953Srrs read_queue_e->port_from = stcb->rport; 152163953Srrs read_queue_e->do_not_ref_stcb = 0; 153163953Srrs read_queue_e->end_added = 0; 154168943Srrs read_queue_e->some_taken = 0; 155164085Srrs read_queue_e->pdapi_aborted = 0; 156163953Srrsfailed_build: 157163953Srrs return (read_queue_e); 158163953Srrs} 159163953Srrs 160163953Srrs 161163953Srrs/* 162163953Srrs * Build out our readq entry based on the incoming packet. 163163953Srrs */ 164163953Srrsstatic struct sctp_queued_to_read * 165163953Srrssctp_build_readq_entry_chk(struct sctp_tcb *stcb, 166163953Srrs struct sctp_tmit_chunk *chk) 167163953Srrs{ 168163953Srrs struct sctp_queued_to_read *read_queue_e = NULL; 169163953Srrs 170163953Srrs sctp_alloc_a_readq(stcb, read_queue_e); 171163953Srrs if (read_queue_e == NULL) { 172163953Srrs goto failed_build; 173163953Srrs } 174163953Srrs read_queue_e->sinfo_stream = chk->rec.data.stream_number; 175163953Srrs read_queue_e->sinfo_ssn = chk->rec.data.stream_seq; 176163953Srrs read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8); 177163953Srrs read_queue_e->sinfo_ppid = chk->rec.data.payloadtype; 178163953Srrs read_queue_e->sinfo_context = stcb->asoc.context; 179163953Srrs read_queue_e->sinfo_timetolive = 0; 180163953Srrs read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq; 181163953Srrs read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq; 182163953Srrs read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 183163953Srrs read_queue_e->whoFrom = chk->whoTo; 184169352Srrs read_queue_e->aux_data = NULL; 185163953Srrs read_queue_e->length = 0; 186163953Srrs atomic_add_int(&chk->whoTo->ref_count, 1); 
187163953Srrs read_queue_e->data = chk->data; 188163953Srrs read_queue_e->tail_mbuf = NULL; 189163953Srrs read_queue_e->stcb = stcb; 190163953Srrs read_queue_e->port_from = stcb->rport; 191165647Srrs read_queue_e->spec_flags = 0; 192163953Srrs read_queue_e->do_not_ref_stcb = 0; 193163953Srrs read_queue_e->end_added = 0; 194168943Srrs read_queue_e->some_taken = 0; 195164085Srrs read_queue_e->pdapi_aborted = 0; 196163953Srrsfailed_build: 197163953Srrs return (read_queue_e); 198163953Srrs} 199163953Srrs 200163953Srrs 201163953Srrsstruct mbuf * 202223132Stuexensctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) 203163953Srrs{ 204223132Stuexen struct sctp_extrcvinfo *seinfo; 205163953Srrs struct sctp_sndrcvinfo *outinfo; 206223132Stuexen struct sctp_rcvinfo *rcvinfo; 207223132Stuexen struct sctp_nxtinfo *nxtinfo; 208163953Srrs struct cmsghdr *cmh; 209163953Srrs struct mbuf *ret; 210163953Srrs int len; 211223132Stuexen int use_extended; 212223132Stuexen int provide_nxt; 213163953Srrs 214223132Stuexen if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 215223132Stuexen sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 216223132Stuexen sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 217223132Stuexen /* user does not want any ancillary data */ 218163953Srrs return (NULL); 219163953Srrs } 220223132Stuexen len = 0; 221223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 222223132Stuexen len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 223223132Stuexen } 224223132Stuexen seinfo = (struct sctp_extrcvinfo *)sinfo; 225223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && 226223132Stuexen (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { 227223132Stuexen provide_nxt = 1; 228223132Stuexen len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 229163953Srrs } else { 230223132Stuexen provide_nxt = 0; 231163953Srrs } 232223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 
233223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 234223132Stuexen use_extended = 1; 235223132Stuexen len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 236223132Stuexen } else { 237223132Stuexen use_extended = 0; 238223132Stuexen len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 239223132Stuexen } 240223132Stuexen } else { 241223132Stuexen use_extended = 0; 242223132Stuexen } 243163953Srrs 244223132Stuexen ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA); 245163953Srrs if (ret == NULL) { 246163953Srrs /* No space */ 247163953Srrs return (ret); 248163953Srrs } 249223132Stuexen SCTP_BUF_LEN(ret) = 0; 250223132Stuexen 251223132Stuexen /* We need a CMSG header followed by the struct */ 252163953Srrs cmh = mtod(ret, struct cmsghdr *); 253268433Sdelphij /* 254268433Sdelphij * Make sure that there is no un-initialized padding between the 255268433Sdelphij * cmsg header and cmsg data and after the cmsg data. 256268433Sdelphij */ 257268433Sdelphij memset(cmh, 0, len); 258223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 259223132Stuexen cmh->cmsg_level = IPPROTO_SCTP; 260223132Stuexen cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); 261223132Stuexen cmh->cmsg_type = SCTP_RCVINFO; 262223132Stuexen rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); 263223132Stuexen rcvinfo->rcv_sid = sinfo->sinfo_stream; 264223132Stuexen rcvinfo->rcv_ssn = sinfo->sinfo_ssn; 265223132Stuexen rcvinfo->rcv_flags = sinfo->sinfo_flags; 266223132Stuexen rcvinfo->rcv_ppid = sinfo->sinfo_ppid; 267223132Stuexen rcvinfo->rcv_tsn = sinfo->sinfo_tsn; 268223132Stuexen rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; 269223132Stuexen rcvinfo->rcv_context = sinfo->sinfo_context; 270223132Stuexen rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; 271223132Stuexen cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); 272223132Stuexen SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 273163953Srrs } 274223132Stuexen 
if (provide_nxt) { 275223132Stuexen cmh->cmsg_level = IPPROTO_SCTP; 276223132Stuexen cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); 277223132Stuexen cmh->cmsg_type = SCTP_NXTINFO; 278223132Stuexen nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); 279223132Stuexen nxtinfo->nxt_sid = seinfo->sreinfo_next_stream; 280223132Stuexen nxtinfo->nxt_flags = 0; 281223132Stuexen if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { 282223132Stuexen nxtinfo->nxt_flags |= SCTP_UNORDERED; 283223132Stuexen } 284223132Stuexen if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { 285223132Stuexen nxtinfo->nxt_flags |= SCTP_NOTIFICATION; 286223132Stuexen } 287223132Stuexen if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { 288223132Stuexen nxtinfo->nxt_flags |= SCTP_COMPLETE; 289223132Stuexen } 290223132Stuexen nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid; 291223132Stuexen nxtinfo->nxt_length = seinfo->sreinfo_next_length; 292223132Stuexen nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid; 293223132Stuexen cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); 294223132Stuexen SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 295223132Stuexen } 296223132Stuexen if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 297223132Stuexen cmh->cmsg_level = IPPROTO_SCTP; 298223132Stuexen outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 299223132Stuexen if (use_extended) { 300223132Stuexen cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 301223132Stuexen cmh->cmsg_type = SCTP_EXTRCV; 302223132Stuexen memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); 303223132Stuexen SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 304223132Stuexen } else { 305223132Stuexen cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 306223132Stuexen cmh->cmsg_type = SCTP_SNDRCV; 307223132Stuexen *outinfo = *sinfo; 308223132Stuexen SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 
309223132Stuexen } 310223132Stuexen } 311163953Srrs return (ret); 312163953Srrs} 313163953Srrs 314165647Srrs 315205627Srrsstatic void 316205627Srrssctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) 317205627Srrs{ 318208902Srrs uint32_t gap, i, cumackp1; 319205627Srrs int fnd = 0; 320169352Srrs 321205627Srrs if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 322205627Srrs return; 323205627Srrs } 324208902Srrs cumackp1 = asoc->cumulative_tsn + 1; 325216825Stuexen if (SCTP_TSN_GT(cumackp1, tsn)) { 326208902Srrs /* 327208902Srrs * this tsn is behind the cum ack and thus we don't need to 328208902Srrs * worry about it being moved from one to the other. 329208902Srrs */ 330208902Srrs return; 331208902Srrs } 332205627Srrs SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 333205627Srrs if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 334235164Stuexen SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn); 335205627Srrs sctp_print_mapping_array(asoc); 336206137Stuexen#ifdef INVARIANTS 337205627Srrs panic("Things are really messed up now!!"); 338206137Stuexen#endif 339205627Srrs } 340205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 341205627Srrs SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 342216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 343205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 344205627Srrs } 345205627Srrs if (tsn == asoc->highest_tsn_inside_map) { 346205627Srrs /* We must back down to see what the new highest is */ 347216825Stuexen for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { 348205627Srrs SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); 349205627Srrs if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 350205627Srrs asoc->highest_tsn_inside_map = i; 351205627Srrs fnd = 1; 352205627Srrs break; 353205627Srrs } 354205627Srrs } 355205627Srrs if (!fnd) { 356205627Srrs asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; 357205627Srrs } 358205627Srrs } 
359205627Srrs} 360205627Srrs 361205627Srrs 362163953Srrs/* 363163953Srrs * We are delivering currently from the reassembly queue. We must continue to 364163953Srrs * deliver until we either: 1) run out of space. 2) run out of sequential 365163953Srrs * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag. 366163953Srrs */ 367163953Srrsstatic void 368163953Srrssctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc) 369163953Srrs{ 370216822Stuexen struct sctp_tmit_chunk *chk, *nchk; 371163953Srrs uint16_t nxt_todel; 372163953Srrs uint16_t stream_no; 373163953Srrs int end = 0; 374163953Srrs int cntDel; 375216822Stuexen struct sctp_queued_to_read *control, *ctl, *nctl; 376185694Srrs 377169420Srrs if (stcb == NULL) 378169420Srrs return; 379169420Srrs 380163953Srrs cntDel = stream_no = 0; 381169420Srrs if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 382172091Srrs (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) || 383169420Srrs (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 384172091Srrs /* socket above is long gone or going.. 
*/ 385172091Srrsabandon: 386163953Srrs asoc->fragmented_delivery_inprogress = 0; 387216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 388163953Srrs TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 389163953Srrs asoc->size_on_reasm_queue -= chk->send_size; 390163953Srrs sctp_ucount_decr(asoc->cnt_on_reasm_queue); 391163953Srrs /* 392163953Srrs * Lose the data pointer, since its in the socket 393163953Srrs * buffer 394163953Srrs */ 395163953Srrs if (chk->data) { 396163953Srrs sctp_m_freem(chk->data); 397163953Srrs chk->data = NULL; 398163953Srrs } 399163953Srrs /* Now free the address and data */ 400221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 401169655Srrs /* sa_ignore FREED_MEMORY */ 402163953Srrs } 403163953Srrs return; 404163953Srrs } 405163953Srrs SCTP_TCB_LOCK_ASSERT(stcb); 406216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 407163953Srrs if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) { 408163953Srrs /* Can't deliver more :< */ 409163953Srrs return; 410163953Srrs } 411163953Srrs stream_no = chk->rec.data.stream_number; 412163953Srrs nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1; 413163953Srrs if (nxt_todel != chk->rec.data.stream_seq && 414163953Srrs (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 415163953Srrs /* 416163953Srrs * Not the next sequence to deliver in its stream OR 417163953Srrs * unordered 418163953Srrs */ 419163953Srrs return; 420163953Srrs } 421163953Srrs if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 422163953Srrs 423163953Srrs control = sctp_build_readq_entry_chk(stcb, chk); 424163953Srrs if (control == NULL) { 425163953Srrs /* out of memory? 
*/ 426163953Srrs return; 427163953Srrs } 428163953Srrs /* save it off for our future deliveries */ 429163953Srrs stcb->asoc.control_pdapi = control; 430163953Srrs if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 431163953Srrs end = 1; 432163953Srrs else 433163953Srrs end = 0; 434206137Stuexen sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq); 435163953Srrs sctp_add_to_readq(stcb->sctp_ep, 436195918Srrs stcb, control, &stcb->sctp_socket->so_rcv, end, 437195918Srrs SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 438163953Srrs cntDel++; 439163953Srrs } else { 440163953Srrs if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 441163953Srrs end = 1; 442163953Srrs else 443163953Srrs end = 0; 444206137Stuexen sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq); 445163953Srrs if (sctp_append_to_readq(stcb->sctp_ep, stcb, 446163953Srrs stcb->asoc.control_pdapi, 447163953Srrs chk->data, end, chk->rec.data.TSN_seq, 448163953Srrs &stcb->sctp_socket->so_rcv)) { 449163953Srrs /* 450163953Srrs * something is very wrong, either 451163953Srrs * control_pdapi is NULL, or the tail_mbuf 452163953Srrs * is corrupt, or there is a EOM already on 453163953Srrs * the mbuf chain. 
454163953Srrs */ 455172091Srrs if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 456172091Srrs goto abandon; 457172091Srrs } else { 458182367Srrs#ifdef INVARIANTS 459172091Srrs if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) { 460172091Srrs panic("This should not happen control_pdapi NULL?"); 461172091Srrs } 462172091Srrs /* if we did not panic, it was a EOM */ 463172091Srrs panic("Bad chunking ??"); 464182367Srrs#else 465182367Srrs if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) { 466182367Srrs SCTP_PRINTF("This should not happen control_pdapi NULL?\n"); 467182367Srrs } 468182367Srrs SCTP_PRINTF("Bad chunking ??\n"); 469182367Srrs SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n"); 470182367Srrs 471182367Srrs#endif 472182367Srrs goto abandon; 473163953Srrs } 474163953Srrs } 475163953Srrs cntDel++; 476163953Srrs } 477163953Srrs /* pull it we did it */ 478163953Srrs TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 479163953Srrs if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 480163953Srrs asoc->fragmented_delivery_inprogress = 0; 481163953Srrs if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 482163953Srrs asoc->strmin[stream_no].last_sequence_delivered++; 483163953Srrs } 484163953Srrs if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 485163953Srrs SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 486163953Srrs } 487163953Srrs } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 488163953Srrs /* 489163953Srrs * turn the flag back on since we just delivered 490163953Srrs * yet another one. 
491163953Srrs */ 492163953Srrs asoc->fragmented_delivery_inprogress = 1; 493163953Srrs } 494163953Srrs asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; 495163953Srrs asoc->last_flags_delivered = chk->rec.data.rcv_flags; 496163953Srrs asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; 497163953Srrs asoc->last_strm_no_delivered = chk->rec.data.stream_number; 498163953Srrs 499163953Srrs asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 500163953Srrs asoc->size_on_reasm_queue -= chk->send_size; 501163953Srrs sctp_ucount_decr(asoc->cnt_on_reasm_queue); 502163953Srrs /* free up the chk */ 503163953Srrs chk->data = NULL; 504221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 505163953Srrs 506163953Srrs if (asoc->fragmented_delivery_inprogress == 0) { 507163953Srrs /* 508163953Srrs * Now lets see if we can deliver the next one on 509163953Srrs * the stream 510163953Srrs */ 511163953Srrs struct sctp_stream_in *strm; 512163953Srrs 513163953Srrs strm = &asoc->strmin[stream_no]; 514163953Srrs nxt_todel = strm->last_sequence_delivered + 1; 515216822Stuexen TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) { 516216822Stuexen /* Deliver more if we can. 
*/ 517216822Stuexen if (nxt_todel == ctl->sinfo_ssn) { 518216822Stuexen TAILQ_REMOVE(&strm->inqueue, ctl, next); 519216822Stuexen asoc->size_on_all_streams -= ctl->length; 520216822Stuexen sctp_ucount_decr(asoc->cnt_on_all_streams); 521216822Stuexen strm->last_sequence_delivered++; 522216822Stuexen sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 523216822Stuexen sctp_add_to_readq(stcb->sctp_ep, stcb, 524216822Stuexen ctl, 525216822Stuexen &stcb->sctp_socket->so_rcv, 1, 526216822Stuexen SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 527216822Stuexen } else { 528216822Stuexen break; 529163953Srrs } 530216822Stuexen nxt_todel = strm->last_sequence_delivered + 1; 531163953Srrs } 532165647Srrs break; 533163953Srrs } 534216822Stuexen } 535163953Srrs} 536163953Srrs 537163953Srrs/* 538163953Srrs * Queue the chunk either right into the socket buffer if it is the next one 539163953Srrs * to go OR put it in the correct place in the delivery queue. If we do 540163953Srrs * append to the so_buf, keep doing so until we are out of order. One big 541163953Srrs * question still remains, what to do when the socket buffer is FULL?? 542163953Srrs */ 543163953Srrsstatic void 544163953Srrssctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, 545163953Srrs struct sctp_queued_to_read *control, int *abort_flag) 546163953Srrs{ 547163953Srrs /* 548163953Srrs * FIX-ME maybe? What happens when the ssn wraps? If we are getting 549163953Srrs * all the data in one stream this could happen quite rapidly. One 550163953Srrs * could use the TSN to keep track of things, but this scheme breaks 551163953Srrs * down in the other type of stream useage that could occur. Send a 552163953Srrs * single msg to stream 0, send 4Billion messages to stream 1, now 553163953Srrs * send a message to stream 0. You have a situation where the TSN 554163953Srrs * has wrapped but not in the stream. 
Is this worth worrying about 555163953Srrs * or should we just change our queue sort at the bottom to be by 556163953Srrs * TSN. 557163953Srrs * 558163953Srrs * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 559163953Srrs * with TSN 1? If the peer is doing some sort of funky TSN/SSN 560163953Srrs * assignment this could happen... and I don't see how this would be 561163953Srrs * a violation. So for now I am undecided an will leave the sort by 562163953Srrs * SSN alone. Maybe a hybred approach is the answer 563163953Srrs * 564163953Srrs */ 565163953Srrs struct sctp_stream_in *strm; 566163953Srrs struct sctp_queued_to_read *at; 567163953Srrs int queue_needed; 568163953Srrs uint16_t nxt_todel; 569266181Stuexen struct mbuf *op_err; 570266181Stuexen char msg[SCTP_DIAG_INFO_LEN]; 571163953Srrs 572163953Srrs queue_needed = 1; 573163953Srrs asoc->size_on_all_streams += control->length; 574163953Srrs sctp_ucount_incr(asoc->cnt_on_all_streams); 575163953Srrs strm = &asoc->strmin[control->sinfo_stream]; 576163953Srrs nxt_todel = strm->last_sequence_delivered + 1; 577179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 578170744Srrs sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 579170744Srrs } 580169420Srrs SCTPDBG(SCTP_DEBUG_INDATA1, 581169420Srrs "queue to stream called for ssn:%u lastdel:%u nxt:%u\n", 582169420Srrs (uint32_t) control->sinfo_stream, 583169420Srrs (uint32_t) strm->last_sequence_delivered, 584169420Srrs (uint32_t) nxt_todel); 585216825Stuexen if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) { 586163953Srrs /* The incoming sseq is behind where we last delivered? 
*/ 587266181Stuexen SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n", 588169420Srrs control->sinfo_ssn, strm->last_sequence_delivered); 589178198Srrsprotocol_error: 590163953Srrs /* 591163953Srrs * throw it in the stream so it gets cleaned up in 592163953Srrs * association destruction 593163953Srrs */ 594163953Srrs TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 595266181Stuexen snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 596266181Stuexen strm->last_sequence_delivered, control->sinfo_tsn, 597266181Stuexen control->sinfo_stream, control->sinfo_ssn); 598266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 599165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 600266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 601163953Srrs *abort_flag = 1; 602163953Srrs return; 603163953Srrs 604163953Srrs } 605163953Srrs if (nxt_todel == control->sinfo_ssn) { 606163953Srrs /* can be delivered right away? 
*/ 607179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 608170744Srrs sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 609170744Srrs } 610185694Srrs /* EY it wont be queued if it could be delivered directly */ 611163953Srrs queue_needed = 0; 612163953Srrs asoc->size_on_all_streams -= control->length; 613163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 614163953Srrs strm->last_sequence_delivered++; 615205627Srrs 616206137Stuexen sctp_mark_non_revokable(asoc, control->sinfo_tsn); 617163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 618163953Srrs control, 619195918Srrs &stcb->sctp_socket->so_rcv, 1, 620195918Srrs SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 621216822Stuexen TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) { 622163953Srrs /* all delivered */ 623163953Srrs nxt_todel = strm->last_sequence_delivered + 1; 624163953Srrs if (nxt_todel == control->sinfo_ssn) { 625163953Srrs TAILQ_REMOVE(&strm->inqueue, control, next); 626163953Srrs asoc->size_on_all_streams -= control->length; 627163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 628163953Srrs strm->last_sequence_delivered++; 629163953Srrs /* 630163953Srrs * We ignore the return of deliver_data here 631163953Srrs * since we always can hold the chunk on the 632163953Srrs * d-queue. And we have a finite number that 633163953Srrs * can be delivered from the strq. 
634163953Srrs */ 635179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 636170744Srrs sctp_log_strm_del(control, NULL, 637170744Srrs SCTP_STR_LOG_FROM_IMMED_DEL); 638170744Srrs } 639206137Stuexen sctp_mark_non_revokable(asoc, control->sinfo_tsn); 640163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 641163953Srrs control, 642195918Srrs &stcb->sctp_socket->so_rcv, 1, 643195918Srrs SCTP_READ_LOCK_NOT_HELD, 644195918Srrs SCTP_SO_NOT_LOCKED); 645163953Srrs continue; 646163953Srrs } 647163953Srrs break; 648163953Srrs } 649163953Srrs } 650163953Srrs if (queue_needed) { 651163953Srrs /* 652163953Srrs * Ok, we did not deliver this guy, find the correct place 653163953Srrs * to put it on the queue. 654163953Srrs */ 655216825Stuexen if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) { 656178198Srrs goto protocol_error; 657178198Srrs } 658163953Srrs if (TAILQ_EMPTY(&strm->inqueue)) { 659163953Srrs /* Empty queue */ 660179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 661170744Srrs sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD); 662170744Srrs } 663163953Srrs TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 664163953Srrs } else { 665163953Srrs TAILQ_FOREACH(at, &strm->inqueue, next) { 666216825Stuexen if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) { 667163953Srrs /* 668163953Srrs * one in queue is bigger than the 669163953Srrs * new one, insert before this one 670163953Srrs */ 671179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 672170744Srrs sctp_log_strm_del(control, at, 673170744Srrs SCTP_STR_LOG_FROM_INSERT_MD); 674170744Srrs } 675163953Srrs TAILQ_INSERT_BEFORE(at, control, next); 676163953Srrs break; 677163953Srrs } else if (at->sinfo_ssn == control->sinfo_ssn) { 678163953Srrs /* 679163953Srrs * Gak, He sent me a duplicate str 680163953Srrs * seq number 681163953Srrs */ 682163953Srrs /* 683163953Srrs * foo bar, I guess I will just free 684163953Srrs * this 
new guy, should we abort 685163953Srrs * too? FIX ME MAYBE? Or it COULD be 686163953Srrs * that the SSN's have wrapped. 687163953Srrs * Maybe I should compare to TSN 688163953Srrs * somehow... sigh for now just blow 689163953Srrs * away the chunk! 690163953Srrs */ 691163953Srrs 692163953Srrs if (control->data) 693163953Srrs sctp_m_freem(control->data); 694163953Srrs control->data = NULL; 695163953Srrs asoc->size_on_all_streams -= control->length; 696163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 697212711Stuexen if (control->whoFrom) { 698171158Srrs sctp_free_remote_addr(control->whoFrom); 699212711Stuexen control->whoFrom = NULL; 700212711Stuexen } 701163953Srrs sctp_free_a_readq(stcb, control); 702163953Srrs return; 703163953Srrs } else { 704163953Srrs if (TAILQ_NEXT(at, next) == NULL) { 705163953Srrs /* 706163953Srrs * We are at the end, insert 707163953Srrs * it after this one 708163953Srrs */ 709179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 710170744Srrs sctp_log_strm_del(control, at, 711170744Srrs SCTP_STR_LOG_FROM_INSERT_TL); 712170744Srrs } 713163953Srrs TAILQ_INSERT_AFTER(&strm->inqueue, 714163953Srrs at, control, next); 715163953Srrs break; 716163953Srrs } 717163953Srrs } 718163953Srrs } 719163953Srrs } 720163953Srrs } 721163953Srrs} 722163953Srrs 723163953Srrs/* 724163953Srrs * Returns two things: You get the total size of the deliverable parts of the 725163953Srrs * first fragmented message on the reassembly queue. 
And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	/*
	 * Walk the queue; fragments of one message must carry strictly
	 * consecutive TSNs, so any gap means the message is incomplete.
	 */
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* hit the E-bit: whole message is on the queue */
			return (1);
		}
		tsn++;
	}
	return (0);
}

/*
 * Decide whether the reassembly queue can start (or continue) delivering a
 * fragmented message.  When no fragmented delivery is in progress, delivery
 * starts only if the head fragment has the FIRST (B) bit, is next in stream
 * order (or unordered), and either the complete message is queued or at
 * least pd_point bytes of it have accumulated.  When a fragmented delivery
 * IS in progress, keep servicing the queue until it finishes, then loop to
 * see if another message became deliverable.
 */
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh?  Empty queue: make the accounting agree. */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			/*
			 * pd_point is the partial-delivery threshold: the
			 * smaller of the endpoint setting and a fraction of
			 * the receive-buffer limit (when a socket exists).
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it wont know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place (TSN order).  After
 * dumping on the queue, audit the new chunk's B/E/U flags against its
 * immediate TSN neighbours; any violation aborts the association with a
 * protocol-violation cause and sets *abort_flag.  Otherwise see if anything
 * can be delivered.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			/* The new chunk is exactly the next expected TSN. */
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place (queue is kept in ascending TSN order) */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/*
	 * Now the audits: when the new chunk is TSN-adjacent to a neighbour,
	 * the fragment flags of the pair must form a legal sequence.
	 */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of valdiation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					/*
					 * NOTE(review): SCTP_LOC_7 is also
					 * used by the SID check above; a
					 * distinct location code would make
					 * last_abort_code unambiguous.
					 */
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of valdiation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					/*
					 * NOTE(review): SCTP_LOC_12 is also
					 * used by the SID check above; a
					 * distinct location code would make
					 * last_abort_code unambiguous.
					 */
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure a evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is to bad I must worry about evil crackers sigh
 * :< more cycles.
 *
 * Returns 1 if placing TSN_seq next to an existing fragment on the
 * reassembly queue would violate FIRST/LAST fragment sequencing (or if it
 * duplicates a queued TSN), 0 if the TSN is acceptable.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self- contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	/* The I-bit asks the receiver to SACK without delay. */
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
SCTP_MAP_TSN_ENTERS); 1291170744Srrs } 1292169420Srrs if (stcb == NULL) { 1293169420Srrs return (0); 1294169420Srrs } 1295170744Srrs SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn); 1296216825Stuexen if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1297163953Srrs /* It is a duplicate */ 1298163953Srrs SCTP_STAT_INCR(sctps_recvdupdata); 1299163953Srrs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1300163953Srrs /* Record a dup for the next outbound sack */ 1301163953Srrs asoc->dup_tsns[asoc->numduptsns] = tsn; 1302163953Srrs asoc->numduptsns++; 1303163953Srrs } 1304172703Srrs asoc->send_sack = 1; 1305163953Srrs return (0); 1306163953Srrs } 1307163953Srrs /* Calculate the number of TSN's between the base and this TSN */ 1308194355Srrs SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1309163953Srrs if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1310163953Srrs /* Can't hold the bit in the mapping at max array, toss it */ 1311163953Srrs return (0); 1312163953Srrs } 1313163953Srrs if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { 1314170091Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1315170138Srrs if (sctp_expand_mapping_array(asoc, gap)) { 1316163953Srrs /* Can't expand, drop it */ 1317163953Srrs return (0); 1318163953Srrs } 1319163953Srrs } 1320216825Stuexen if (SCTP_TSN_GT(tsn, *high_tsn)) { 1321163953Srrs *high_tsn = tsn; 1322163953Srrs } 1323163953Srrs /* See if we have received this one already */ 1324205627Srrs if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1325205627Srrs SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1326163953Srrs SCTP_STAT_INCR(sctps_recvdupdata); 1327163953Srrs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1328163953Srrs /* Record a dup for the next outbound sack */ 1329163953Srrs asoc->dup_tsns[asoc->numduptsns] = tsn; 1330163953Srrs asoc->numduptsns++; 1331163953Srrs } 1332167598Srrs asoc->send_sack = 1; 1333163953Srrs return (0); 1334163953Srrs } 1335163953Srrs /* 1336163953Srrs * Check to see about the GONE flag, 
duplicates would cause a sack 1337163953Srrs * to be sent up above 1338163953Srrs */ 1339169420Srrs if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1340163953Srrs (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1341266181Stuexen (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1342163953Srrs /* 1343163953Srrs * wait a minute, this guy is gone, there is no longer a 1344163953Srrs * receiver. Send peer an ABORT! 1345163953Srrs */ 1346266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1347237884Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1348163953Srrs *abort_flag = 1; 1349163953Srrs return (0); 1350163953Srrs } 1351163953Srrs /* 1352163953Srrs * Now before going further we see if there is room. If NOT then we 1353163953Srrs * MAY let one through only IF this TSN is the one we are waiting 1354163953Srrs * for on a partial delivery API. 1355163953Srrs */ 1356163953Srrs 1357163953Srrs /* now do the tests */ 1358163953Srrs if (((asoc->cnt_on_all_streams + 1359163953Srrs asoc->cnt_on_reasm_queue + 1360179783Srrs asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1361163953Srrs (((int)asoc->my_rwnd) <= 0)) { 1362163953Srrs /* 1363163953Srrs * When we have NO room in the rwnd we check to make sure 1364163953Srrs * the reader is doing its job... 
1365163953Srrs */ 1366163953Srrs if (stcb->sctp_socket->so_rcv.sb_cc) { 1367163953Srrs /* some to read, wake-up */ 1368252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1369172090Srrs struct socket *so; 1370172090Srrs 1371172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 1372172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 1373172090Srrs SCTP_TCB_UNLOCK(stcb); 1374172090Srrs SCTP_SOCKET_LOCK(so, 1); 1375172090Srrs SCTP_TCB_LOCK(stcb); 1376172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 1377172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1378172090Srrs /* assoc was freed while we were unlocked */ 1379172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 1380172090Srrs return (0); 1381172090Srrs } 1382172090Srrs#endif 1383163953Srrs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1384252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1385172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 1386172090Srrs#endif 1387163953Srrs } 1388163953Srrs /* now is it in the mapping array of what we have accepted? 
*/ 1389216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1390216825Stuexen SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1391163953Srrs /* Nope not in the valid range dump it */ 1392163953Srrs sctp_set_rwnd(stcb, asoc); 1393163953Srrs if ((asoc->cnt_on_all_streams + 1394163953Srrs asoc->cnt_on_reasm_queue + 1395179783Srrs asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1396163953Srrs SCTP_STAT_INCR(sctps_datadropchklmt); 1397163953Srrs } else { 1398163953Srrs SCTP_STAT_INCR(sctps_datadroprwnd); 1399163953Srrs } 1400163953Srrs *break_flag = 1; 1401163953Srrs return (0); 1402163953Srrs } 1403163953Srrs } 1404163953Srrs strmno = ntohs(ch->dp.stream_id); 1405163953Srrs if (strmno >= asoc->streamincnt) { 1406163953Srrs struct sctp_paramhdr *phdr; 1407163953Srrs struct mbuf *mb; 1408163953Srrs 1409163953Srrs mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), 1410165647Srrs 0, M_DONTWAIT, 1, MT_DATA); 1411163953Srrs if (mb != NULL) { 1412163953Srrs /* add some space up front so prepend will work well */ 1413165647Srrs SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr)); 1414163953Srrs phdr = mtod(mb, struct sctp_paramhdr *); 1415163953Srrs /* 1416163953Srrs * Error causes are just param's and this one has 1417163953Srrs * two back to back phdr, one with the error type 1418163953Srrs * and size, the other with the streamid and a rsvd 1419163953Srrs */ 1420165647Srrs SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2); 1421163953Srrs phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); 1422163953Srrs phdr->param_length = 1423163953Srrs htons(sizeof(struct sctp_paramhdr) * 2); 1424163953Srrs phdr++; 1425163953Srrs /* We insert the stream in the type field */ 1426163953Srrs phdr->param_type = ch->dp.stream_id; 1427163953Srrs /* And set the length to 0 for the rsvd field */ 1428163953Srrs phdr->param_length = 0; 1429163953Srrs sctp_queue_op_err(stcb, mb); 1430163953Srrs } 1431163953Srrs SCTP_STAT_INCR(sctps_badsid); 
1432170091Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1433205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1434216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1435205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1436185694Srrs } 1437169208Srrs if (tsn == (asoc->cumulative_tsn + 1)) { 1438169208Srrs /* Update cum-ack */ 1439169208Srrs asoc->cumulative_tsn = tsn; 1440169208Srrs } 1441163953Srrs return (0); 1442163953Srrs } 1443163953Srrs /* 1444163953Srrs * Before we continue lets validate that we are not being fooled by 1445163953Srrs * an evil attacker. We can only have 4k chunks based on our TSN 1446163953Srrs * spread allowed by the mapping array 512 * 8 bits, so there is no 1447163953Srrs * way our stream sequence numbers could have wrapped. We of course 1448163953Srrs * only validate the FIRST fragment so the bit must be set. 1449163953Srrs */ 1450163953Srrs strmseq = ntohs(ch->dp.stream_sequence); 1451166675Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 1452171477Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1453171477Srrs if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1454171477Srrs asoc->tsn_in_at = 0; 1455171477Srrs asoc->tsn_in_wrapped = 1; 1456171477Srrs } 1457166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1458166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1459166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; 1460168859Srrs asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1461168859Srrs asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1462171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1463171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1464171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1465166675Srrs asoc->tsn_in_at++; 1466166675Srrs#endif 1467166675Srrs if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1468170056Srrs (TAILQ_EMPTY(&asoc->resetHead)) && 1469166675Srrs (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1470216825Stuexen SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, 
strmseq)) { 1471163953Srrs /* The incoming sseq is behind where we last delivered? */ 1472169420Srrs SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1473169420Srrs strmseq, asoc->strmin[strmno].last_sequence_delivered); 1474266181Stuexen snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1475266181Stuexen asoc->strmin[strmno].last_sequence_delivered, 1476266181Stuexen tsn, strmno, strmseq); 1477266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1478165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1479266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1480163953Srrs *abort_flag = 1; 1481163953Srrs return (0); 1482163953Srrs } 1483166675Srrs /************************************ 1484166675Srrs * From here down we may find ch-> invalid 1485166675Srrs * so its a good idea NOT to use it. 1486166675Srrs *************************************/ 1487166675Srrs 1488163953Srrs the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1489163953Srrs if (last_chunk == 0) { 1490166023Srrs dmbuf = SCTP_M_COPYM(*m, 1491163953Srrs (offset + sizeof(struct sctp_data_chunk)), 1492163953Srrs the_len, M_DONTWAIT); 1493163953Srrs#ifdef SCTP_MBUF_LOGGING 1494179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1495163953Srrs struct mbuf *mat; 1496163953Srrs 1497231039Stuexen for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) { 1498165647Srrs if (SCTP_BUF_IS_EXTENDED(mat)) { 1499163953Srrs sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1500163953Srrs } 1501163953Srrs } 1502163953Srrs } 1503163953Srrs#endif 1504163953Srrs } else { 1505163953Srrs /* We can steal the last chunk */ 1506165647Srrs int l_len; 1507165647Srrs 1508163953Srrs dmbuf = *m; 1509163953Srrs /* lop off the top part */ 1510163953Srrs m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1511165647Srrs if (SCTP_BUF_NEXT(dmbuf) == 
NULL) { 1512165647Srrs l_len = SCTP_BUF_LEN(dmbuf); 1513165647Srrs } else { 1514165647Srrs /* 1515165647Srrs * need to count up the size hopefully does not hit 1516165647Srrs * this to often :-0 1517165647Srrs */ 1518165647Srrs struct mbuf *lat; 1519165647Srrs 1520165647Srrs l_len = 0; 1521231039Stuexen for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1522165647Srrs l_len += SCTP_BUF_LEN(lat); 1523165647Srrs } 1524165647Srrs } 1525165647Srrs if (l_len > the_len) { 1526163953Srrs /* Trim the end round bytes off too */ 1527165647Srrs m_adj(dmbuf, -(l_len - the_len)); 1528163953Srrs } 1529163953Srrs } 1530163953Srrs if (dmbuf == NULL) { 1531163953Srrs SCTP_STAT_INCR(sctps_nomem); 1532163953Srrs return (0); 1533163953Srrs } 1534166675Srrs if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1535163953Srrs asoc->fragmented_delivery_inprogress == 0 && 1536163953Srrs TAILQ_EMPTY(&asoc->resetHead) && 1537166675Srrs ((ordered == 0) || 1538216480Stuexen ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1539163953Srrs TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1540163953Srrs /* Candidate for express delivery */ 1541163953Srrs /* 1542163953Srrs * Its not fragmented, No PD-API is up, Nothing in the 1543163953Srrs * delivery queue, Its un-ordered OR ordered and the next to 1544163953Srrs * deliver AND nothing else is stuck on the stream queue, 1545163953Srrs * And there is room for it in the socket buffer. Lets just 1546163953Srrs * stuff it up the buffer.... 
1547163953Srrs */ 1548163953Srrs 1549163953Srrs /* It would be nice to avoid this copy if we could :< */ 1550163953Srrs sctp_alloc_a_readq(stcb, control); 1551163953Srrs sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1552166675Srrs protocol_id, 1553163953Srrs strmno, strmseq, 1554166675Srrs chunk_flags, 1555163953Srrs dmbuf); 1556163953Srrs if (control == NULL) { 1557163953Srrs goto failed_express_del; 1558163953Srrs } 1559212897Stuexen SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1560216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1561212897Stuexen asoc->highest_tsn_inside_nr_map = tsn; 1562212897Stuexen } 1563195918Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 1564195918Srrs control, &stcb->sctp_socket->so_rcv, 1565195918Srrs 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1566185694Srrs 1567166675Srrs if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1568163953Srrs /* for ordered, bump what we delivered */ 1569163953Srrs asoc->strmin[strmno].last_sequence_delivered++; 1570163953Srrs } 1571163953Srrs SCTP_STAT_INCR(sctps_recvexpress); 1572179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1573170744Srrs sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1574170744Srrs SCTP_STR_LOG_FROM_EXPRS_DEL); 1575170744Srrs } 1576163953Srrs control = NULL; 1577206137Stuexen 1578163953Srrs goto finish_express_del; 1579163953Srrs } 1580163953Srrsfailed_express_del: 1581163953Srrs /* If we reach here this is a new chunk */ 1582163953Srrs chk = NULL; 1583163953Srrs control = NULL; 1584163953Srrs /* Express for fragmented delivery? 
*/ 1585163953Srrs if ((asoc->fragmented_delivery_inprogress) && 1586163953Srrs (stcb->asoc.control_pdapi) && 1587163953Srrs (asoc->str_of_pdapi == strmno) && 1588163953Srrs (asoc->ssn_of_pdapi == strmseq) 1589163953Srrs ) { 1590163953Srrs control = stcb->asoc.control_pdapi; 1591166675Srrs if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1592163953Srrs /* Can't be another first? */ 1593163953Srrs goto failed_pdapi_express_del; 1594163953Srrs } 1595163953Srrs if (tsn == (control->sinfo_tsn + 1)) { 1596163953Srrs /* Yep, we can add it on */ 1597163953Srrs int end = 0; 1598163953Srrs 1599166675Srrs if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1600163953Srrs end = 1; 1601163953Srrs } 1602163953Srrs if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1603163953Srrs tsn, 1604163953Srrs &stcb->sctp_socket->so_rcv)) { 1605169420Srrs SCTP_PRINTF("Append fails end:%d\n", end); 1606163953Srrs goto failed_pdapi_express_del; 1607163953Srrs } 1608205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1609216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1610205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1611185694Srrs } 1612163953Srrs SCTP_STAT_INCR(sctps_recvexpressm); 1613163953Srrs asoc->tsn_last_delivered = tsn; 1614166675Srrs asoc->fragment_flags = chunk_flags; 1615163953Srrs asoc->tsn_of_pdapi_last_delivered = tsn; 1616166675Srrs asoc->last_flags_delivered = chunk_flags; 1617163953Srrs asoc->last_strm_seq_delivered = strmseq; 1618163953Srrs asoc->last_strm_no_delivered = strmno; 1619163953Srrs if (end) { 1620163953Srrs /* clean up the flags and such */ 1621163953Srrs asoc->fragmented_delivery_inprogress = 0; 1622166675Srrs if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1623165220Srrs asoc->strmin[strmno].last_sequence_delivered++; 1624165220Srrs } 1625163953Srrs stcb->asoc.control_pdapi = NULL; 1626165647Srrs if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1627165647Srrs /* 1628165647Srrs * There could be another 
message 1629165647Srrs * ready 1630165647Srrs */ 1631165647Srrs need_reasm_check = 1; 1632165647Srrs } 1633163953Srrs } 1634163953Srrs control = NULL; 1635163953Srrs goto finish_express_del; 1636163953Srrs } 1637163953Srrs } 1638163953Srrsfailed_pdapi_express_del: 1639163953Srrs control = NULL; 1640205627Srrs if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1641205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1642216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1643205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1644205627Srrs } 1645205627Srrs } else { 1646205627Srrs SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 1647216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 1648205627Srrs asoc->highest_tsn_inside_map = tsn; 1649205627Srrs } 1650205627Srrs } 1651166675Srrs if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1652163953Srrs sctp_alloc_a_chunk(stcb, chk); 1653163953Srrs if (chk == NULL) { 1654163953Srrs /* No memory so we drop the chunk */ 1655163953Srrs SCTP_STAT_INCR(sctps_nomem); 1656163953Srrs if (last_chunk == 0) { 1657163953Srrs /* we copied it, free the copy */ 1658163953Srrs sctp_m_freem(dmbuf); 1659163953Srrs } 1660163953Srrs return (0); 1661163953Srrs } 1662163953Srrs chk->rec.data.TSN_seq = tsn; 1663163953Srrs chk->no_fr_allowed = 0; 1664163953Srrs chk->rec.data.stream_seq = strmseq; 1665163953Srrs chk->rec.data.stream_number = strmno; 1666166675Srrs chk->rec.data.payloadtype = protocol_id; 1667163953Srrs chk->rec.data.context = stcb->asoc.context; 1668163953Srrs chk->rec.data.doing_fast_retransmit = 0; 1669166675Srrs chk->rec.data.rcv_flags = chunk_flags; 1670163953Srrs chk->asoc = asoc; 1671163953Srrs chk->send_size = the_len; 1672163953Srrs chk->whoTo = net; 1673163953Srrs atomic_add_int(&net->ref_count, 1); 1674163953Srrs chk->data = dmbuf; 1675163953Srrs } else { 1676163953Srrs sctp_alloc_a_readq(stcb, control); 1677163953Srrs sctp_build_readq_entry_mac(control, stcb, asoc->context, 
net, tsn, 1678166675Srrs protocol_id, 1679163953Srrs strmno, strmseq, 1680166675Srrs chunk_flags, 1681163953Srrs dmbuf); 1682163953Srrs if (control == NULL) { 1683163953Srrs /* No memory so we drop the chunk */ 1684163953Srrs SCTP_STAT_INCR(sctps_nomem); 1685163953Srrs if (last_chunk == 0) { 1686163953Srrs /* we copied it, free the copy */ 1687163953Srrs sctp_m_freem(dmbuf); 1688163953Srrs } 1689163953Srrs return (0); 1690163953Srrs } 1691163953Srrs control->length = the_len; 1692163953Srrs } 1693163953Srrs 1694163953Srrs /* Mark it as received */ 1695163953Srrs /* Now queue it where it belongs */ 1696163953Srrs if (control != NULL) { 1697163953Srrs /* First a sanity check */ 1698163953Srrs if (asoc->fragmented_delivery_inprogress) { 1699163953Srrs /* 1700163953Srrs * Ok, we have a fragmented delivery in progress if 1701163953Srrs * this chunk is next to deliver OR belongs in our 1702163953Srrs * view to the reassembly, the peer is evil or 1703163953Srrs * broken. 1704163953Srrs */ 1705163953Srrs uint32_t estimate_tsn; 1706163953Srrs 1707163953Srrs estimate_tsn = asoc->tsn_last_delivered + 1; 1708163953Srrs if (TAILQ_EMPTY(&asoc->reasmqueue) && 1709163953Srrs (estimate_tsn == control->sinfo_tsn)) { 1710163953Srrs /* Evil/Broke peer */ 1711163953Srrs sctp_m_freem(control->data); 1712163953Srrs control->data = NULL; 1713171158Srrs if (control->whoFrom) { 1714171158Srrs sctp_free_remote_addr(control->whoFrom); 1715171158Srrs control->whoFrom = NULL; 1716171158Srrs } 1717163953Srrs sctp_free_a_readq(stcb, control); 1718266181Stuexen snprintf(msg, sizeof(msg), "Reas. 
queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1719266181Stuexen tsn, strmno, strmseq); 1720266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1721165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1722266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1723163953Srrs *abort_flag = 1; 1724266190Stuexen if (last_chunk) { 1725266190Stuexen *m = NULL; 1726266190Stuexen } 1727163953Srrs return (0); 1728163953Srrs } else { 1729163953Srrs if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1730163953Srrs sctp_m_freem(control->data); 1731163953Srrs control->data = NULL; 1732171158Srrs if (control->whoFrom) { 1733171158Srrs sctp_free_remote_addr(control->whoFrom); 1734171158Srrs control->whoFrom = NULL; 1735171158Srrs } 1736163953Srrs sctp_free_a_readq(stcb, control); 1737266181Stuexen snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1738266181Stuexen tsn, strmno, strmseq); 1739266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1740165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1741266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1742163953Srrs *abort_flag = 1; 1743266190Stuexen if (last_chunk) { 1744266190Stuexen *m = NULL; 1745266190Stuexen } 1746163953Srrs return (0); 1747163953Srrs } 1748163953Srrs } 1749163953Srrs } else { 1750163953Srrs /* No PDAPI running */ 1751163953Srrs if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1752163953Srrs /* 1753163953Srrs * Reassembly queue is NOT empty validate 1754163953Srrs * that this tsn does not need to be in 1755163953Srrs * reasembly queue. If it does then our peer 1756163953Srrs * is broken or evil. 
1757163953Srrs */ 1758163953Srrs if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1759163953Srrs sctp_m_freem(control->data); 1760163953Srrs control->data = NULL; 1761171158Srrs if (control->whoFrom) { 1762171158Srrs sctp_free_remote_addr(control->whoFrom); 1763171158Srrs control->whoFrom = NULL; 1764171158Srrs } 1765163953Srrs sctp_free_a_readq(stcb, control); 1766266181Stuexen snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1767266181Stuexen tsn, strmno, strmseq); 1768266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1769165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1770266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1771163953Srrs *abort_flag = 1; 1772266190Stuexen if (last_chunk) { 1773266190Stuexen *m = NULL; 1774266190Stuexen } 1775163953Srrs return (0); 1776163953Srrs } 1777163953Srrs } 1778163953Srrs } 1779163953Srrs /* ok, if we reach here we have passed the sanity checks */ 1780166675Srrs if (chunk_flags & SCTP_DATA_UNORDERED) { 1781163953Srrs /* queue directly into socket buffer */ 1782206137Stuexen sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1783163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 1784163953Srrs control, 1785195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1786163953Srrs } else { 1787163953Srrs /* 1788163953Srrs * Special check for when streams are resetting. We 1789163953Srrs * could be more smart about this and check the 1790163953Srrs * actual stream to see if it is not being reset.. 1791163953Srrs * that way we would not create a HOLB when amongst 1792163953Srrs * streams being reset and those not being reset. 
1793163953Srrs * 1794163953Srrs * We take complete messages that have a stream reset 1795163953Srrs * intervening (aka the TSN is after where our 1796163953Srrs * cum-ack needs to be) off and put them on a 1797163953Srrs * pending_reply_queue. The reassembly ones we do 1798163953Srrs * not have to worry about since they are all sorted 1799163953Srrs * and proceessed by TSN order. It is only the 1800163953Srrs * singletons I must worry about. 1801163953Srrs */ 1802163953Srrs if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1803216825Stuexen SCTP_TSN_GT(tsn, liste->tsn)) { 1804163953Srrs /* 1805163953Srrs * yep its past where we need to reset... go 1806163953Srrs * ahead and queue it. 1807163953Srrs */ 1808163953Srrs if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 1809163953Srrs /* first one on */ 1810163953Srrs TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1811163953Srrs } else { 1812216822Stuexen struct sctp_queued_to_read *ctlOn, 1813216822Stuexen *nctlOn; 1814163953Srrs unsigned char inserted = 0; 1815163953Srrs 1816216822Stuexen TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 1817216825Stuexen if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 1818216822Stuexen continue; 1819163953Srrs } else { 1820163953Srrs /* found it */ 1821163953Srrs TAILQ_INSERT_BEFORE(ctlOn, control, next); 1822163953Srrs inserted = 1; 1823163953Srrs break; 1824163953Srrs } 1825163953Srrs } 1826163953Srrs if (inserted == 0) { 1827163953Srrs /* 1828163953Srrs * must be put at end, use 1829163953Srrs * prevP (all setup from 1830163953Srrs * loop) to setup nextP. 
1831163953Srrs */ 1832163953Srrs TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1833163953Srrs } 1834163953Srrs } 1835163953Srrs } else { 1836163953Srrs sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 1837163953Srrs if (*abort_flag) { 1838266190Stuexen if (last_chunk) { 1839266190Stuexen *m = NULL; 1840266190Stuexen } 1841163953Srrs return (0); 1842163953Srrs } 1843163953Srrs } 1844163953Srrs } 1845163953Srrs } else { 1846163953Srrs /* Into the re-assembly queue */ 1847163953Srrs sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 1848163953Srrs if (*abort_flag) { 1849165220Srrs /* 1850165220Srrs * the assoc is now gone and chk was put onto the 1851165220Srrs * reasm queue, which has all been freed. 1852165220Srrs */ 1853266190Stuexen if (last_chunk) { 1854266190Stuexen *m = NULL; 1855266190Stuexen } 1856163953Srrs return (0); 1857163953Srrs } 1858163953Srrs } 1859163953Srrsfinish_express_del: 1860206840Stuexen if (tsn == (asoc->cumulative_tsn + 1)) { 1861206840Stuexen /* Update cum-ack */ 1862206840Stuexen asoc->cumulative_tsn = tsn; 1863206840Stuexen } 1864163953Srrs if (last_chunk) { 1865163953Srrs *m = NULL; 1866163953Srrs } 1867166675Srrs if (ordered) { 1868163953Srrs SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 1869163953Srrs } else { 1870163953Srrs SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 1871163953Srrs } 1872163953Srrs SCTP_STAT_INCR(sctps_recvdata); 1873163953Srrs /* Set it present please */ 1874179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1875170744Srrs sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 1876170744Srrs } 1877179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1878170744Srrs sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 1879170744Srrs asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 1880170744Srrs } 1881169352Srrs /* check the special flag for stream resets */ 1882169352Srrs if 
(((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1883216825Stuexen SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 1884169352Srrs /* 1885169352Srrs * we have finished working through the backlogged TSN's now 1886169352Srrs * time to reset streams. 1: call reset function. 2: free 1887169352Srrs * pending_reply space 3: distribute any chunks in 1888169352Srrs * pending_reply_queue. 1889169352Srrs */ 1890216822Stuexen struct sctp_queued_to_read *ctl, *nctl; 1891169352Srrs 1892252929Stuexen sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 1893169352Srrs TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 1894170091Srrs SCTP_FREE(liste, SCTP_M_STRESET); 1895169655Srrs /* sa_ignore FREED_MEMORY */ 1896169352Srrs liste = TAILQ_FIRST(&asoc->resetHead); 1897216822Stuexen if (TAILQ_EMPTY(&asoc->resetHead)) { 1898169352Srrs /* All can be removed */ 1899216822Stuexen TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1900169352Srrs TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1901169352Srrs sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1902169352Srrs if (*abort_flag) { 1903169352Srrs return (0); 1904169352Srrs } 1905169352Srrs } 1906216822Stuexen } else { 1907216822Stuexen TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1908216825Stuexen if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 1909216822Stuexen break; 1910216822Stuexen } 1911169352Srrs /* 1912169352Srrs * if ctl->sinfo_tsn is <= liste->tsn we can 1913169352Srrs * process it which is the NOT of 1914169352Srrs * ctl->sinfo_tsn > liste->tsn 1915169352Srrs */ 1916169352Srrs TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1917169352Srrs sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1918169352Srrs if (*abort_flag) { 1919169352Srrs return (0); 1920169352Srrs } 1921169352Srrs } 1922169352Srrs } 1923169352Srrs /* 1924169352Srrs * Now service re-assembly to pick up anything that has been 1925169352Srrs * held on reassembly queue? 
1926169352Srrs */ 1927169352Srrs sctp_deliver_reasm_check(stcb, asoc); 1928169352Srrs need_reasm_check = 0; 1929169352Srrs } 1930165647Srrs if (need_reasm_check) { 1931165647Srrs /* Another one waits ? */ 1932165647Srrs sctp_deliver_reasm_check(stcb, asoc); 1933165647Srrs } 1934163953Srrs return (1); 1935163953Srrs} 1936163953Srrs 1937163953Srrsint8_t sctp_map_lookup_tab[256] = { 1938206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1939206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1940206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1941206137Stuexen 0, 1, 0, 2, 0, 1, 0, 5, 1942206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1943206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1944206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1945206137Stuexen 0, 1, 0, 2, 0, 1, 0, 6, 1946206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1947206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1948206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1949206137Stuexen 0, 1, 0, 2, 0, 1, 0, 5, 1950206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1951206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1952206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1953206137Stuexen 0, 1, 0, 2, 0, 1, 0, 7, 1954206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1955206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1956206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1957206137Stuexen 0, 1, 0, 2, 0, 1, 0, 5, 1958206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1959206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1960206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1961206137Stuexen 0, 1, 0, 2, 0, 1, 0, 6, 1962206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1963206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1964206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1965206137Stuexen 0, 1, 0, 2, 0, 1, 0, 5, 1966206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1967206137Stuexen 0, 1, 0, 2, 0, 1, 0, 4, 1968206137Stuexen 0, 1, 0, 2, 0, 1, 0, 3, 1969206137Stuexen 0, 1, 0, 2, 0, 1, 0, 8 1970163953Srrs}; 1971163953Srrs 1972163953Srrs 1973163953Srrsvoid 1974206137Stuexensctp_slide_mapping_arrays(struct sctp_tcb *stcb) 1975163953Srrs{ 1976163953Srrs /* 1977163953Srrs * Now we also need to check the mapping array in a couple of ways. 
1978163953Srrs * 1) Did we move the cum-ack point? 1979208897Srrs * 1980208897Srrs * When you first glance at this you might think that all entries that 1981208897Srrs * make up the postion of the cum-ack would be in the nr-mapping 1982208897Srrs * array only.. i.e. things up to the cum-ack are always 1983208897Srrs * deliverable. Thats true with one exception, when its a fragmented 1984208897Srrs * message we may not deliver the data until some threshold (or all 1985208897Srrs * of it) is in place. So we must OR the nr_mapping_array and 1986208897Srrs * mapping_array to get a true picture of the cum-ack. 1987163953Srrs */ 1988163953Srrs struct sctp_association *asoc; 1989179783Srrs int at; 1990208897Srrs uint8_t val; 1991163953Srrs int slide_from, slide_end, lgap, distance; 1992205627Srrs uint32_t old_cumack, old_base, old_highest, highest_tsn; 1993163953Srrs 1994163953Srrs asoc = &stcb->asoc; 1995163953Srrs 1996163953Srrs old_cumack = asoc->cumulative_tsn; 1997163953Srrs old_base = asoc->mapping_array_base_tsn; 1998163953Srrs old_highest = asoc->highest_tsn_inside_map; 1999163953Srrs /* 2000163953Srrs * We could probably improve this a small bit by calculating the 2001163953Srrs * offset of the current cum-ack as the starting point. 
2002163953Srrs */ 2003163953Srrs at = 0; 2004206137Stuexen for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2005208952Srrs val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2006208897Srrs if (val == 0xff) { 2007163953Srrs at += 8; 2008163953Srrs } else { 2009163953Srrs /* there is a 0 bit */ 2010208897Srrs at += sctp_map_lookup_tab[val]; 2011163953Srrs break; 2012163953Srrs } 2013163953Srrs } 2014206137Stuexen asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2015163953Srrs 2016216825Stuexen if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2017216825Stuexen SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2018165220Srrs#ifdef INVARIANTS 2019172090Srrs panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2020172090Srrs asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2021163953Srrs#else 2022172090Srrs SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2023172090Srrs asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2024205502Srrs sctp_print_mapping_array(asoc); 2025179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2026179783Srrs sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2027179783Srrs } 2028163953Srrs asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2029185694Srrs asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2030163953Srrs#endif 2031163953Srrs } 2032216825Stuexen if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2033205627Srrs highest_tsn = asoc->highest_tsn_inside_nr_map; 2034205627Srrs } else { 2035205627Srrs highest_tsn = asoc->highest_tsn_inside_map; 2036205627Srrs } 2037205627Srrs if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2038163953Srrs /* The complete array was completed by a single FR */ 2039205627Srrs /* highest becomes the cum-ack */ 2040206758Stuexen int clr; 2041163953Srrs 
2042206758Stuexen#ifdef INVARIANTS 2043206758Stuexen unsigned int i; 2044206758Stuexen 2045206758Stuexen#endif 2046206758Stuexen 2047163953Srrs /* clear the array */ 2048206137Stuexen clr = ((at + 7) >> 3); 2049171943Srrs if (clr > asoc->mapping_array_size) { 2050163953Srrs clr = asoc->mapping_array_size; 2051163953Srrs } 2052163953Srrs memset(asoc->mapping_array, 0, clr); 2053205627Srrs memset(asoc->nr_mapping_array, 0, clr); 2054206758Stuexen#ifdef INVARIANTS 2055206137Stuexen for (i = 0; i < asoc->mapping_array_size; i++) { 2056206137Stuexen if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2057235164Stuexen SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2058206137Stuexen sctp_print_mapping_array(asoc); 2059206137Stuexen } 2060206137Stuexen } 2061206758Stuexen#endif 2062163953Srrs asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2063205627Srrs asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2064163953Srrs } else if (at >= 8) { 2065163953Srrs /* we can slide the mapping array down */ 2066179783Srrs /* slide_from holds where we hit the first NON 0xff byte */ 2067179783Srrs 2068163953Srrs /* 2069163953Srrs * now calculate the ceiling of the move using our highest 2070163953Srrs * TSN value 2071163953Srrs */ 2072205627Srrs SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2073205627Srrs slide_end = (lgap >> 3); 2074163953Srrs if (slide_end < slide_from) { 2075205627Srrs sctp_print_mapping_array(asoc); 2076172396Srrs#ifdef INVARIANTS 2077163953Srrs panic("impossible slide"); 2078172396Srrs#else 2079235164Stuexen SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? 
at:%d\n", 2080205627Srrs lgap, slide_end, slide_from, at); 2081172396Srrs return; 2082172396Srrs#endif 2083163953Srrs } 2084179783Srrs if (slide_end > asoc->mapping_array_size) { 2085179783Srrs#ifdef INVARIANTS 2086179783Srrs panic("would overrun buffer"); 2087179783Srrs#else 2088235164Stuexen SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n", 2089179783Srrs asoc->mapping_array_size, slide_end); 2090179783Srrs slide_end = asoc->mapping_array_size; 2091179783Srrs#endif 2092179783Srrs } 2093163953Srrs distance = (slide_end - slide_from) + 1; 2094179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2095170744Srrs sctp_log_map(old_base, old_cumack, old_highest, 2096170744Srrs SCTP_MAP_PREPARE_SLIDE); 2097170744Srrs sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2098170744Srrs (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2099170744Srrs } 2100163953Srrs if (distance + slide_from > asoc->mapping_array_size || 2101163953Srrs distance < 0) { 2102163953Srrs /* 2103163953Srrs * Here we do NOT slide forward the array so that 2104163953Srrs * hopefully when more data comes in to fill it up 2105163953Srrs * we will be able to slide it forward. 
Really I
 * don't think this should happen :-0
 */

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
			    (uint32_t) asoc->mapping_array_size,
			    SCTP_MAP_SLIDE_NONE);
		}
	} else {
		int ii;

		/*
		 * Slide both the renegable and non-renegable maps down by
		 * 'slide_from' bytes, then zero the vacated tail.
		 */
		for (ii = 0; ii < distance; ii++) {
			asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
			asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

		}
		for (ii = distance; ii < asoc->mapping_array_size; ii++) {
			asoc->mapping_array[ii] = 0;
			asoc->nr_mapping_array[ii] = 0;
		}
		/*
		 * Keep the "empty map" invariant (highest == base - 1)
		 * intact while the base advances by slide_from * 8 TSNs
		 * (one bit per TSN, 8 bits per map byte).
		 */
		if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
			asoc->highest_tsn_inside_map += (slide_from << 3);
		}
		if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
			asoc->highest_tsn_inside_nr_map += (slide_from << 3);
		}
		asoc->mapping_array_base_tsn += (slide_from << 3);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(asoc->mapping_array_base_tsn,
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
			    SCTP_MAP_SLIDE_RESULT);
		}
	}
}

/*
 * Decide whether to emit a SACK immediately or (re)arm the delayed-SACK
 * timer.  In SHUTDOWN-SENT state the delayed-ack timer is cancelled and a
 * SHUTDOWN plus a SACK are sent right away; otherwise the decision is based
 * on pending gaps, duplicate TSNs, the delayed-ack setting, and the
 * packets-per-SACK frequency limit.  'was_a_gap' tells us whether a gap
 * existed before the newly processed data arrived.
 * Caller holds the TCB lock (asserted by the senders invoked below).
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;

	asoc = &stcb->asoc;
	/* Highest TSN seen, over both the renegable and non-renegable maps. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. here we make
		 * sure SACK timer is off and instead send a SHUTDOWN and a
		 * SACK
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
		}
		/* Prefer the alternate path for the SHUTDOWN if one is set. */
		sctp_send_shutdown(stcb,
		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
	} else {
		int is_a_gap;

		/* is there a gap now ? */
		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

		/*
		 * CMT DAC algorithm: increase number of packets received
		 * since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
							 * SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) ||	/* we have dup's */
		    (is_a_gap) ||	/* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
		    ) {

			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of
				 *
				 * reordering. Therefore, if acks that do not
				 * have to be sent because of the above
				 * reasons, will be delayed. That is, acks
				 * that would have been sent due to gap
				 * reports will be delayed with DAC. Start
				 * the delayed ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the timer
				 * is pending, we got our first packet OR
				 * there are gaps or duplicates.
				 */
				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Nothing urgent: just make sure the timer is running. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}

/*
 * Drain whatever is deliverable from the reassembly queue.  First finish any
 * partial-delivery (PD-API) already in progress; then, if the next message's
 * first fragment is at the head of the queue (in stream order, or unordered),
 * start a new partial delivery once either the whole message is present or
 * enough data (pd_point) has accumulated.  Loops via 'doit_again' so several
 * complete messages can be handed up in one call.
 */
void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsize, pd_point;
	uint16_t nxt_todel;

	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* Can we proceed further, i.e. the PD-API is complete */
	if (asoc->fragmented_delivery_inprogress) {
		/* no */
		return;
	}
	/*
	 * Now is there some other chunk I can deliver from the reassembly
	 * queue.
	 */
doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Queue empty: make the accounting reflect that. */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
	    ((nxt_todel == chk->rec.data.stream_seq) ||
	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
		/*
		 * Yep the first one is here. We setup to start reception,
		 * by backing down the TSN just in case we can't deliver.
		 */

		/*
		 * Before we start though either all of the message should
		 * be here or the socket buffer max or nothing on the
		 * delivery queue and something can be delivered.
		 */
		if (stcb->sctp_socket) {
			/*
			 * pd_point: the smaller of a fraction of the
			 * receive buffer and the endpoint's configured
			 * partial-delivery point.
			 */
			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			    stcb->sctp_ep->partial_delivery_point);
		} else {
			pd_point = stcb->sctp_ep->partial_delivery_point;
		}
		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
			/* Record the PD-API context for this message. */
			asoc->fragmented_delivery_inprogress = 1;
			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
			asoc->str_of_pdapi = chk->rec.data.stream_number;
			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
			asoc->pdapi_ppid = chk->rec.data.payloadtype;
			asoc->fragment_flags = chk->rec.data.rcv_flags;
			sctp_service_reassembly(stcb, asoc);
			if (asoc->fragmented_delivery_inprogress == 0) {
				/* That message completed; try the next one. */
				goto doit_again;
			}
		}
	}
}

/*
 * Walk all DATA chunks in a received packet starting at *offset.  Validates
 * each chunk length, aborts the association on malformed or empty DATA
 * chunks, hands valid chunks to sctp_process_a_data_chunk(), queues an
 * "unrecognized chunk" error for unknown chunk types per the upper two
 * type bits, and finally kicks the reassembly and SACK machinery.
 * Returns 0 on success, 1 if the first chunk header could not be read,
 * 2 if the association was aborted.  *mm may be replaced (mbuf compaction
 * or consumption by the data-chunk processor); *high_tsn is set to the
 * highest TSN delivered.  Caller holds the TCB lock.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_data_chunk *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int chk_length, break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* Remember whether a gap existed before this packet's data. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation. Note for __Panda__ we don't do this
	 * since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
				    chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen,
				    src, dst, sh, op_err,
				    use_mflowid, mflowid,
				    vrf_id, port);
				return (2);
			}
			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * empty data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen,
				    src, dst, sh, op_err,
				    use_mflowid, mflowid,
				    vrf_id, port);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
					struct mbuf *op_err;

					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
					sctp_abort_association(inp, stcb,
					    m, iphlen,
					    src, dst,
					    sh, op_err,
					    use_mflowid, mflowid,
					    vrf_id, port);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct mbuf *merr;
					struct sctp_paramhdr *phd;

					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
					if (merr) {
						phd = mtod(merr, struct sctp_paramhdr *);
						/*
						 * We cheat and use param
						 * type since we did not
						 * bother to define a error
						 * cause struct. They are
						 * the same basic format
						 * with different names.
						 */
						phd->param_type =
						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
						phd->param_length =
						    htons(chk_length + sizeof(*phd));
						SCTP_BUF_LEN(merr) = sizeof(*phd);
						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
						if (SCTP_BUF_NEXT(merr)) {
							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
								sctp_m_freem(merr);
							} else {
								sctp_queue_op_err(stcb, merr);
							}
						} else {
							sctp_m_freem(merr);
						}
					}
				}
				if ((ch->ch.chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INDATA,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
		sctp_service_queues(stcb, asoc);

	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}

/*
 * Mark the chunks on the sent queue covered by one gap-ack block
 * [last_tsn + frag_strt, last_tsn + frag_end].  With nr_sacking set, chunks
 * are promoted to NR-ACKED and their data freed; otherwise they become
 * MARKED/ACKED.  Also maintains the CMT CUCv2 pseudo-cumack, SFR/HTNA and
 * DAC bookkeeping, flight-size accounting and (once per SACK, via *rto_ok)
 * the RTO measurement.  *p_tp1 carries the queue cursor across calls.
 * Returns non-zero when a chunk's data was freed (used by nr-sack to wake
 * the sender).
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t * biggest_newly_acked_tsn,
    uint32_t * this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.TSN_seq == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.TSN_seq,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.TSN_seq,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
						}
						/* Chunk leaves the network: shrink flight accounting. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* Only one RTO sample per SACK (guarded by *rto_ok). */
								if (*rto_ok) {
									tp1->whoTo->RTO =
									    sctp_calculate_rto(stcb,
									    &stcb->asoc,
									    tp1->whoTo,
									    &tp1->sent_rcv_time,
									    sctp_align_safe_nocopy,
									    SCTP_RTT_FROM_DATA);
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}
					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.TSN_seq;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/*
							 * sa_ignore
							 * NO_NULL_CHK
							 */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->TSN_seq == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
				/* Passed the target TSN: it is not on the queue. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* Wrap to the head once per TSN, for out-of-order gap blocks. */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}		/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}			/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}


/*
 * Parse the num_seg revocable and num_nr_seg non-revocable gap-ack blocks
 * of a (NR-)SACK from the mbuf chain at *offset and apply each to the sent
 * queue via sctp_process_segment_range().  Malformed blocks (start > end)
 * are skipped; out-of-order blocks restart the queue cursor.  Updates
 * *biggest_tsn_acked and returns non-zero if any chunk data was freed.
 */
static int
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
    int num_seg, int num_nr_seg, int *rto_ok)
{
	struct sctp_gap_ack_block *frag, block;
	struct sctp_tmit_chunk *tp1;
	int i;
	int num_frs = 0;
	int chunk_freed;
	int non_revocable;
	uint16_t frag_strt, frag_end, prev_frag_end;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	prev_frag_end = 0;
	chunk_freed = 0;

	for (i = 0; i < (num_seg + num_nr_seg); i++) {
		if (i == num_seg) {
			/* Switching to the NR blocks: restart the scan. */
			prev_frag_end = 0;
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
		*offset += sizeof(block);
		if (frag == NULL) {
			/* Ran out of packet: return what we have so far. */
			return (chunk_freed);
		}
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);

		if (frag_strt > frag_end) {
			/* This gap report is malformed, skip it. */
			continue;
		}
		if (frag_strt <= prev_frag_end) {
			/* This gap report is not in order, so restart. */
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
			*biggest_tsn_acked = last_tsn + frag_end;
		}
		/* Blocks past num_seg are the non-revocable (NR) ones. */
		if (i < num_seg) {
			non_revocable = 0;
		} else {
			non_revocable = 1;
		}
		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
		    non_revocable, &num_frs, biggest_newly_acked_tsn,
		    this_sack_lowest_newack, rto_ok)) {
			chunk_freed = 1;
		}
		prev_frag_end = frag_end;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		if (num_frs)
			sctp_log_fr(*biggest_tsn_acked,
			    *biggest_newly_acked_tsn,
			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
	}
	return (chunk_freed);
}

/*
 * Scan the sent queue between the cumulative ack and biggest_tsn_acked and
 * undo acknowledgements the peer has revoked: a chunk previously ACKED but
 * not covered by this SACK goes back to SENT (re-entering the flight and
 * inflating the cwnd to compensate), while a chunk MARKED by this SACK is
 * promoted to ACKED.
 */
static void
sctp_check_for_revoked(struct sctp_tcb *stcb,
    struct sctp_association *asoc, uint32_t cumack,
    uint32_t biggest_tsn_acked)
{
	struct sctp_tmit_chunk *tp1;

	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
			/*
			 * ok this guy is either ACK or MARKED. If it is
			 * ACKED it has been previously acked but not this
			 * time i.e. revoked. If it is MARKED it was ACK'ed
			 * again.
			 */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
				break;
			}
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				/* it has been revoked */
				tp1->sent = SCTP_DATAGRAM_SENT;
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * We must add this stuff back in to assure
				 * timers and such get started.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				/*
				 * We inflate the cwnd to compensate for our
				 * artificial inflation of the flight_size.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					    cumack,
					    tp1->rec.data.TSN_seq,
					    0,
					    0,
					    SCTP_LOG_TSN_REVOKED);
				}
			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
				/* it has been re-acked in this SACK */
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
			break;
	}
}


static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	int tot_retrans = 0;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
2946163953Srrs */ 2947163953Srrs tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 2948163953Srrs if (tp1 == NULL) { 2949163953Srrs sending_seq = asoc->sending_seq; 2950163953Srrs } else { 2951163953Srrs sending_seq = tp1->rec.data.TSN_seq; 2952163953Srrs } 2953163953Srrs 2954163953Srrs /* CMT DAC algo: finding out if SACK is a mixed SACK */ 2955216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 2956211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 2957163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 2958163953Srrs if (net->saw_newack) 2959163953Srrs num_dests_sacked++; 2960163953Srrs } 2961163953Srrs } 2962163953Srrs if (stcb->asoc.peer_supports_prsctp) { 2963169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 2964163953Srrs } 2965216822Stuexen TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 2966163953Srrs strike_flag = 0; 2967163953Srrs if (tp1->no_fr_allowed) { 2968163953Srrs /* this one had a timeout or something */ 2969163953Srrs continue; 2970163953Srrs } 2971179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 2972170744Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) 2973170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 2974170744Srrs tp1->rec.data.TSN_seq, 2975170744Srrs tp1->sent, 2976170744Srrs SCTP_FR_LOG_CHECK_STRIKE); 2977170744Srrs } 2978216825Stuexen if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || 2979163953Srrs tp1->sent == SCTP_DATAGRAM_UNSENT) { 2980163953Srrs /* done */ 2981163953Srrs break; 2982163953Srrs } 2983163953Srrs if (stcb->asoc.peer_supports_prsctp) { 2984163953Srrs if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 2985163953Srrs /* Is it expired? 
*/ 2986212801Stuexen if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 2987163953Srrs /* Yes so drop it */ 2988163953Srrs if (tp1->data != NULL) { 2989237889Stuexen (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 2990189790Srrs SCTP_SO_NOT_LOCKED); 2991163953Srrs } 2992163953Srrs continue; 2993163953Srrs } 2994163953Srrs } 2995163953Srrs } 2996216825Stuexen if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) { 2997163953Srrs /* we are beyond the tsn in the sack */ 2998163953Srrs break; 2999163953Srrs } 3000163953Srrs if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3001163953Srrs /* either a RESEND, ACKED, or MARKED */ 3002163953Srrs /* skip */ 3003210599Srrs if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3004210599Srrs /* Continue strikin FWD-TSN chunks */ 3005210599Srrs tp1->rec.data.fwd_tsn_cnt++; 3006210599Srrs } 3007163953Srrs continue; 3008163953Srrs } 3009163953Srrs /* 3010163953Srrs * CMT : SFR algo (covers part of DAC and HTNA as well) 3011163953Srrs */ 3012169420Srrs if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3013163953Srrs /* 3014163953Srrs * No new acks were receieved for data sent to this 3015163953Srrs * dest. Therefore, according to the SFR algo for 3016163953Srrs * CMT, no data sent to this dest can be marked for 3017168709Srrs * FR using this SACK. 3018163953Srrs */ 3019163953Srrs continue; 3020216825Stuexen } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3021216825Stuexen tp1->whoTo->this_sack_highest_newack)) { 3022163953Srrs /* 3023163953Srrs * CMT: New acks were receieved for data sent to 3024163953Srrs * this dest. But no new acks were seen for data 3025163953Srrs * sent after tp1. Therefore, according to the SFR 3026163953Srrs * algo for CMT, tp1 cannot be marked for FR using 3027163953Srrs * this SACK. This step covers part of the DAC algo 3028163953Srrs * and the HTNA algo as well. 
3029163953Srrs */ 3030163953Srrs continue; 3031163953Srrs } 3032163953Srrs /* 3033163953Srrs * Here we check to see if we were have already done a FR 3034163953Srrs * and if so we see if the biggest TSN we saw in the sack is 3035163953Srrs * smaller than the recovery point. If so we don't strike 3036163953Srrs * the tsn... otherwise we CAN strike the TSN. 3037163953Srrs */ 3038163953Srrs /* 3039167598Srrs * @@@ JRI: Check for CMT if (accum_moved && 3040167598Srrs * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3041167598Srrs * 0)) { 3042163953Srrs */ 3043167598Srrs if (accum_moved && asoc->fast_retran_loss_recovery) { 3044163953Srrs /* 3045163953Srrs * Strike the TSN if in fast-recovery and cum-ack 3046163953Srrs * moved. 3047163953Srrs */ 3048179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3049170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3050170744Srrs tp1->rec.data.TSN_seq, 3051170744Srrs tp1->sent, 3052170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3053170744Srrs } 3054168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3055168124Srrs tp1->sent++; 3056168124Srrs } 3057216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3058211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3059163953Srrs /* 3060163953Srrs * CMT DAC algorithm: If SACK flag is set to 3061163953Srrs * 0, then lowest_newack test will not pass 3062163953Srrs * because it would have been set to the 3063163953Srrs * cumack earlier. If not already to be 3064163953Srrs * rtx'd, If not a mixed sack and if tp1 is 3065163953Srrs * not between two sacked TSNs, then mark by 3066168709Srrs * one more. NOTE that we are marking by one 3067168709Srrs * additional time since the SACK DAC flag 3068168709Srrs * indicates that two packets have been 3069168709Srrs * received after this missing TSN. 
3070163953Srrs */ 3071168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3072216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3073179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3074170744Srrs sctp_log_fr(16 + num_dests_sacked, 3075170744Srrs tp1->rec.data.TSN_seq, 3076170744Srrs tp1->sent, 3077170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3078170744Srrs } 3079163953Srrs tp1->sent++; 3080163953Srrs } 3081163953Srrs } 3082211944Stuexen } else if ((tp1->rec.data.doing_fast_retransmit) && 3083211944Stuexen (asoc->sctp_cmt_on_off == 0)) { 3084163953Srrs /* 3085163953Srrs * For those that have done a FR we must take 3086163953Srrs * special consideration if we strike. I.e the 3087163953Srrs * biggest_newly_acked must be higher than the 3088163953Srrs * sending_seq at the time we did the FR. 3089163953Srrs */ 3090168124Srrs if ( 3091163953Srrs#ifdef SCTP_FR_TO_ALTERNATE 3092163953Srrs /* 3093163953Srrs * If FR's go to new networks, then we must only do 3094163953Srrs * this for singly homed asoc's. However if the FR's 3095163953Srrs * go to the same network (Armando's work) then its 3096163953Srrs * ok to FR multiple times. 3097163953Srrs */ 3098168124Srrs (asoc->numnets < 2) 3099163953Srrs#else 3100168124Srrs (1) 3101163953Srrs#endif 3102168124Srrs ) { 3103168124Srrs 3104216825Stuexen if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3105163953Srrs tp1->rec.data.fast_retran_tsn)) { 3106163953Srrs /* 3107163953Srrs * Strike the TSN, since this ack is 3108163953Srrs * beyond where things were when we 3109163953Srrs * did a FR. 
3110163953Srrs */ 3111179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3112170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3113170744Srrs tp1->rec.data.TSN_seq, 3114170744Srrs tp1->sent, 3115170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3116170744Srrs } 3117168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3118168124Srrs tp1->sent++; 3119168124Srrs } 3120163953Srrs strike_flag = 1; 3121216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3122211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3123163953Srrs /* 3124163953Srrs * CMT DAC algorithm: If 3125163953Srrs * SACK flag is set to 0, 3126163953Srrs * then lowest_newack test 3127163953Srrs * will not pass because it 3128163953Srrs * would have been set to 3129163953Srrs * the cumack earlier. If 3130163953Srrs * not already to be rtx'd, 3131163953Srrs * If not a mixed sack and 3132163953Srrs * if tp1 is not between two 3133163953Srrs * sacked TSNs, then mark by 3134168709Srrs * one more. NOTE that we 3135168709Srrs * are marking by one 3136168709Srrs * additional time since the 3137168709Srrs * SACK DAC flag indicates 3138168709Srrs * that two packets have 3139168709Srrs * been received after this 3140168709Srrs * missing TSN. 3141163953Srrs */ 3142168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3143168124Srrs (num_dests_sacked == 1) && 3144216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, 3145216825Stuexen tp1->rec.data.TSN_seq)) { 3146179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3147170744Srrs sctp_log_fr(32 + num_dests_sacked, 3148170744Srrs tp1->rec.data.TSN_seq, 3149170744Srrs tp1->sent, 3150170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3151170744Srrs } 3152168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3153168124Srrs tp1->sent++; 3154168124Srrs } 3155163953Srrs } 3156163953Srrs } 3157163953Srrs } 3158163953Srrs } 3159163953Srrs /* 3160167598Srrs * JRI: TODO: remove code for HTNA algo. CMT's SFR 3161167598Srrs * algo covers HTNA. 
3162163953Srrs */ 3163216825Stuexen } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3164216825Stuexen biggest_tsn_newly_acked)) { 3165163953Srrs /* 3166163953Srrs * We don't strike these: This is the HTNA 3167163953Srrs * algorithm i.e. we don't strike If our TSN is 3168163953Srrs * larger than the Highest TSN Newly Acked. 3169163953Srrs */ 3170163953Srrs ; 3171163953Srrs } else { 3172163953Srrs /* Strike the TSN */ 3173179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3174170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3175170744Srrs tp1->rec.data.TSN_seq, 3176170744Srrs tp1->sent, 3177170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3178170744Srrs } 3179168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3180168124Srrs tp1->sent++; 3181168124Srrs } 3182216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3183211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3184163953Srrs /* 3185163953Srrs * CMT DAC algorithm: If SACK flag is set to 3186163953Srrs * 0, then lowest_newack test will not pass 3187163953Srrs * because it would have been set to the 3188163953Srrs * cumack earlier. If not already to be 3189163953Srrs * rtx'd, If not a mixed sack and if tp1 is 3190163953Srrs * not between two sacked TSNs, then mark by 3191168709Srrs * one more. NOTE that we are marking by one 3192168709Srrs * additional time since the SACK DAC flag 3193168709Srrs * indicates that two packets have been 3194168709Srrs * received after this missing TSN. 
3195163953Srrs */ 3196168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3197216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3198179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3199170744Srrs sctp_log_fr(48 + num_dests_sacked, 3200170744Srrs tp1->rec.data.TSN_seq, 3201170744Srrs tp1->sent, 3202170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3203170744Srrs } 3204163953Srrs tp1->sent++; 3205163953Srrs } 3206163953Srrs } 3207163953Srrs } 3208163953Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3209163953Srrs struct sctp_nets *alt; 3210163953Srrs 3211191049Srrs /* fix counts and things */ 3212191049Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3213191049Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3214191049Srrs (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3215191049Srrs tp1->book_size, 3216191049Srrs (uintptr_t) tp1->whoTo, 3217191049Srrs tp1->rec.data.TSN_seq); 3218191049Srrs } 3219191049Srrs if (tp1->whoTo) { 3220191049Srrs tp1->whoTo->net_ack++; 3221191049Srrs sctp_flight_size_decrease(tp1); 3222219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3223219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3224219057Srrs tp1); 3225219057Srrs } 3226191049Srrs } 3227191049Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3228191049Srrs sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3229191049Srrs asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3230191049Srrs } 3231191049Srrs /* add back to the rwnd */ 3232191049Srrs asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3233191049Srrs 3234191049Srrs /* remove from the total flight */ 3235191049Srrs sctp_total_flight_decrease(stcb, tp1); 3236191049Srrs 3237207191Stuexen if ((stcb->asoc.peer_supports_prsctp) && 3238207191Stuexen (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3239207191Stuexen /* 
3240207191Stuexen * Has it been retransmitted tv_sec times? - 3241207191Stuexen * we store the retran count there. 3242207191Stuexen */ 3243207191Stuexen if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3244207191Stuexen /* Yes, so drop it */ 3245207191Stuexen if (tp1->data != NULL) { 3246237889Stuexen (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3247207191Stuexen SCTP_SO_NOT_LOCKED); 3248207191Stuexen } 3249207191Stuexen /* Make sure to flag we had a FR */ 3250207191Stuexen tp1->whoTo->net_ack++; 3251207191Stuexen continue; 3252207191Stuexen } 3253207191Stuexen } 3254235164Stuexen /* 3255235164Stuexen * SCTP_PRINTF("OK, we are now ready to FR this 3256235164Stuexen * guy\n"); 3257235164Stuexen */ 3258179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3259170744Srrs sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3260170744Srrs 0, SCTP_FR_MARKED); 3261170744Srrs } 3262163953Srrs if (strike_flag) { 3263163953Srrs /* This is a subsequent FR */ 3264163953Srrs SCTP_STAT_INCR(sctps_sendmultfastretrans); 3265163953Srrs } 3266168124Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3267216669Stuexen if (asoc->sctp_cmt_on_off > 0) { 3268163953Srrs /* 3269163953Srrs * CMT: Using RTX_SSTHRESH policy for CMT. 3270163953Srrs * If CMT is being used, then pick dest with 3271163953Srrs * largest ssthresh for any retransmission. 
3272163953Srrs */ 3273163953Srrs tp1->no_fr_allowed = 1; 3274163953Srrs alt = tp1->whoTo; 3275169655Srrs /* sa_ignore NO_NULL_CHK */ 3276211944Stuexen if (asoc->sctp_cmt_pf > 0) { 3277171440Srrs /* 3278171440Srrs * JRS 5/18/07 - If CMT PF is on, 3279171440Srrs * use the PF version of 3280171440Srrs * find_alt_net() 3281171440Srrs */ 3282171440Srrs alt = sctp_find_alternate_net(stcb, alt, 2); 3283171440Srrs } else { 3284171440Srrs /* 3285171440Srrs * JRS 5/18/07 - If only CMT is on, 3286171440Srrs * use the CMT version of 3287171440Srrs * find_alt_net() 3288171440Srrs */ 3289171531Srrs /* sa_ignore NO_NULL_CHK */ 3290171440Srrs alt = sctp_find_alternate_net(stcb, alt, 1); 3291171440Srrs } 3292169420Srrs if (alt == NULL) { 3293169420Srrs alt = tp1->whoTo; 3294169420Srrs } 3295163953Srrs /* 3296163953Srrs * CUCv2: If a different dest is picked for 3297163953Srrs * the retransmission, then new 3298163953Srrs * (rtx-)pseudo_cumack needs to be tracked 3299163953Srrs * for orig dest. Let CUCv2 track new (rtx-) 3300163953Srrs * pseudo-cumack always. 3301163953Srrs */ 3302169420Srrs if (tp1->whoTo) { 3303169420Srrs tp1->whoTo->find_pseudo_cumack = 1; 3304169420Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 3305169420Srrs } 3306163953Srrs } else {/* CMT is OFF */ 3307163953Srrs 3308163953Srrs#ifdef SCTP_FR_TO_ALTERNATE 3309163953Srrs /* Can we find an alternate? */ 3310163953Srrs alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3311163953Srrs#else 3312163953Srrs /* 3313163953Srrs * default behavior is to NOT retransmit 3314163953Srrs * FR's to an alternate. Armando Caro's 3315163953Srrs * paper details why. 
3316163953Srrs */ 3317163953Srrs alt = tp1->whoTo; 3318163953Srrs#endif 3319163953Srrs } 3320163953Srrs 3321163953Srrs tp1->rec.data.doing_fast_retransmit = 1; 3322163953Srrs tot_retrans++; 3323163953Srrs /* mark the sending seq for possible subsequent FR's */ 3324163953Srrs /* 3325235164Stuexen * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3326163953Srrs * (uint32_t)tpi->rec.data.TSN_seq); 3327163953Srrs */ 3328163953Srrs if (TAILQ_EMPTY(&asoc->send_queue)) { 3329163953Srrs /* 3330163953Srrs * If the queue of send is empty then its 3331163953Srrs * the next sequence number that will be 3332163953Srrs * assigned so we subtract one from this to 3333163953Srrs * get the one we last sent. 3334163953Srrs */ 3335163953Srrs tp1->rec.data.fast_retran_tsn = sending_seq; 3336163953Srrs } else { 3337163953Srrs /* 3338163953Srrs * If there are chunks on the send queue 3339163953Srrs * (unsent data that has made it from the 3340163953Srrs * stream queues but not out the door, we 3341163953Srrs * take the first one (which will have the 3342163953Srrs * lowest TSN) and subtract one to get the 3343163953Srrs * one we last sent. 3344163953Srrs */ 3345163953Srrs struct sctp_tmit_chunk *ttt; 3346163953Srrs 3347163953Srrs ttt = TAILQ_FIRST(&asoc->send_queue); 3348163953Srrs tp1->rec.data.fast_retran_tsn = 3349163953Srrs ttt->rec.data.TSN_seq; 3350163953Srrs } 3351163953Srrs 3352163953Srrs if (tp1->do_rtt) { 3353163953Srrs /* 3354163953Srrs * this guy had a RTO calculation pending on 3355163953Srrs * it, cancel it 3356163953Srrs */ 3357231039Stuexen if ((tp1->whoTo != NULL) && 3358231039Stuexen (tp1->whoTo->rto_needed == 0)) { 3359219397Srrs tp1->whoTo->rto_needed = 1; 3360219397Srrs } 3361163953Srrs tp1->do_rtt = 0; 3362163953Srrs } 3363163953Srrs if (alt != tp1->whoTo) { 3364163953Srrs /* yes, there is an alternate. 
				 */
				sctp_free_remote_addr(tp1->whoTo);
				/* sa_ignore FREED_MEMORY */
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}

/*
 * PR-SCTP: scan the front of the sent queue and advance the peer ack
 * point over chunks that are abandoned (FORWARD_TSN_SKIP / NR_ACKED) or
 * whose TTL-based lifetime has expired while awaiting retransmission.
 * Returns the chunk holding the new advanced peer ack point, or NULL if
 * the point did not move (or PR-SCTP is not supported by the peer).
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;

	if (asoc->peer_supports_prsctp == 0) {
		return (NULL);
	}
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.TSN_seq, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			/* Fetch the time once, lazily, only if needed. */
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when we hit one for
				 * resend whose time has not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
				a_adv = tp1;
			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}

/*
 * Sanity-audit the flight accounting against the sent queue. Returns 0
 * when consistent; under INVARIANTS it panics on a mismatch, otherwise it
 * prints diagnostics and returns 1.
 */
static int
sctp_fs_audit(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
	int entry_flight, entry_cnt, ret;

	entry_flight = asoc->total_flight;
	entry_cnt = asoc->total_flight_count;
	ret = 0;

	/* Skip the audit when PR-SCTP abandoned chunks dominate the queue. */
	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
		return (0);

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND) {
			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
			    chk->rec.data.TSN_seq,
			    chk->send_size,
			    chk->snd_count);
			inflight++;
		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend++;
		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
			inbetween++;
		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
			above++;
		} else {
			acked++;
		}
	}

	if ((inflight > 0) || (inbetween > 0)) {
#ifdef INVARIANTS
		panic("Flight size-express incorrect? \n");
#else
		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
		    entry_flight, entry_cnt);

		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
		    inflight, inbetween, resend, above, acked);
		ret = 1;
#endif
	}
	return (ret);
}


/*
 * A window-probe chunk was acknowledged (or must be retried): clear the
 * probe flag and, unless the TSN was skipped/already acked, pull it out
 * of flight accounting and mark it for resend.
 */
static void
sctp_window_probe_recovery(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1)
{
	tp1->window_probe = 0;
	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/* TSN's skipped we do NOT move back. */
		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
		    tp1->whoTo->flight_size,
		    tp1->book_size,
		    (uintptr_t) tp1->whoTo,
		    tp1->rec.data.TSN_seq);
		return;
	}
	/* First setup this by shrinking flight */
	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
		    tp1);
	}
	sctp_flight_size_decrease(tp1);
	sctp_total_flight_decrease(stcb, tp1);
	/* Now mark for resend */
	tp1->sent = SCTP_DATAGRAM_RESEND;
	sctp_ucount_incr(asoc->sent_queue_retran_cnt);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
		    tp1->whoTo->flight_size,
		    tp1->book_size,
3541170744Srrs (uintptr_t) tp1->whoTo, 3542170744Srrs tp1->rec.data.TSN_seq); 3543170744Srrs } 3544168709Srrs} 3545168709Srrs 3546163953Srrsvoid 3547163953Srrssctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3548218186Srrs uint32_t rwnd, int *abort_now, int ecne_seen) 3549163953Srrs{ 3550163953Srrs struct sctp_nets *net; 3551163953Srrs struct sctp_association *asoc; 3552163953Srrs struct sctp_tmit_chunk *tp1, *tp2; 3553168124Srrs uint32_t old_rwnd; 3554168124Srrs int win_probe_recovery = 0; 3555168709Srrs int win_probe_recovered = 0; 3556169208Srrs int j, done_once = 0; 3557219397Srrs int rto_ok = 1; 3558163953Srrs 3559179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3560170744Srrs sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3561170744Srrs rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3562170744Srrs } 3563163953Srrs SCTP_TCB_LOCK_ASSERT(stcb); 3564171477Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 3565171477Srrs stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3566171477Srrs stcb->asoc.cumack_log_at++; 3567171477Srrs if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3568171477Srrs stcb->asoc.cumack_log_at = 0; 3569171477Srrs } 3570171477Srrs#endif 3571163953Srrs asoc = &stcb->asoc; 3572169208Srrs old_rwnd = asoc->peers_rwnd; 3573216825Stuexen if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3574168124Srrs /* old ack */ 3575168124Srrs return; 3576169208Srrs } else if (asoc->last_acked_seq == cumack) { 3577169208Srrs /* Window update sack */ 3578169208Srrs asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3579210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3580169208Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3581169208Srrs /* SWS sender side engages */ 3582169208Srrs asoc->peers_rwnd = 0; 3583169208Srrs } 3584169208Srrs if (asoc->peers_rwnd > old_rwnd) { 3585169208Srrs goto again; 3586169208Srrs } 3587169208Srrs return; 
3588168124Srrs } 3589163953Srrs /* First setup for CC stuff */ 3590163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3591218072Srrs if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3592218072Srrs /* Drag along the window_tsn for cwr's */ 3593218072Srrs net->cwr_window_tsn = cumack; 3594218072Srrs } 3595163953Srrs net->prev_cwnd = net->cwnd; 3596163953Srrs net->net_ack = 0; 3597163953Srrs net->net_ack2 = 0; 3598167695Srrs 3599167695Srrs /* 3600167695Srrs * CMT: Reset CUC and Fast recovery algo variables before 3601167695Srrs * SACK processing 3602167695Srrs */ 3603167695Srrs net->new_pseudo_cumack = 0; 3604167695Srrs net->will_exit_fast_recovery = 0; 3605219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3606219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3607219057Srrs } 3608163953Srrs } 3609179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 3610165647Srrs uint32_t send_s; 3611165647Srrs 3612168709Srrs if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3613168709Srrs tp1 = TAILQ_LAST(&asoc->sent_queue, 3614168709Srrs sctpchunk_listhead); 3615168709Srrs send_s = tp1->rec.data.TSN_seq + 1; 3616168709Srrs } else { 3617165647Srrs send_s = asoc->sending_seq; 3618165647Srrs } 3619216825Stuexen if (SCTP_TSN_GE(cumack, send_s)) { 3620168709Srrs#ifndef INVARIANTS 3621266181Stuexen struct mbuf *op_err; 3622266181Stuexen char msg[SCTP_DIAG_INFO_LEN]; 3623168709Srrs 3624168709Srrs#endif 3625168709Srrs#ifdef INVARIANTS 3626165647Srrs panic("Impossible sack 1"); 3627165647Srrs#else 3628206137Stuexen 3629165647Srrs *abort_now = 1; 3630165647Srrs /* XXX */ 3631266181Stuexen snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x", 3632266181Stuexen cumack, send_s); 3633266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 3634165647Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3635266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, 
SCTP_SO_NOT_LOCKED); 3636165647Srrs return; 3637165647Srrs#endif 3638165647Srrs } 3639165647Srrs } 3640163953Srrs asoc->this_sack_highest_gap = cumack; 3641179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3642171943Srrs sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3643171943Srrs stcb->asoc.overall_error_count, 3644171943Srrs 0, 3645171943Srrs SCTP_FROM_SCTP_INDATA, 3646171943Srrs __LINE__); 3647171943Srrs } 3648163953Srrs stcb->asoc.overall_error_count = 0; 3649216825Stuexen if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 3650168124Srrs /* process the new consecutive TSN first */ 3651216822Stuexen TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3652216825Stuexen if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { 3653171477Srrs if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3654235164Stuexen SCTP_PRINTF("Warning, an unsent is now acked?\n"); 3655171477Srrs } 3656171477Srrs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3657163953Srrs /* 3658171477Srrs * If it is less than ACKED, it is 3659171477Srrs * now no-longer in flight. 
Higher 3660171477Srrs * values may occur during marking 3661163953Srrs */ 3662171477Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3663179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3664171477Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3665171477Srrs tp1->whoTo->flight_size, 3666171477Srrs tp1->book_size, 3667171477Srrs (uintptr_t) tp1->whoTo, 3668171477Srrs tp1->rec.data.TSN_seq); 3669168124Srrs } 3670171477Srrs sctp_flight_size_decrease(tp1); 3671219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3672219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3673219057Srrs tp1); 3674219057Srrs } 3675172156Srrs /* sa_ignore NO_NULL_CHK */ 3676171477Srrs sctp_total_flight_decrease(stcb, tp1); 3677171477Srrs } 3678171477Srrs tp1->whoTo->net_ack += tp1->send_size; 3679171477Srrs if (tp1->snd_count < 2) { 3680163953Srrs /* 3681171477Srrs * True non-retransmited 3682171477Srrs * chunk 3683163953Srrs */ 3684171477Srrs tp1->whoTo->net_ack2 += 3685171477Srrs tp1->send_size; 3686163953Srrs 3687171477Srrs /* update RTO too? */ 3688171477Srrs if (tp1->do_rtt) { 3689219397Srrs if (rto_ok) { 3690219397Srrs tp1->whoTo->RTO = 3691219397Srrs /* 3692219397Srrs * sa_ignore 3693219397Srrs * NO_NULL_CH 3694219397Srrs * K 3695219397Srrs */ 3696219397Srrs sctp_calculate_rto(stcb, 3697219397Srrs asoc, tp1->whoTo, 3698219397Srrs &tp1->sent_rcv_time, 3699219397Srrs sctp_align_safe_nocopy, 3700219397Srrs SCTP_RTT_FROM_DATA); 3701219397Srrs rto_ok = 0; 3702219397Srrs } 3703219397Srrs if (tp1->whoTo->rto_needed == 0) { 3704219397Srrs tp1->whoTo->rto_needed = 1; 3705219397Srrs } 3706171477Srrs tp1->do_rtt = 0; 3707170744Srrs } 3708168124Srrs } 3709171477Srrs /* 3710171477Srrs * CMT: CUCv2 algorithm. From the 3711171477Srrs * cumack'd TSNs, for each TSN being 3712171477Srrs * acked for the first time, set the 3713171477Srrs * following variables for the 3714171477Srrs * corresp destination. 
3715171477Srrs * new_pseudo_cumack will trigger a 3716171477Srrs * cwnd update. 3717171477Srrs * find_(rtx_)pseudo_cumack will 3718171477Srrs * trigger search for the next 3719171477Srrs * expected (rtx-)pseudo-cumack. 3720171477Srrs */ 3721171477Srrs tp1->whoTo->new_pseudo_cumack = 1; 3722171477Srrs tp1->whoTo->find_pseudo_cumack = 1; 3723171477Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 3724171477Srrs 3725179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3726172156Srrs /* sa_ignore NO_NULL_CHK */ 3727171477Srrs sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3728168124Srrs } 3729163953Srrs } 3730171477Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3731171477Srrs sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3732171477Srrs } 3733171477Srrs if (tp1->rec.data.chunk_was_revoked) { 3734171477Srrs /* deflate the cwnd */ 3735171477Srrs tp1->whoTo->cwnd -= tp1->book_size; 3736171477Srrs tp1->rec.data.chunk_was_revoked = 0; 3737171477Srrs } 3738252943Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3739252942Stuexen if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3740252942Stuexen asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3741252942Stuexen#ifdef INVARIANTS 3742252942Stuexen } else { 3743252942Stuexen panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3744252942Stuexen#endif 3745252942Stuexen } 3746252942Stuexen } 3747171477Srrs TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3748171477Srrs if (tp1->data) { 3749172156Srrs /* sa_ignore NO_NULL_CHK */ 3750171477Srrs sctp_free_bufspace(stcb, asoc, tp1, 1); 3751171477Srrs sctp_m_freem(tp1->data); 3752216822Stuexen tp1->data = NULL; 3753171477Srrs } 3754179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3755171477Srrs sctp_log_sack(asoc->last_acked_seq, 3756171477Srrs cumack, 3757171477Srrs tp1->rec.data.TSN_seq, 3758171477Srrs 0, 3759171477Srrs 0, 3760171477Srrs 
SCTP_LOG_FREE_SENT); 3761171477Srrs } 3762171477Srrs asoc->sent_queue_cnt--; 3763221627Stuexen sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 3764168124Srrs } else { 3765168124Srrs break; 3766163953Srrs } 3767168124Srrs } 3768171477Srrs 3769163953Srrs } 3770172156Srrs /* sa_ignore NO_NULL_CHK */ 3771163953Srrs if (stcb->sctp_socket) { 3772252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3773172090Srrs struct socket *so; 3774172090Srrs 3775172090Srrs#endif 3776163953Srrs SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 3777179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3778172156Srrs /* sa_ignore NO_NULL_CHK */ 3779231038Stuexen sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 3780170744Srrs } 3781252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3782172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 3783172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 3784172090Srrs SCTP_TCB_UNLOCK(stcb); 3785172090Srrs SCTP_SOCKET_LOCK(so, 1); 3786172090Srrs SCTP_TCB_LOCK(stcb); 3787172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 3788172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3789172090Srrs /* assoc was freed while we were unlocked */ 3790172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 3791172090Srrs return; 3792172090Srrs } 3793172090Srrs#endif 3794163953Srrs sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 3795252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3796172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 3797172090Srrs#endif 3798163953Srrs } else { 3799179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3800231038Stuexen sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 3801170744Srrs } 3802163953Srrs } 3803163953Srrs 3804171440Srrs /* JRS - Use the congestion control given in the CC module */ 3805224641Stuexen if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 3806224641Stuexen TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 
3807224641Stuexen if (net->net_ack2 > 0) { 3808224641Stuexen /* 3809224641Stuexen * Karn's rule applies to clearing error 3810224641Stuexen * count, this is optional. 3811224641Stuexen */ 3812224641Stuexen net->error_count = 0; 3813224641Stuexen if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 3814224641Stuexen /* addr came good */ 3815224641Stuexen net->dest_state |= SCTP_ADDR_REACHABLE; 3816224641Stuexen sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3817237888Stuexen 0, (void *)net, SCTP_SO_NOT_LOCKED); 3818224641Stuexen } 3819224641Stuexen if (net == stcb->asoc.primary_destination) { 3820224641Stuexen if (stcb->asoc.alternate) { 3821224641Stuexen /* 3822224641Stuexen * release the alternate, 3823224641Stuexen * primary is good 3824224641Stuexen */ 3825224641Stuexen sctp_free_remote_addr(stcb->asoc.alternate); 3826224641Stuexen stcb->asoc.alternate = NULL; 3827224641Stuexen } 3828224641Stuexen } 3829224641Stuexen if (net->dest_state & SCTP_ADDR_PF) { 3830224641Stuexen net->dest_state &= ~SCTP_ADDR_PF; 3831224641Stuexen sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 3832224641Stuexen sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 3833224641Stuexen asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 3834224641Stuexen /* Done with this net */ 3835224641Stuexen net->net_ack = 0; 3836224641Stuexen } 3837224641Stuexen /* restore any doubled timers */ 3838224641Stuexen net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3839224641Stuexen if (net->RTO < stcb->asoc.minrto) { 3840224641Stuexen net->RTO = stcb->asoc.minrto; 3841224641Stuexen } 3842224641Stuexen if (net->RTO > stcb->asoc.maxrto) { 3843224641Stuexen net->RTO = stcb->asoc.maxrto; 3844224641Stuexen } 3845224641Stuexen } 3846224641Stuexen } 3847171440Srrs asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 3848224641Stuexen } 3849163953Srrs asoc->last_acked_seq = cumack; 3850168124Srrs 3851163953Srrs if 
(TAILQ_EMPTY(&asoc->sent_queue)) { 3852163953Srrs /* nothing left in-flight */ 3853163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3854163953Srrs net->flight_size = 0; 3855163953Srrs net->partial_bytes_acked = 0; 3856163953Srrs } 3857163953Srrs asoc->total_flight = 0; 3858163953Srrs asoc->total_flight_count = 0; 3859163953Srrs } 3860163953Srrs /* RWND update */ 3861163953Srrs asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3862210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3863163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3864163953Srrs /* SWS sender side engages */ 3865163953Srrs asoc->peers_rwnd = 0; 3866163953Srrs } 3867168124Srrs if (asoc->peers_rwnd > old_rwnd) { 3868168124Srrs win_probe_recovery = 1; 3869168124Srrs } 3870163953Srrs /* Now assure a timer where data is queued at */ 3871165220Srrsagain: 3872165220Srrs j = 0; 3873163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3874189444Srrs int to_ticks; 3875189444Srrs 3876168124Srrs if (win_probe_recovery && (net->window_probe)) { 3877168709Srrs win_probe_recovered = 1; 3878168124Srrs /* 3879168124Srrs * Find first chunk that was used with window probe 3880168124Srrs * and clear the sent 3881168124Srrs */ 3882169655Srrs /* sa_ignore FREED_MEMORY */ 3883168124Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3884168124Srrs if (tp1->window_probe) { 3885202526Stuexen /* move back to data send queue */ 3886231038Stuexen sctp_window_probe_recovery(stcb, asoc, tp1); 3887168124Srrs break; 3888168124Srrs } 3889168124Srrs } 3890168124Srrs } 3891189444Srrs if (net->RTO == 0) { 3892189444Srrs to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 3893189444Srrs } else { 3894189444Srrs to_ticks = MSEC_TO_TICKS(net->RTO); 3895189444Srrs } 3896163953Srrs if (net->flight_size) { 3897165220Srrs j++; 3898169420Srrs (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 3899163953Srrs sctp_timeout_handler, 
&net->rxt_timer); 3900189444Srrs if (net->window_probe) { 3901189444Srrs net->window_probe = 0; 3902189444Srrs } 3903163953Srrs } else { 3904189444Srrs if (net->window_probe) { 3905189444Srrs /* 3906189444Srrs * In window probes we must assure a timer 3907189444Srrs * is still running there 3908189444Srrs */ 3909189444Srrs net->window_probe = 0; 3910189444Srrs if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 3911189444Srrs SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 3912189444Srrs sctp_timeout_handler, &net->rxt_timer); 3913189444Srrs } 3914189444Srrs } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 3915163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3916165220Srrs stcb, net, 3917165220Srrs SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 3918163953Srrs } 3919163953Srrs } 3920163953Srrs } 3921168299Srrs if ((j == 0) && 3922168299Srrs (!TAILQ_EMPTY(&asoc->sent_queue)) && 3923168299Srrs (asoc->sent_queue_retran_cnt == 0) && 3924168709Srrs (win_probe_recovered == 0) && 3925168299Srrs (done_once == 0)) { 3926189790Srrs /* 3927189790Srrs * huh, this should not happen unless all packets are 3928189790Srrs * PR-SCTP and marked to skip of course. 
3929189790Srrs */ 3930189790Srrs if (sctp_fs_audit(asoc)) { 3931189790Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3932202526Stuexen net->flight_size = 0; 3933165220Srrs } 3934189790Srrs asoc->total_flight = 0; 3935189790Srrs asoc->total_flight_count = 0; 3936189790Srrs asoc->sent_queue_retran_cnt = 0; 3937189790Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3938189790Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3939189790Srrs sctp_flight_size_increase(tp1); 3940189790Srrs sctp_total_flight_increase(stcb, tp1); 3941189790Srrs } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3942208854Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3943189790Srrs } 3944189790Srrs } 3945165220Srrs } 3946168299Srrs done_once = 1; 3947165220Srrs goto again; 3948165220Srrs } 3949163953Srrs /**********************************/ 3950163953Srrs /* Now what about shutdown issues */ 3951163953Srrs /**********************************/ 3952163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 3953163953Srrs /* nothing left on sendqueue.. consider done */ 3954163953Srrs /* clean up */ 3955163953Srrs if ((asoc->stream_queue_cnt == 1) && 3956163953Srrs ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 3957163953Srrs (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 3958163953Srrs (asoc->locked_on_sending) 3959163953Srrs ) { 3960163953Srrs struct sctp_stream_queue_pending *sp; 3961163953Srrs 3962163953Srrs /* 3963163953Srrs * I may be in a state where we got all across.. but 3964163953Srrs * cannot write more due to a shutdown... we abort 3965163953Srrs * since the user did not indicate EOR in this case. 3966163953Srrs * The sp will be cleaned during free of the asoc. 
3967163953Srrs */ 3968163953Srrs sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 3969163953Srrs sctp_streamhead); 3970171990Srrs if ((sp) && (sp->length == 0)) { 3971171990Srrs /* Let cleanup code purge it */ 3972171990Srrs if (sp->msg_is_complete) { 3973171990Srrs asoc->stream_queue_cnt--; 3974171990Srrs } else { 3975171990Srrs asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 3976171990Srrs asoc->locked_on_sending = NULL; 3977171990Srrs asoc->stream_queue_cnt--; 3978171990Srrs } 3979163953Srrs } 3980163953Srrs } 3981163953Srrs if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 3982163953Srrs (asoc->stream_queue_cnt == 0)) { 3983163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 3984163953Srrs /* Need to abort here */ 3985266181Stuexen struct mbuf *op_err; 3986163953Srrs 3987163953Srrs abort_out_now: 3988163953Srrs *abort_now = 1; 3989163953Srrs /* XXX */ 3990266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 3991165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 3992266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 3993163953Srrs } else { 3994224641Stuexen struct sctp_nets *netp; 3995224641Stuexen 3996166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 3997166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3998166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3999166675Srrs } 4000171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4001172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4002163953Srrs sctp_stop_timers_for_shutdown(stcb); 4003224641Stuexen if (asoc->alternate) { 4004224641Stuexen netp = asoc->alternate; 4005224641Stuexen } else { 4006224641Stuexen netp = asoc->primary_destination; 4007224641Stuexen } 4008224641Stuexen sctp_send_shutdown(stcb, netp); 4009163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4010224641Stuexen stcb->sctp_ep, stcb, netp); 4011163953Srrs 
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4012224641Stuexen stcb->sctp_ep, stcb, netp); 4013163953Srrs } 4014163953Srrs } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4015163953Srrs (asoc->stream_queue_cnt == 0)) { 4016224641Stuexen struct sctp_nets *netp; 4017224641Stuexen 4018163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4019163953Srrs goto abort_out_now; 4020163953Srrs } 4021166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4022171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4023172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4024252960Stuexen sctp_stop_timers_for_shutdown(stcb); 4025252960Stuexen if (asoc->alternate) { 4026252960Stuexen netp = asoc->alternate; 4027252960Stuexen } else { 4028252960Stuexen netp = asoc->primary_destination; 4029252960Stuexen } 4030224641Stuexen sctp_send_shutdown_ack(stcb, netp); 4031163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4032224641Stuexen stcb->sctp_ep, stcb, netp); 4033163953Srrs } 4034163953Srrs } 4035189371Srrs /*********************************************/ 4036189371Srrs /* Here we perform PR-SCTP procedures */ 4037189371Srrs /* (section 4.2) */ 4038189371Srrs /*********************************************/ 4039189371Srrs /* C1. update advancedPeerAckPoint */ 4040216825Stuexen if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4041189371Srrs asoc->advanced_peer_ack_point = cumack; 4042189371Srrs } 4043185694Srrs /* PR-Sctp issues need to be addressed too */ 4044185694Srrs if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4045185694Srrs struct sctp_tmit_chunk *lchk; 4046185694Srrs uint32_t old_adv_peer_ack_point; 4047185694Srrs 4048185694Srrs old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4049185694Srrs lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4050185694Srrs /* C3. 
See if we need to send a Fwd-TSN */ 4051216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4052185694Srrs /* 4053218129Srrs * ISSUE with ECN, see FWD-TSN processing. 4054185694Srrs */ 4055216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4056185694Srrs send_forward_tsn(stcb, asoc); 4057189790Srrs } else if (lchk) { 4058189790Srrs /* try to FR fwd-tsn's that get lost too */ 4059210599Srrs if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4060189790Srrs send_forward_tsn(stcb, asoc); 4061189790Srrs } 4062185694Srrs } 4063185694Srrs } 4064185694Srrs if (lchk) { 4065185694Srrs /* Assure a timer is up */ 4066185694Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4067185694Srrs stcb->sctp_ep, stcb, lchk->whoTo); 4068185694Srrs } 4069185694Srrs } 4070179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4071170744Srrs sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4072170744Srrs rwnd, 4073170744Srrs stcb->asoc.peers_rwnd, 4074170744Srrs stcb->asoc.total_flight, 4075170744Srrs stcb->asoc.total_output_queue_size); 4076170744Srrs } 4077163953Srrs} 4078163953Srrs 4079163953Srrsvoid 4080202526Stuexensctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4081231038Stuexen struct sctp_tcb *stcb, 4082202526Stuexen uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4083202526Stuexen int *abort_now, uint8_t flags, 4084218186Srrs uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4085163953Srrs{ 4086163953Srrs struct sctp_association *asoc; 4087163953Srrs struct sctp_tmit_chunk *tp1, *tp2; 4088202526Stuexen uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4089163953Srrs uint16_t wake_him = 0; 4090168709Srrs uint32_t send_s = 0; 4091163953Srrs long j; 4092163953Srrs int accum_moved = 0; 4093163953Srrs int will_exit_fast_recovery = 0; 4094168124Srrs uint32_t a_rwnd, old_rwnd; 4095168124Srrs int win_probe_recovery = 0; 4096168709Srrs int win_probe_recovered = 0; 
4097163953Srrs struct sctp_nets *net = NULL; 4098168299Srrs int done_once; 4099219397Srrs int rto_ok = 1; 4100163953Srrs uint8_t reneged_all = 0; 4101163953Srrs uint8_t cmt_dac_flag; 4102163953Srrs 4103163953Srrs /* 4104163953Srrs * we take any chance we can to service our queues since we cannot 4105163953Srrs * get awoken when the socket is read from :< 4106163953Srrs */ 4107163953Srrs /* 4108163953Srrs * Now perform the actual SACK handling: 1) Verify that it is not an 4109163953Srrs * old sack, if so discard. 2) If there is nothing left in the send 4110163953Srrs * queue (cum-ack is equal to last acked) then you have a duplicate 4111163953Srrs * too, update any rwnd change and verify no timers are running. 4112163953Srrs * then return. 3) Process any new consequtive data i.e. cum-ack 4113163953Srrs * moved process these first and note that it moved. 4) Process any 4114163953Srrs * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4115163953Srrs * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4116163953Srrs * sync up flightsizes and things, stop all timers and also check 4117163953Srrs * for shutdown_pending state. If so then go ahead and send off the 4118163953Srrs * shutdown. If in shutdown recv, send off the shutdown-ack and 4119163953Srrs * start that timer, Ret. 9) Strike any non-acked things and do FR 4120163953Srrs * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4121163953Srrs * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4122163953Srrs * if in shutdown_recv state. 
4123163953Srrs */ 4124163953Srrs SCTP_TCB_LOCK_ASSERT(stcb); 4125163953Srrs /* CMT DAC algo */ 4126163953Srrs this_sack_lowest_newack = 0; 4127163953Srrs SCTP_STAT_INCR(sctps_slowpath_sack); 4128202526Stuexen last_tsn = cum_ack; 4129202526Stuexen cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4130171477Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 4131171477Srrs stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4132171477Srrs stcb->asoc.cumack_log_at++; 4133171477Srrs if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4134171477Srrs stcb->asoc.cumack_log_at = 0; 4135171477Srrs } 4136171477Srrs#endif 4137169208Srrs a_rwnd = rwnd; 4138163953Srrs 4139202526Stuexen if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4140202526Stuexen sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4141202526Stuexen rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4142202526Stuexen } 4143168124Srrs old_rwnd = stcb->asoc.peers_rwnd; 4144179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4145171943Srrs sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4146171943Srrs stcb->asoc.overall_error_count, 4147171943Srrs 0, 4148171943Srrs SCTP_FROM_SCTP_INDATA, 4149171943Srrs __LINE__); 4150171943Srrs } 4151163953Srrs stcb->asoc.overall_error_count = 0; 4152163953Srrs asoc = &stcb->asoc; 4153179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4154170744Srrs sctp_log_sack(asoc->last_acked_seq, 4155170744Srrs cum_ack, 4156170744Srrs 0, 4157170744Srrs num_seg, 4158170744Srrs num_dup, 4159170744Srrs SCTP_LOG_NEW_SACK); 4160170744Srrs } 4161224641Stuexen if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4162202526Stuexen uint16_t i; 4163170781Srrs uint32_t *dupdata, dblock; 4164163953Srrs 4165202526Stuexen for (i = 0; i < num_dup; i++) { 4166202526Stuexen dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4167170781Srrs sizeof(uint32_t), (uint8_t *) & dblock); 4168202526Stuexen 
if (dupdata == NULL) { 4169202526Stuexen break; 4170163953Srrs } 4171202526Stuexen sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4172163953Srrs } 4173163953Srrs } 4174179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4175168709Srrs /* reality check */ 4176168709Srrs if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4177168709Srrs tp1 = TAILQ_LAST(&asoc->sent_queue, 4178168709Srrs sctpchunk_listhead); 4179168709Srrs send_s = tp1->rec.data.TSN_seq + 1; 4180168709Srrs } else { 4181206137Stuexen tp1 = NULL; 4182168709Srrs send_s = asoc->sending_seq; 4183168709Srrs } 4184216825Stuexen if (SCTP_TSN_GE(cum_ack, send_s)) { 4185266181Stuexen struct mbuf *op_err; 4186266181Stuexen char msg[SCTP_DIAG_INFO_LEN]; 4187168709Srrs 4188163953Srrs /* 4189163953Srrs * no way, we have not even sent this TSN out yet. 4190163953Srrs * Peer is hopelessly messed up with us. 4191163953Srrs */ 4192235164Stuexen SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4193206137Stuexen cum_ack, send_s); 4194206137Stuexen if (tp1) { 4195235164Stuexen SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n", 4196252927Stuexen tp1->rec.data.TSN_seq, (void *)tp1); 4197206137Stuexen } 4198163953Srrs hopeless_peer: 4199163953Srrs *abort_now = 1; 4200163953Srrs /* XXX */ 4201266181Stuexen snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x", 4202266181Stuexen cum_ack, send_s); 4203266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4204165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4205266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4206163953Srrs return; 4207163953Srrs } 4208163953Srrs } 4209163953Srrs /**********************/ 4210163953Srrs /* 1) check the range */ 4211163953Srrs /**********************/ 4212216825Stuexen if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4213163953Srrs /* acking something behind */ 4214163953Srrs return; 4215163953Srrs } 4216163953Srrs /* update 
the Rwnd of the peer */ 4217163953Srrs if (TAILQ_EMPTY(&asoc->sent_queue) && 4218163953Srrs TAILQ_EMPTY(&asoc->send_queue) && 4219202526Stuexen (asoc->stream_queue_cnt == 0)) { 4220163953Srrs /* nothing left on send/sent and strmq */ 4221179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4222170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4223170744Srrs asoc->peers_rwnd, 0, 0, a_rwnd); 4224170744Srrs } 4225163953Srrs asoc->peers_rwnd = a_rwnd; 4226163953Srrs if (asoc->sent_queue_retran_cnt) { 4227163953Srrs asoc->sent_queue_retran_cnt = 0; 4228163953Srrs } 4229163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4230163953Srrs /* SWS sender side engages */ 4231163953Srrs asoc->peers_rwnd = 0; 4232163953Srrs } 4233163953Srrs /* stop any timers */ 4234163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4235163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4236165220Srrs stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4237163953Srrs net->partial_bytes_acked = 0; 4238163953Srrs net->flight_size = 0; 4239163953Srrs } 4240163953Srrs asoc->total_flight = 0; 4241163953Srrs asoc->total_flight_count = 0; 4242163953Srrs return; 4243163953Srrs } 4244163953Srrs /* 4245163953Srrs * We init netAckSz and netAckSz2 to 0. These are used to track 2 4246163953Srrs * things. The total byte count acked is tracked in netAckSz AND 4247163953Srrs * netAck2 is used to track the total bytes acked that are un- 4248163953Srrs * amibguious and were never retransmitted. We track these on a per 4249163953Srrs * destination address basis. 
4250163953Srrs */ 4251163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4252218072Srrs if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4253218072Srrs /* Drag along the window_tsn for cwr's */ 4254218072Srrs net->cwr_window_tsn = cum_ack; 4255218072Srrs } 4256163953Srrs net->prev_cwnd = net->cwnd; 4257163953Srrs net->net_ack = 0; 4258163953Srrs net->net_ack2 = 0; 4259163953Srrs 4260163953Srrs /* 4261167598Srrs * CMT: Reset CUC and Fast recovery algo variables before 4262167598Srrs * SACK processing 4263163953Srrs */ 4264163953Srrs net->new_pseudo_cumack = 0; 4265163953Srrs net->will_exit_fast_recovery = 0; 4266219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4267219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4268219057Srrs } 4269163953Srrs } 4270163953Srrs /* process the new consecutive TSN first */ 4271216822Stuexen TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4272216825Stuexen if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) { 4273163953Srrs if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4274163953Srrs accum_moved = 1; 4275163953Srrs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4276163953Srrs /* 4277163953Srrs * If it is less than ACKED, it is 4278163953Srrs * now no-longer in flight. Higher 4279163953Srrs * values may occur during marking 4280163953Srrs */ 4281163953Srrs if ((tp1->whoTo->dest_state & 4282163953Srrs SCTP_ADDR_UNCONFIRMED) && 4283163953Srrs (tp1->snd_count < 2)) { 4284163953Srrs /* 4285163953Srrs * If there was no retran 4286163953Srrs * and the address is 4287163953Srrs * un-confirmed and we sent 4288163953Srrs * there and are now 4289163953Srrs * sacked.. its confirmed, 4290163953Srrs * mark it so. 
4291163953Srrs */ 4292163953Srrs tp1->whoTo->dest_state &= 4293163953Srrs ~SCTP_ADDR_UNCONFIRMED; 4294163953Srrs } 4295168709Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4296179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4297170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4298170744Srrs tp1->whoTo->flight_size, 4299170744Srrs tp1->book_size, 4300170744Srrs (uintptr_t) tp1->whoTo, 4301170744Srrs tp1->rec.data.TSN_seq); 4302170744Srrs } 4303168709Srrs sctp_flight_size_decrease(tp1); 4304168709Srrs sctp_total_flight_decrease(stcb, tp1); 4305219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4306219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4307219057Srrs tp1); 4308219057Srrs } 4309163953Srrs } 4310163953Srrs tp1->whoTo->net_ack += tp1->send_size; 4311163953Srrs 4312163953Srrs /* CMT SFR and DAC algos */ 4313163953Srrs this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4314163953Srrs tp1->whoTo->saw_newack = 1; 4315163953Srrs 4316163953Srrs if (tp1->snd_count < 2) { 4317163953Srrs /* 4318163953Srrs * True non-retransmited 4319163953Srrs * chunk 4320163953Srrs */ 4321163953Srrs tp1->whoTo->net_ack2 += 4322163953Srrs tp1->send_size; 4323163953Srrs 4324163953Srrs /* update RTO too? */ 4325163953Srrs if (tp1->do_rtt) { 4326219397Srrs if (rto_ok) { 4327219397Srrs tp1->whoTo->RTO = 4328219397Srrs sctp_calculate_rto(stcb, 4329219397Srrs asoc, tp1->whoTo, 4330219397Srrs &tp1->sent_rcv_time, 4331219397Srrs sctp_align_safe_nocopy, 4332219397Srrs SCTP_RTT_FROM_DATA); 4333219397Srrs rto_ok = 0; 4334219397Srrs } 4335219397Srrs if (tp1->whoTo->rto_needed == 0) { 4336219397Srrs tp1->whoTo->rto_needed = 1; 4337219397Srrs } 4338163953Srrs tp1->do_rtt = 0; 4339163953Srrs } 4340163953Srrs } 4341163953Srrs /* 4342163953Srrs * CMT: CUCv2 algorithm. 
From the 4343163953Srrs * cumack'd TSNs, for each TSN being 4344163953Srrs * acked for the first time, set the 4345163953Srrs * following variables for the 4346163953Srrs * corresp destination. 4347163953Srrs * new_pseudo_cumack will trigger a 4348163953Srrs * cwnd update. 4349163953Srrs * find_(rtx_)pseudo_cumack will 4350163953Srrs * trigger search for the next 4351163953Srrs * expected (rtx-)pseudo-cumack. 4352163953Srrs */ 4353163953Srrs tp1->whoTo->new_pseudo_cumack = 1; 4354163953Srrs tp1->whoTo->find_pseudo_cumack = 1; 4355163953Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 4356163953Srrs 4357163953Srrs 4358179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4359170744Srrs sctp_log_sack(asoc->last_acked_seq, 4360170744Srrs cum_ack, 4361170744Srrs tp1->rec.data.TSN_seq, 4362170744Srrs 0, 4363170744Srrs 0, 4364170744Srrs SCTP_LOG_TSN_ACKED); 4365170744Srrs } 4366179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4367170744Srrs sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4368170744Srrs } 4369163953Srrs } 4370163953Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4371163953Srrs sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4372163953Srrs#ifdef SCTP_AUDITING_ENABLED 4373163953Srrs sctp_audit_log(0xB3, 4374163953Srrs (asoc->sent_queue_retran_cnt & 0x000000ff)); 4375163953Srrs#endif 4376163953Srrs } 4377167598Srrs if (tp1->rec.data.chunk_was_revoked) { 4378167598Srrs /* deflate the cwnd */ 4379167598Srrs tp1->whoTo->cwnd -= tp1->book_size; 4380167598Srrs tp1->rec.data.chunk_was_revoked = 0; 4381167598Srrs } 4382252943Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4383252943Stuexen tp1->sent = SCTP_DATAGRAM_ACKED; 4384252943Stuexen } 4385163953Srrs } 4386163953Srrs } else { 4387163953Srrs break; 4388163953Srrs } 4389163953Srrs } 4390163953Srrs biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4391163953Srrs /* always set this up to cum-ack */ 4392163953Srrs 
asoc->this_sack_highest_gap = last_tsn; 4393163953Srrs 4394202526Stuexen if ((num_seg > 0) || (num_nr_seg > 0)) { 4395163953Srrs 4396163953Srrs /* 4397163953Srrs * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4398163953Srrs * to be greater than the cumack. Also reset saw_newack to 0 4399163953Srrs * for all dests. 4400163953Srrs */ 4401163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4402163953Srrs net->saw_newack = 0; 4403163953Srrs net->this_sack_highest_newack = last_tsn; 4404163953Srrs } 4405163953Srrs 4406163953Srrs /* 4407163953Srrs * thisSackHighestGap will increase while handling NEW 4408163953Srrs * segments this_sack_highest_newack will increase while 4409163953Srrs * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4410163953Srrs * used for CMT DAC algo. saw_newack will also change. 4411163953Srrs */ 4412202526Stuexen if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4413202526Stuexen &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4414231038Stuexen num_seg, num_nr_seg, &rto_ok)) { 4415202526Stuexen wake_him++; 4416202526Stuexen } 4417179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4418163953Srrs /* 4419163953Srrs * validate the biggest_tsn_acked in the gap acks if 4420163953Srrs * strict adherence is wanted. 4421163953Srrs */ 4422216825Stuexen if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4423163953Srrs /* 4424163953Srrs * peer is either confused or we are under 4425163953Srrs * attack. We must abort. 4426163953Srrs */ 4427235164Stuexen SCTP_PRINTF("Hopeless peer! 
biggest_tsn_acked:%x largest seq:%x\n", 4428235164Stuexen biggest_tsn_acked, send_s); 4429163953Srrs goto hopeless_peer; 4430163953Srrs } 4431163953Srrs } 4432163953Srrs } 4433163953Srrs /*******************************************/ 4434163953Srrs /* cancel ALL T3-send timer if accum moved */ 4435163953Srrs /*******************************************/ 4436216669Stuexen if (asoc->sctp_cmt_on_off > 0) { 4437163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4438163953Srrs if (net->new_pseudo_cumack) 4439163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4440165220Srrs stcb, net, 4441165220Srrs SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4442163953Srrs 4443163953Srrs } 4444163953Srrs } else { 4445163953Srrs if (accum_moved) { 4446163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4447163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4448165220Srrs stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4449163953Srrs } 4450163953Srrs } 4451163953Srrs } 4452163953Srrs /********************************************/ 4453216188Stuexen /* drop the acked chunks from the sentqueue */ 4454163953Srrs /********************************************/ 4455163953Srrs asoc->last_acked_seq = cum_ack; 4456163953Srrs 4457216669Stuexen TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4458216825Stuexen if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { 4459163953Srrs break; 4460163953Srrs } 4461252943Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4462252942Stuexen if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 4463252942Stuexen asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 4464252942Stuexen#ifdef INVARIANTS 4465252942Stuexen } else { 4466252942Stuexen panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 4467252942Stuexen#endif 4468252942Stuexen } 4469163953Srrs } 4470163953Srrs TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4471265963Stuexen if (PR_SCTP_ENABLED(tp1->flags)) { 
4472163953Srrs if (asoc->pr_sctp_cnt != 0) 4473163953Srrs asoc->pr_sctp_cnt--; 4474163953Srrs } 4475216669Stuexen asoc->sent_queue_cnt--; 4476163953Srrs if (tp1->data) { 4477172156Srrs /* sa_ignore NO_NULL_CHK */ 4478163953Srrs sctp_free_bufspace(stcb, asoc, tp1, 1); 4479163953Srrs sctp_m_freem(tp1->data); 4480216669Stuexen tp1->data = NULL; 4481196260Stuexen if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4482163953Srrs asoc->sent_queue_cnt_removeable--; 4483163953Srrs } 4484163953Srrs } 4485179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4486170744Srrs sctp_log_sack(asoc->last_acked_seq, 4487170744Srrs cum_ack, 4488170744Srrs tp1->rec.data.TSN_seq, 4489170744Srrs 0, 4490170744Srrs 0, 4491170744Srrs SCTP_LOG_FREE_SENT); 4492170744Srrs } 4493221627Stuexen sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4494163953Srrs wake_him++; 4495216669Stuexen } 4496216669Stuexen if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4497216669Stuexen#ifdef INVARIANTS 4498216669Stuexen panic("Warning flight size is postive and should be 0"); 4499216669Stuexen#else 4500216669Stuexen SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4501216669Stuexen asoc->total_flight); 4502216669Stuexen#endif 4503216669Stuexen asoc->total_flight = 0; 4504216669Stuexen } 4505172156Srrs /* sa_ignore NO_NULL_CHK */ 4506163953Srrs if ((wake_him) && (stcb->sctp_socket)) { 4507252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4508172090Srrs struct socket *so; 4509172090Srrs 4510172090Srrs#endif 4511163953Srrs SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4512179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4513231038Stuexen sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4514170744Srrs } 4515252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4516172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 4517172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 
4518172090Srrs SCTP_TCB_UNLOCK(stcb); 4519172090Srrs SCTP_SOCKET_LOCK(so, 1); 4520172090Srrs SCTP_TCB_LOCK(stcb); 4521172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 4522172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4523172090Srrs /* assoc was freed while we were unlocked */ 4524172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 4525172090Srrs return; 4526172090Srrs } 4527172090Srrs#endif 4528163953Srrs sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4529252882Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4530172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 4531172090Srrs#endif 4532163953Srrs } else { 4533179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4534231038Stuexen sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4535170744Srrs } 4536163953Srrs } 4537163953Srrs 4538167598Srrs if (asoc->fast_retran_loss_recovery && accum_moved) { 4539216825Stuexen if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4540163953Srrs /* Setup so we will exit RFC2582 fast recovery */ 4541163953Srrs will_exit_fast_recovery = 1; 4542163953Srrs } 4543163953Srrs } 4544163953Srrs /* 4545163953Srrs * Check for revoked fragments: 4546163953Srrs * 4547163953Srrs * if Previous sack - Had no frags then we can't have any revoked if 4548163953Srrs * Previous sack - Had frag's then - If we now have frags aka 4549163953Srrs * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4550163953Srrs * some of them. else - The peer revoked all ACKED fragments, since 4551163953Srrs * we had some before and now we have NONE. 
4552163953Srrs */ 4553163953Srrs 4554216188Stuexen if (num_seg) { 4555168709Srrs sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4556216188Stuexen asoc->saw_sack_with_frags = 1; 4557216188Stuexen } else if (asoc->saw_sack_with_frags) { 4558163953Srrs int cnt_revoked = 0; 4559163953Srrs 4560216822Stuexen /* Peer revoked all dg's marked or acked */ 4561216822Stuexen TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4562216822Stuexen if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4563216822Stuexen tp1->sent = SCTP_DATAGRAM_SENT; 4564216822Stuexen if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4565216822Stuexen sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4566216822Stuexen tp1->whoTo->flight_size, 4567216822Stuexen tp1->book_size, 4568216822Stuexen (uintptr_t) tp1->whoTo, 4569216822Stuexen tp1->rec.data.TSN_seq); 4570163953Srrs } 4571216822Stuexen sctp_flight_size_increase(tp1); 4572216822Stuexen sctp_total_flight_increase(stcb, tp1); 4573216822Stuexen tp1->rec.data.chunk_was_revoked = 1; 4574216822Stuexen /* 4575216822Stuexen * To ensure that this increase in 4576216822Stuexen * flightsize, which is artificial, does not 4577216822Stuexen * throttle the sender, we also increase the 4578216822Stuexen * cwnd artificially. 
4579216822Stuexen */ 4580216822Stuexen tp1->whoTo->cwnd += tp1->book_size; 4581216822Stuexen cnt_revoked++; 4582163953Srrs } 4583163953Srrs } 4584216822Stuexen if (cnt_revoked) { 4585216822Stuexen reneged_all = 1; 4586216822Stuexen } 4587163953Srrs asoc->saw_sack_with_frags = 0; 4588163953Srrs } 4589216188Stuexen if (num_nr_seg > 0) 4590216188Stuexen asoc->saw_sack_with_nr_frags = 1; 4591163953Srrs else 4592216188Stuexen asoc->saw_sack_with_nr_frags = 0; 4593163953Srrs 4594171440Srrs /* JRS - Use the congestion control given in the CC module */ 4595224641Stuexen if (ecne_seen == 0) { 4596224641Stuexen TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4597224641Stuexen if (net->net_ack2 > 0) { 4598224641Stuexen /* 4599224641Stuexen * Karn's rule applies to clearing error 4600224641Stuexen * count, this is optional. 4601224641Stuexen */ 4602224641Stuexen net->error_count = 0; 4603224641Stuexen if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4604224641Stuexen /* addr came good */ 4605224641Stuexen net->dest_state |= SCTP_ADDR_REACHABLE; 4606224641Stuexen sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4607237888Stuexen 0, (void *)net, SCTP_SO_NOT_LOCKED); 4608224641Stuexen } 4609224641Stuexen if (net == stcb->asoc.primary_destination) { 4610224641Stuexen if (stcb->asoc.alternate) { 4611224641Stuexen /* 4612224641Stuexen * release the alternate, 4613224641Stuexen * primary is good 4614224641Stuexen */ 4615224641Stuexen sctp_free_remote_addr(stcb->asoc.alternate); 4616224641Stuexen stcb->asoc.alternate = NULL; 4617224641Stuexen } 4618224641Stuexen } 4619224641Stuexen if (net->dest_state & SCTP_ADDR_PF) { 4620224641Stuexen net->dest_state &= ~SCTP_ADDR_PF; 4621224641Stuexen sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 4622224641Stuexen sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4623224641Stuexen asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4624224641Stuexen /* Done with this net 
*/ 4625224641Stuexen net->net_ack = 0; 4626224641Stuexen } 4627224641Stuexen /* restore any doubled timers */ 4628224641Stuexen net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4629224641Stuexen if (net->RTO < stcb->asoc.minrto) { 4630224641Stuexen net->RTO = stcb->asoc.minrto; 4631224641Stuexen } 4632224641Stuexen if (net->RTO > stcb->asoc.maxrto) { 4633224641Stuexen net->RTO = stcb->asoc.maxrto; 4634224641Stuexen } 4635224641Stuexen } 4636224641Stuexen } 4637218186Srrs asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4638224641Stuexen } 4639163953Srrs if (TAILQ_EMPTY(&asoc->sent_queue)) { 4640163953Srrs /* nothing left in-flight */ 4641163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4642163953Srrs /* stop all timers */ 4643163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4644165220Srrs stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4645163953Srrs net->flight_size = 0; 4646163953Srrs net->partial_bytes_acked = 0; 4647163953Srrs } 4648163953Srrs asoc->total_flight = 0; 4649163953Srrs asoc->total_flight_count = 0; 4650163953Srrs } 4651163953Srrs /**********************************/ 4652163953Srrs /* Now what about shutdown issues */ 4653163953Srrs /**********************************/ 4654163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4655163953Srrs /* nothing left on sendqueue.. 
consider done */ 4656179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4657170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4658170744Srrs asoc->peers_rwnd, 0, 0, a_rwnd); 4659170744Srrs } 4660163953Srrs asoc->peers_rwnd = a_rwnd; 4661163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4662163953Srrs /* SWS sender side engages */ 4663163953Srrs asoc->peers_rwnd = 0; 4664163953Srrs } 4665163953Srrs /* clean up */ 4666163953Srrs if ((asoc->stream_queue_cnt == 1) && 4667163953Srrs ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4668163953Srrs (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4669163953Srrs (asoc->locked_on_sending) 4670163953Srrs ) { 4671163953Srrs struct sctp_stream_queue_pending *sp; 4672163953Srrs 4673163953Srrs /* 4674163953Srrs * I may be in a state where we got all across.. but 4675163953Srrs * cannot write more due to a shutdown... we abort 4676163953Srrs * since the user did not indicate EOR in this case. 4677163953Srrs */ 4678163953Srrs sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4679163953Srrs sctp_streamhead); 4680171990Srrs if ((sp) && (sp->length == 0)) { 4681163953Srrs asoc->locked_on_sending = NULL; 4682171990Srrs if (sp->msg_is_complete) { 4683171990Srrs asoc->stream_queue_cnt--; 4684171990Srrs } else { 4685171990Srrs asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4686171990Srrs asoc->stream_queue_cnt--; 4687171990Srrs } 4688163953Srrs } 4689163953Srrs } 4690163953Srrs if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4691163953Srrs (asoc->stream_queue_cnt == 0)) { 4692163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4693163953Srrs /* Need to abort here */ 4694266181Stuexen struct mbuf *op_err; 4695163953Srrs 4696163953Srrs abort_out_now: 4697163953Srrs *abort_now = 1; 4698163953Srrs /* XXX */ 4699266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4700165220Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + 
SCTP_LOC_31; 4701266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4702163953Srrs return; 4703163953Srrs } else { 4704224641Stuexen struct sctp_nets *netp; 4705224641Stuexen 4706166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4707166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4708166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4709166675Srrs } 4710171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4711172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4712163953Srrs sctp_stop_timers_for_shutdown(stcb); 4713252960Stuexen if (asoc->alternate) { 4714252960Stuexen netp = asoc->alternate; 4715252960Stuexen } else { 4716252960Stuexen netp = asoc->primary_destination; 4717252960Stuexen } 4718224641Stuexen sctp_send_shutdown(stcb, netp); 4719163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4720224641Stuexen stcb->sctp_ep, stcb, netp); 4721163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4722224641Stuexen stcb->sctp_ep, stcb, netp); 4723163953Srrs } 4724163953Srrs return; 4725163953Srrs } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4726163953Srrs (asoc->stream_queue_cnt == 0)) { 4727224641Stuexen struct sctp_nets *netp; 4728224641Stuexen 4729163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4730163953Srrs goto abort_out_now; 4731163953Srrs } 4732166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4733171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4734172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4735252960Stuexen sctp_stop_timers_for_shutdown(stcb); 4736252960Stuexen if (asoc->alternate) { 4737252960Stuexen netp = asoc->alternate; 4738252960Stuexen } else { 4739252960Stuexen netp = asoc->primary_destination; 4740252960Stuexen } 4741224641Stuexen sctp_send_shutdown_ack(stcb, netp); 4742163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4743224641Stuexen stcb->sctp_ep, stcb, netp); 
4744163953Srrs return; 4745163953Srrs } 4746163953Srrs } 4747163953Srrs /* 4748163953Srrs * Now here we are going to recycle net_ack for a different use... 4749163953Srrs * HEADS UP. 4750163953Srrs */ 4751163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4752163953Srrs net->net_ack = 0; 4753163953Srrs } 4754163953Srrs 4755163953Srrs /* 4756163953Srrs * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4757163953Srrs * to be done. Setting this_sack_lowest_newack to the cum_ack will 4758163953Srrs * automatically ensure that. 4759163953Srrs */ 4760216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 4761211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 4762211944Stuexen (cmt_dac_flag == 0)) { 4763163953Srrs this_sack_lowest_newack = cum_ack; 4764163953Srrs } 4765202526Stuexen if ((num_seg > 0) || (num_nr_seg > 0)) { 4766163953Srrs sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4767163953Srrs biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4768163953Srrs } 4769171440Srrs /* JRS - Use the congestion control given in the CC module */ 4770171440Srrs asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4771163953Srrs 4772163953Srrs /* Now are we exiting loss recovery ? 
*/ 4773163953Srrs if (will_exit_fast_recovery) { 4774163953Srrs /* Ok, we must exit fast recovery */ 4775163953Srrs asoc->fast_retran_loss_recovery = 0; 4776163953Srrs } 4777163953Srrs if ((asoc->sat_t3_loss_recovery) && 4778216825Stuexen SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 4779163953Srrs /* end satellite t3 loss recovery */ 4780163953Srrs asoc->sat_t3_loss_recovery = 0; 4781163953Srrs } 4782167598Srrs /* 4783167598Srrs * CMT Fast recovery 4784167598Srrs */ 4785163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4786163953Srrs if (net->will_exit_fast_recovery) { 4787163953Srrs /* Ok, we must exit fast recovery */ 4788163953Srrs net->fast_retran_loss_recovery = 0; 4789163953Srrs } 4790163953Srrs } 4791163953Srrs 4792163953Srrs /* Adjust and set the new rwnd value */ 4793179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4794170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4795210599Srrs asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 4796170744Srrs } 4797163953Srrs asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 4798210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4799163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4800163953Srrs /* SWS sender side engages */ 4801163953Srrs asoc->peers_rwnd = 0; 4802163953Srrs } 4803168124Srrs if (asoc->peers_rwnd > old_rwnd) { 4804168124Srrs win_probe_recovery = 1; 4805168124Srrs } 4806163953Srrs /* 4807163953Srrs * Now we must setup so we have a timer up for anyone with 4808163953Srrs * outstanding data. 
4809163953Srrs */ 4810168299Srrs done_once = 0; 4811165220Srrsagain: 4812165220Srrs j = 0; 4813163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4814168124Srrs if (win_probe_recovery && (net->window_probe)) { 4815168709Srrs win_probe_recovered = 1; 4816168124Srrs /*- 4817168124Srrs * Find first chunk that was used with 4818168124Srrs * window probe and clear the event. Put 4819168124Srrs * it back into the send queue as if has 4820168124Srrs * not been sent. 4821168124Srrs */ 4822168124Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4823168124Srrs if (tp1->window_probe) { 4824231038Stuexen sctp_window_probe_recovery(stcb, asoc, tp1); 4825168124Srrs break; 4826168124Srrs } 4827168124Srrs } 4828168124Srrs } 4829163953Srrs if (net->flight_size) { 4830165220Srrs j++; 4831202526Stuexen if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4832202526Stuexen sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4833202526Stuexen stcb->sctp_ep, stcb, net); 4834202526Stuexen } 4835189444Srrs if (net->window_probe) { 4836202526Stuexen net->window_probe = 0; 4837189444Srrs } 4838168709Srrs } else { 4839189444Srrs if (net->window_probe) { 4840189444Srrs /* 4841189444Srrs * In window probes we must assure a timer 4842189444Srrs * is still running there 4843189444Srrs */ 4844189444Srrs if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4845189444Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4846189444Srrs stcb->sctp_ep, stcb, net); 4847189444Srrs 4848189444Srrs } 4849189444Srrs } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4850168709Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4851168709Srrs stcb, net, 4852168709Srrs SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4853168709Srrs } 4854163953Srrs } 4855163953Srrs } 4856168299Srrs if ((j == 0) && 4857168299Srrs (!TAILQ_EMPTY(&asoc->sent_queue)) && 4858168299Srrs (asoc->sent_queue_retran_cnt == 0) && 4859168709Srrs (win_probe_recovered == 0) && 4860168299Srrs (done_once == 0)) { 4861189790Srrs /* 4862189790Srrs * huh, 
this should not happen unless all packets are 4863189790Srrs * PR-SCTP and marked to skip of course. 4864189790Srrs */ 4865189790Srrs if (sctp_fs_audit(asoc)) { 4866189790Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4867189790Srrs net->flight_size = 0; 4868165220Srrs } 4869189790Srrs asoc->total_flight = 0; 4870189790Srrs asoc->total_flight_count = 0; 4871189790Srrs asoc->sent_queue_retran_cnt = 0; 4872189790Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4873189790Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4874189790Srrs sctp_flight_size_increase(tp1); 4875189790Srrs sctp_total_flight_increase(stcb, tp1); 4876189790Srrs } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4877208854Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4878189790Srrs } 4879189790Srrs } 4880165220Srrs } 4881168299Srrs done_once = 1; 4882165220Srrs goto again; 4883165220Srrs } 4884202526Stuexen /*********************************************/ 4885202526Stuexen /* Here we perform PR-SCTP procedures */ 4886202526Stuexen /* (section 4.2) */ 4887202526Stuexen /*********************************************/ 4888202526Stuexen /* C1. update advancedPeerAckPoint */ 4889216825Stuexen if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 4890189371Srrs asoc->advanced_peer_ack_point = cum_ack; 4891189371Srrs } 4892185694Srrs /* C2. try to further move advancedPeerAckPoint ahead */ 4893185694Srrs if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4894185694Srrs struct sctp_tmit_chunk *lchk; 4895185694Srrs uint32_t old_adv_peer_ack_point; 4896185694Srrs 4897185694Srrs old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4898185694Srrs lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4899185694Srrs /* C3. See if we need to send a Fwd-TSN */ 4900216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 4901185694Srrs /* 4902218129Srrs * ISSUE with ECN, see FWD-TSN processing. 
4903185694Srrs */ 4904189790Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 4905189790Srrs sctp_misc_ints(SCTP_FWD_TSN_CHECK, 4906189790Srrs 0xee, cum_ack, asoc->advanced_peer_ack_point, 4907189790Srrs old_adv_peer_ack_point); 4908189790Srrs } 4909216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4910185694Srrs send_forward_tsn(stcb, asoc); 4911189790Srrs } else if (lchk) { 4912189790Srrs /* try to FR fwd-tsn's that get lost too */ 4913210599Srrs if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4914189790Srrs send_forward_tsn(stcb, asoc); 4915189790Srrs } 4916185694Srrs } 4917185694Srrs } 4918185694Srrs if (lchk) { 4919185694Srrs /* Assure a timer is up */ 4920185694Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4921185694Srrs stcb->sctp_ep, stcb, lchk->whoTo); 4922185694Srrs } 4923185694Srrs } 4924179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4925170744Srrs sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4926170744Srrs a_rwnd, 4927170744Srrs stcb->asoc.peers_rwnd, 4928170744Srrs stcb->asoc.total_flight, 4929170744Srrs stcb->asoc.total_output_queue_size); 4930170744Srrs } 4931163953Srrs} 4932163953Srrs 4933163953Srrsvoid 4934231038Stuexensctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 4935163953Srrs{ 4936163953Srrs /* Copy cum-ack */ 4937163953Srrs uint32_t cum_ack, a_rwnd; 4938163953Srrs 4939163953Srrs cum_ack = ntohl(cp->cumulative_tsn_ack); 4940163953Srrs /* Arrange so a_rwnd does NOT change */ 4941163953Srrs a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 4942163953Srrs 4943163953Srrs /* Now call the express sack handling */ 4944218186Srrs sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 4945163953Srrs} 4946163953Srrs 4947163953Srrsstatic void 4948163953Srrssctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 4949163953Srrs struct sctp_stream_in *strmin) 4950163953Srrs{ 4951163953Srrs struct sctp_queued_to_read 
*ctl, *nctl; 4952163953Srrs struct sctp_association *asoc; 4953207983Srrs uint16_t tt; 4954163953Srrs 4955163953Srrs asoc = &stcb->asoc; 4956163953Srrs tt = strmin->last_sequence_delivered; 4957163953Srrs /* 4958163953Srrs * First deliver anything prior to and including the stream no that 4959163953Srrs * came in 4960163953Srrs */ 4961216822Stuexen TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { 4962216825Stuexen if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) { 4963163953Srrs /* this is deliverable now */ 4964163953Srrs TAILQ_REMOVE(&strmin->inqueue, ctl, next); 4965163953Srrs /* subtract pending on streams */ 4966163953Srrs asoc->size_on_all_streams -= ctl->length; 4967163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 4968163953Srrs /* deliver it to at least the delivery-q */ 4969163953Srrs if (stcb->sctp_socket) { 4970206137Stuexen sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 4971163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 4972163953Srrs ctl, 4973195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 4974163953Srrs } 4975163953Srrs } else { 4976163953Srrs /* no more delivery now. */ 4977163953Srrs break; 4978163953Srrs } 4979163953Srrs } 4980163953Srrs /* 4981163953Srrs * now we must deliver things in queue the normal way if any are 4982163953Srrs * now ready. 
4983163953Srrs */ 4984163953Srrs tt = strmin->last_sequence_delivered + 1; 4985216822Stuexen TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { 4986163953Srrs if (tt == ctl->sinfo_ssn) { 4987163953Srrs /* this is deliverable now */ 4988163953Srrs TAILQ_REMOVE(&strmin->inqueue, ctl, next); 4989163953Srrs /* subtract pending on streams */ 4990163953Srrs asoc->size_on_all_streams -= ctl->length; 4991163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 4992163953Srrs /* deliver it to at least the delivery-q */ 4993163953Srrs strmin->last_sequence_delivered = ctl->sinfo_ssn; 4994163953Srrs if (stcb->sctp_socket) { 4995206137Stuexen sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 4996163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 4997163953Srrs ctl, 4998195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 4999205627Srrs 5000163953Srrs } 5001163953Srrs tt = strmin->last_sequence_delivered + 1; 5002163953Srrs } else { 5003163953Srrs break; 5004163953Srrs } 5005163953Srrs } 5006163953Srrs} 5007163953Srrs 5008190689Srrsstatic void 5009190689Srrssctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5010190689Srrs struct sctp_association *asoc, 5011190689Srrs uint16_t stream, uint16_t seq) 5012190689Srrs{ 5013216822Stuexen struct sctp_tmit_chunk *chk, *nchk; 5014190689Srrs 5015216822Stuexen /* For each one on here see if we need to toss it */ 5016216822Stuexen /* 5017216822Stuexen * For now large messages held on the reasmqueue that are complete 5018216822Stuexen * will be tossed too. We could in theory do more work to spin 5019216822Stuexen * through and stop after dumping one msg aka seeing the start of a 5020216822Stuexen * new msg at the head, and call the delivery function... to see if 5021216822Stuexen * it can be delivered... But for now we just dump everything on the 5022216822Stuexen * queue. 
5023216822Stuexen */ 5024216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 5025190689Srrs /* 5026216822Stuexen * Do not toss it if on a different stream or marked for 5027216822Stuexen * unordered delivery in which case the stream sequence 5028216822Stuexen * number has no meaning. 5029190689Srrs */ 5030216822Stuexen if ((chk->rec.data.stream_number != stream) || 5031216822Stuexen ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) { 5032216822Stuexen continue; 5033216822Stuexen } 5034216822Stuexen if (chk->rec.data.stream_seq == seq) { 5035216822Stuexen /* It needs to be tossed */ 5036216822Stuexen TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5037216825Stuexen if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { 5038216822Stuexen asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 5039216822Stuexen asoc->str_of_pdapi = chk->rec.data.stream_number; 5040216822Stuexen asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 5041216822Stuexen asoc->fragment_flags = chk->rec.data.rcv_flags; 5042190689Srrs } 5043216822Stuexen asoc->size_on_reasm_queue -= chk->send_size; 5044216822Stuexen sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5045190689Srrs 5046216822Stuexen /* Clear up any stream problem */ 5047216825Stuexen if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && 5048216825Stuexen SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { 5049190689Srrs /* 5050216822Stuexen * We must dump forward this streams 5051216822Stuexen * sequence number if the chunk is not 5052216822Stuexen * unordered that is being skipped. There is 5053216822Stuexen * a chance that if the peer does not 5054216822Stuexen * include the last fragment in its FWD-TSN 5055216822Stuexen * we WILL have a problem here since you 5056216822Stuexen * would have a partial chunk in queue that 5057216822Stuexen * may not be deliverable. 
Also if a Partial 5058216822Stuexen * delivery API as started the user may get 5059216822Stuexen * a partial chunk. The next read returning 5060216822Stuexen * a new chunk... really ugly but I see no 5061216822Stuexen * way around it! Maybe a notify?? 5062190689Srrs */ 5063216822Stuexen asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; 5064190689Srrs } 5065216822Stuexen if (chk->data) { 5066216822Stuexen sctp_m_freem(chk->data); 5067216822Stuexen chk->data = NULL; 5068216822Stuexen } 5069221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5070216825Stuexen } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) { 5071216822Stuexen /* 5072216822Stuexen * If the stream_seq is > than the purging one, we 5073216822Stuexen * are done 5074216822Stuexen */ 5075216822Stuexen break; 5076190689Srrs } 5077190689Srrs } 5078190689Srrs} 5079190689Srrs 5080190689Srrs 5081163953Srrsvoid 5082163953Srrssctp_handle_forward_tsn(struct sctp_tcb *stcb, 5083206137Stuexen struct sctp_forward_tsn_chunk *fwd, 5084206137Stuexen int *abort_flag, struct mbuf *m, int offset) 5085163953Srrs{ 5086163953Srrs /* The pr-sctp fwd tsn */ 5087163953Srrs /* 5088163953Srrs * here we will perform all the data receiver side steps for 5089163953Srrs * processing FwdTSN, as required in by pr-sctp draft: 5090163953Srrs * 5091163953Srrs * Assume we get FwdTSN(x): 5092163953Srrs * 5093163953Srrs * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5094163953Srrs * others we have 3) examine and update re-ordering queue on 5095163953Srrs * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5096163953Srrs * report where we are. 
5097163953Srrs */ 5098163953Srrs struct sctp_association *asoc; 5099207963Srrs uint32_t new_cum_tsn, gap; 5100231038Stuexen unsigned int i, fwd_sz, m_size; 5101190689Srrs uint32_t str_seq; 5102163953Srrs struct sctp_stream_in *strm; 5103216822Stuexen struct sctp_tmit_chunk *chk, *nchk; 5104190689Srrs struct sctp_queued_to_read *ctl, *sv; 5105163953Srrs 5106163953Srrs asoc = &stcb->asoc; 5107163953Srrs if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5108169420Srrs SCTPDBG(SCTP_DEBUG_INDATA1, 5109169420Srrs "Bad size too small/big fwd-tsn\n"); 5110163953Srrs return; 5111163953Srrs } 5112163953Srrs m_size = (stcb->asoc.mapping_array_size << 3); 5113163953Srrs /*************************************************************/ 5114163953Srrs /* 1. Here we update local cumTSN and shift the bitmap array */ 5115163953Srrs /*************************************************************/ 5116163953Srrs new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5117163953Srrs 5118216825Stuexen if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { 5119163953Srrs /* Already got there ... */ 5120163953Srrs return; 5121163953Srrs } 5122163953Srrs /* 5123163953Srrs * now we know the new TSN is more advanced, let's find the actual 5124163953Srrs * gap 5125163953Srrs */ 5126206137Stuexen SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); 5127205627Srrs asoc->cumulative_tsn = new_cum_tsn; 5128171990Srrs if (gap >= m_size) { 5129163953Srrs if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5130266181Stuexen struct mbuf *op_err; 5131266181Stuexen char msg[SCTP_DIAG_INFO_LEN]; 5132169352Srrs 5133163953Srrs /* 5134163953Srrs * out of range (of single byte chunks in the rwnd I 5135169352Srrs * give out). This must be an attacker. 
5136163953Srrs */ 5137169352Srrs *abort_flag = 1; 5138266181Stuexen snprintf(msg, sizeof(msg), 5139266181Stuexen "New cum ack %8.8x too high, highest TSN %8.8x", 5140266181Stuexen new_cum_tsn, asoc->highest_tsn_inside_map); 5141266181Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 5142169352Srrs stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5143266181Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5144163953Srrs return; 5145163953Srrs } 5146170091Srrs SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5147205627Srrs 5148171990Srrs memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5149171990Srrs asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5150205627Srrs asoc->highest_tsn_inside_map = new_cum_tsn; 5151205627Srrs 5152206137Stuexen memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 5153205627Srrs asoc->highest_tsn_inside_nr_map = new_cum_tsn; 5154205627Srrs 5155179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5156170744Srrs sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5157170744Srrs } 5158171990Srrs } else { 5159171990Srrs SCTP_TCB_LOCK_ASSERT(stcb); 5160189790Srrs for (i = 0; i <= gap; i++) { 5161207963Srrs if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && 5162207963Srrs !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { 5163207963Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); 5164216825Stuexen if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { 5165207963Srrs asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; 5166206137Stuexen } 5167206137Stuexen } 5168206137Stuexen } 5169163953Srrs } 5170163953Srrs /*************************************************************/ 5171163953Srrs /* 2. 
Clear up re-assembly queue */ 5172163953Srrs /*************************************************************/ 5173163953Srrs /* 5174163953Srrs * First service it if pd-api is up, just in case we can progress it 5175163953Srrs * forward 5176163953Srrs */ 5177163953Srrs if (asoc->fragmented_delivery_inprogress) { 5178163953Srrs sctp_service_reassembly(stcb, asoc); 5179163953Srrs } 5180216822Stuexen /* For each one on here see if we need to toss it */ 5181216822Stuexen /* 5182216822Stuexen * For now large messages held on the reasmqueue that are complete 5183216822Stuexen * will be tossed too. We could in theory do more work to spin 5184216822Stuexen * through and stop after dumping one msg aka seeing the start of a 5185216822Stuexen * new msg at the head, and call the delivery function... to see if 5186216822Stuexen * it can be delivered... But for now we just dump everything on the 5187216822Stuexen * queue. 5188216822Stuexen */ 5189216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 5190216825Stuexen if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) { 5191216822Stuexen /* It needs to be tossed */ 5192216822Stuexen TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5193216825Stuexen if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { 5194216822Stuexen asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 5195216822Stuexen asoc->str_of_pdapi = chk->rec.data.stream_number; 5196216822Stuexen asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 5197216822Stuexen asoc->fragment_flags = chk->rec.data.rcv_flags; 5198216822Stuexen } 5199216822Stuexen asoc->size_on_reasm_queue -= chk->send_size; 5200216822Stuexen sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5201163953Srrs 5202216822Stuexen /* Clear up any stream problem */ 5203216825Stuexen if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && 5204216825Stuexen SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { 
5205163953Srrs /* 5206216822Stuexen * We must dump forward this streams 5207216822Stuexen * sequence number if the chunk is not 5208216822Stuexen * unordered that is being skipped. There is 5209216822Stuexen * a chance that if the peer does not 5210216822Stuexen * include the last fragment in its FWD-TSN 5211216822Stuexen * we WILL have a problem here since you 5212216822Stuexen * would have a partial chunk in queue that 5213216822Stuexen * may not be deliverable. Also if a Partial 5214216822Stuexen * delivery API as started the user may get 5215216822Stuexen * a partial chunk. The next read returning 5216216822Stuexen * a new chunk... really ugly but I see no 5217216822Stuexen * way around it! Maybe a notify?? 5218163953Srrs */ 5219216822Stuexen asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; 5220163953Srrs } 5221216822Stuexen if (chk->data) { 5222216822Stuexen sctp_m_freem(chk->data); 5223216822Stuexen chk->data = NULL; 5224216822Stuexen } 5225221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5226216822Stuexen } else { 5227216822Stuexen /* 5228216822Stuexen * Ok we have gone beyond the end of the fwd-tsn's 5229216822Stuexen * mark. 5230216822Stuexen */ 5231216822Stuexen break; 5232163953Srrs } 5233163953Srrs } 5234190689Srrs /*******************************************************/ 5235190689Srrs /* 3. Update the PR-stream re-ordering queues and fix */ 5236190689Srrs /* delivery issues as needed. */ 5237190689Srrs /*******************************************************/ 5238163953Srrs fwd_sz -= sizeof(*fwd); 5239170992Srrs if (m && fwd_sz) { 5240163953Srrs /* New method. 
*/ 5241170056Srrs unsigned int num_str; 5242170992Srrs struct sctp_strseq *stseq, strseqbuf; 5243163953Srrs 5244170992Srrs offset += sizeof(*fwd); 5245170992Srrs 5246190689Srrs SCTP_INP_READ_LOCK(stcb->sctp_ep); 5247163953Srrs num_str = fwd_sz / sizeof(struct sctp_strseq); 5248163953Srrs for (i = 0; i < num_str; i++) { 5249163953Srrs uint16_t st; 5250163953Srrs 5251170992Srrs stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5252170992Srrs sizeof(struct sctp_strseq), 5253170992Srrs (uint8_t *) & strseqbuf); 5254170992Srrs offset += sizeof(struct sctp_strseq); 5255171990Srrs if (stseq == NULL) { 5256170992Srrs break; 5257171990Srrs } 5258163953Srrs /* Convert */ 5259172091Srrs st = ntohs(stseq->stream); 5260172091Srrs stseq->stream = st; 5261172091Srrs st = ntohs(stseq->sequence); 5262172091Srrs stseq->sequence = st; 5263190689Srrs 5264163953Srrs /* now process */ 5265190689Srrs 5266190689Srrs /* 5267190689Srrs * Ok we now look for the stream/seq on the read 5268190689Srrs * queue where its not all delivered. If we find it 5269190689Srrs * we transmute the read entry into a PDI_ABORTED. 5270190689Srrs */ 5271172091Srrs if (stseq->stream >= asoc->streamincnt) { 5272171990Srrs /* screwed up streams, stop! */ 5273171990Srrs break; 5274163953Srrs } 5275190689Srrs if ((asoc->str_of_pdapi == stseq->stream) && 5276190689Srrs (asoc->ssn_of_pdapi == stseq->sequence)) { 5277190689Srrs /* 5278190689Srrs * If this is the one we were partially 5279190689Srrs * delivering now then we no longer are. 5280190689Srrs * Note this will change with the reassembly 5281190689Srrs * re-write. 
5282190689Srrs */ 5283190689Srrs asoc->fragmented_delivery_inprogress = 0; 5284190689Srrs } 5285190689Srrs sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence); 5286190689Srrs TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) { 5287190689Srrs if ((ctl->sinfo_stream == stseq->stream) && 5288190689Srrs (ctl->sinfo_ssn == stseq->sequence)) { 5289190689Srrs str_seq = (stseq->stream << 16) | stseq->sequence; 5290190689Srrs ctl->end_added = 1; 5291190689Srrs ctl->pdapi_aborted = 1; 5292190689Srrs sv = stcb->asoc.control_pdapi; 5293190689Srrs stcb->asoc.control_pdapi = ctl; 5294196260Stuexen sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5295196260Stuexen stcb, 5296190689Srrs SCTP_PARTIAL_DELIVERY_ABORTED, 5297196260Stuexen (void *)&str_seq, 5298196260Stuexen SCTP_SO_NOT_LOCKED); 5299190689Srrs stcb->asoc.control_pdapi = sv; 5300190689Srrs break; 5301190689Srrs } else if ((ctl->sinfo_stream == stseq->stream) && 5302216825Stuexen SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) { 5303190689Srrs /* We are past our victim SSN */ 5304190689Srrs break; 5305190689Srrs } 5306190689Srrs } 5307172091Srrs strm = &asoc->strmin[stseq->stream]; 5308216825Stuexen if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) { 5309163953Srrs /* Update the sequence number */ 5310216825Stuexen strm->last_sequence_delivered = stseq->sequence; 5311163953Srrs } 5312163953Srrs /* now kick the stream the new way */ 5313172156Srrs /* sa_ignore NO_NULL_CHK */ 5314163953Srrs sctp_kick_prsctp_reorder_queue(stcb, strm); 5315163953Srrs } 5316190689Srrs SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5317163953Srrs } 5318207963Srrs /* 5319207963Srrs * Now slide thing forward. 
5320207963Srrs */ 5321207963Srrs sctp_slide_mapping_arrays(stcb); 5322207963Srrs 5323212711Stuexen if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5324165647Srrs /* now lets kick out and check for more fragmented delivery */ 5325172156Srrs /* sa_ignore NO_NULL_CHK */ 5326165647Srrs sctp_deliver_reasm_check(stcb, &stcb->asoc); 5327165647Srrs } 5328163953Srrs} 5329