/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

/* Refresh the cached receive window (asoc->my_rwnd) from current queue state. */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone as put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * associations data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	/* Each queued entry is charged its data size plus MSIZE of mbuf overhead. */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even it is 0. SWS engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 * Returns NULL if no readq entry could be allocated; on success the
 * entry holds a reference on 'net' (ref_count is bumped here).
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	/* chunk flags live in the upper byte of sinfo_flags */
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 * Variant that pulls all fields from an already-queued tmit chunk;
 * takes a reference on chk->whoTo.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build the ancillary-data (cmsg) mbuf handed up with a received message.
 * Depending on which socket options are enabled this packs, in order, an
 * SCTP_RCVINFO, an SCTP_NXTINFO (only if info about a next message is
 * available in the extended sinfo), and an SCTP_SNDRCV/SCTP_EXTRCV cmsg.
 * Returns NULL when the user requested no ancillary data or when no mbuf
 * could be allocated.
 */
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	/* First pass: compute the total cmsg space needed. */
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	/* Second pass: fill in each requested cmsg, advancing cmh as we go. */
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}


/*
 * Move 'tsn' from the renegable mapping array to the non-renegable
 * (nr) mapping array, so it will no longer be revoked by a drain.
 * Only relevant when the sctp_do_drain sysctl is enabled; a no-op for
 * TSNs at or below the cumulative ack.
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		/* TSN should already be marked received; state is inconsistent. */
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		/* Drain the reassembly queue; nothing can be delivered anymore. */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			/* Middle or last fragment: append to the in-progress entry. */
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is a EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was a EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream useage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybred approach is the answer
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for sid:%u ssn:%u tsn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream, (uint32_t) control->sinfo_ssn,
	    (uint32_t) control->sinfo_tsn,
	    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * NOTE(review): on these platforms the socket lock must be taken
	 * before adding to the read queue; the TCB lock is dropped and
	 * re-taken around the socket-lock acquisition.
	 */
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
		SCTP_SOCKET_UNLOCK(so, 1);
		return;
	}
#endif
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			/* Insert in SSN order. */
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue.
 * And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 *
 * On return, *t_size holds the byte total of the consecutive leading
 * fragments that were counted (the deliverable part of the first message).
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	/*
	 * Walk the queue; the message is complete only if the TSNs run
	 * consecutively from the FIRST fragment to a LAST (E-bit) fragment.
	 */
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			/* gap in the TSN run - message still incomplete */
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* hit the LAST fragment - whole message present */
			return (1);
		}
		tsn++;
	}
	/* ran off the end of the queue without seeing a LAST fragment */
	return (0);
}

/*
 * See if the head of the reassembly queue can be (partially) delivered to
 * the socket.  Either starts a new partial-delivery (PDAPI) session when
 * enough consecutive data has accumulated, or services an in-progress one.
 */
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh?  Queue empty - make the accounting agree. */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			/*
			 * pd_point: minimum number of queued bytes before
			 * we start a partial delivery - the smaller of a
			 * fraction of the receive buffer and the endpoint's
			 * configured partial_delivery_point.
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can. If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 *
 * On a protocol violation (inconsistent B/E/U bits or stream fields between
 * TSN-adjacent fragments) the association is aborted and *abort_flag is set;
 * the caller must not touch chk afterwards.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			/*
			 * This fragment is the very next TSN after the
			 * cumulative ack, so its flags must be consistent
			 * with the (possibly in-progress) partial delivery.
			 */
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place - queue is kept sorted by ascending TSN */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/*
	 * Now the audits: if the inserted chunk is TSN-adjacent to its new
	 * prev/next neighbors, their fragment flags and stream fields must
	 * be mutually consistent or the peer is violating the protocol.
	 */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure a evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
/*
 * Returns 1 if TSN_seq would conflict with what is already on the
 * reassembly queue (i.e. it is part of a fragmented message there),
 * 0 if it is safe to treat it as an independent chunk.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self- contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	/* no adjacent or duplicate TSN found - it does not belong */
	return (0);
}

/*
 * Process one received DATA chunk: validate it, account for it in the
 * mapping arrays, and queue it for (re)ordered delivery.  (Body continues
 * beyond this view.)
 */
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	/* I-bit set: peer wants a SACK right away */
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	/* U-bit clear means the chunk is part of an ordered stream */
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
SCTP_MAP_TSN_ENTERS); 1314170744Srrs } 1315169420Srrs if (stcb == NULL) { 1316169420Srrs return (0); 1317169420Srrs } 1318170744Srrs SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn); 1319216825Stuexen if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1320163953Srrs /* It is a duplicate */ 1321163953Srrs SCTP_STAT_INCR(sctps_recvdupdata); 1322163953Srrs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1323163953Srrs /* Record a dup for the next outbound sack */ 1324163953Srrs asoc->dup_tsns[asoc->numduptsns] = tsn; 1325163953Srrs asoc->numduptsns++; 1326163953Srrs } 1327172703Srrs asoc->send_sack = 1; 1328163953Srrs return (0); 1329163953Srrs } 1330163953Srrs /* Calculate the number of TSN's between the base and this TSN */ 1331194355Srrs SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1332163953Srrs if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1333163953Srrs /* Can't hold the bit in the mapping at max array, toss it */ 1334163953Srrs return (0); 1335163953Srrs } 1336163953Srrs if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { 1337170091Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1338170138Srrs if (sctp_expand_mapping_array(asoc, gap)) { 1339163953Srrs /* Can't expand, drop it */ 1340163953Srrs return (0); 1341163953Srrs } 1342163953Srrs } 1343216825Stuexen if (SCTP_TSN_GT(tsn, *high_tsn)) { 1344163953Srrs *high_tsn = tsn; 1345163953Srrs } 1346163953Srrs /* See if we have received this one already */ 1347205627Srrs if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1348205627Srrs SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1349163953Srrs SCTP_STAT_INCR(sctps_recvdupdata); 1350163953Srrs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1351163953Srrs /* Record a dup for the next outbound sack */ 1352163953Srrs asoc->dup_tsns[asoc->numduptsns] = tsn; 1353163953Srrs asoc->numduptsns++; 1354163953Srrs } 1355167598Srrs asoc->send_sack = 1; 1356163953Srrs return (0); 1357163953Srrs } 1358163953Srrs /* 1359163953Srrs * Check to see about the GONE flag, 
duplicates would cause a sack 1360163953Srrs * to be sent up above 1361163953Srrs */ 1362169420Srrs if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1363163953Srrs (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1364267723Stuexen (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1365163953Srrs /* 1366163953Srrs * wait a minute, this guy is gone, there is no longer a 1367163953Srrs * receiver. Send peer an ABORT! 1368163953Srrs */ 1369267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1370235360Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1371163953Srrs *abort_flag = 1; 1372163953Srrs return (0); 1373163953Srrs } 1374163953Srrs /* 1375163953Srrs * Now before going further we see if there is room. If NOT then we 1376163953Srrs * MAY let one through only IF this TSN is the one we are waiting 1377163953Srrs * for on a partial delivery API. 1378163953Srrs */ 1379163953Srrs 1380163953Srrs /* now do the tests */ 1381163953Srrs if (((asoc->cnt_on_all_streams + 1382163953Srrs asoc->cnt_on_reasm_queue + 1383179783Srrs asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1384163953Srrs (((int)asoc->my_rwnd) <= 0)) { 1385163953Srrs /* 1386163953Srrs * When we have NO room in the rwnd we check to make sure 1387163953Srrs * the reader is doing its job... 
1388163953Srrs */ 1389163953Srrs if (stcb->sctp_socket->so_rcv.sb_cc) { 1390163953Srrs /* some to read, wake-up */ 1391237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1392172090Srrs struct socket *so; 1393172090Srrs 1394172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 1395172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 1396172090Srrs SCTP_TCB_UNLOCK(stcb); 1397172090Srrs SCTP_SOCKET_LOCK(so, 1); 1398172090Srrs SCTP_TCB_LOCK(stcb); 1399172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 1400172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1401172090Srrs /* assoc was freed while we were unlocked */ 1402172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 1403172090Srrs return (0); 1404172090Srrs } 1405172090Srrs#endif 1406163953Srrs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1407237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1408172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 1409172090Srrs#endif 1410163953Srrs } 1411163953Srrs /* now is it in the mapping array of what we have accepted? 
*/ 1412216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1413216825Stuexen SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1414163953Srrs /* Nope not in the valid range dump it */ 1415163953Srrs sctp_set_rwnd(stcb, asoc); 1416163953Srrs if ((asoc->cnt_on_all_streams + 1417163953Srrs asoc->cnt_on_reasm_queue + 1418179783Srrs asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1419163953Srrs SCTP_STAT_INCR(sctps_datadropchklmt); 1420163953Srrs } else { 1421163953Srrs SCTP_STAT_INCR(sctps_datadroprwnd); 1422163953Srrs } 1423163953Srrs *break_flag = 1; 1424163953Srrs return (0); 1425163953Srrs } 1426163953Srrs } 1427163953Srrs strmno = ntohs(ch->dp.stream_id); 1428163953Srrs if (strmno >= asoc->streamincnt) { 1429294158Stuexen struct sctp_error_invalid_stream *cause; 1430163953Srrs 1431294158Stuexen op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1432243882Sglebius 0, M_NOWAIT, 1, MT_DATA); 1433294158Stuexen if (op_err != NULL) { 1434163953Srrs /* add some space up front so prepend will work well */ 1435294158Stuexen SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1436294158Stuexen cause = mtod(op_err, struct sctp_error_invalid_stream *); 1437163953Srrs /* 1438163953Srrs * Error causes are just param's and this one has 1439163953Srrs * two back to back phdr, one with the error type 1440163953Srrs * and size, the other with the streamid and a rsvd 1441163953Srrs */ 1442294158Stuexen SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1443294158Stuexen cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1444294158Stuexen cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1445294158Stuexen cause->stream_id = ch->dp.stream_id; 1446294158Stuexen cause->reserved = htons(0); 1447294158Stuexen sctp_queue_op_err(stcb, op_err); 1448163953Srrs } 1449163953Srrs SCTP_STAT_INCR(sctps_badsid); 1450170091Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1451205627Srrs 
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1452216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1453205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1454185694Srrs } 1455169208Srrs if (tsn == (asoc->cumulative_tsn + 1)) { 1456169208Srrs /* Update cum-ack */ 1457169208Srrs asoc->cumulative_tsn = tsn; 1458169208Srrs } 1459163953Srrs return (0); 1460163953Srrs } 1461163953Srrs /* 1462163953Srrs * Before we continue lets validate that we are not being fooled by 1463163953Srrs * an evil attacker. We can only have 4k chunks based on our TSN 1464163953Srrs * spread allowed by the mapping array 512 * 8 bits, so there is no 1465163953Srrs * way our stream sequence numbers could have wrapped. We of course 1466163953Srrs * only validate the FIRST fragment so the bit must be set. 1467163953Srrs */ 1468163953Srrs strmseq = ntohs(ch->dp.stream_sequence); 1469166675Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 1470171477Srrs SCTP_TCB_LOCK_ASSERT(stcb); 1471171477Srrs if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1472171477Srrs asoc->tsn_in_at = 0; 1473171477Srrs asoc->tsn_in_wrapped = 1; 1474171477Srrs } 1475166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1476166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1477166675Srrs asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; 1478168859Srrs asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1479168859Srrs asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1480171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1481171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1482171477Srrs asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1483166675Srrs asoc->tsn_in_at++; 1484166675Srrs#endif 1485166675Srrs if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1486170056Srrs (TAILQ_EMPTY(&asoc->resetHead)) && 1487166675Srrs (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1488216825Stuexen SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) { 1489163953Srrs /* The incoming sseq is behind 
where we last delivered? */ 1490169420Srrs SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1491169420Srrs strmseq, asoc->strmin[strmno].last_sequence_delivered); 1492163953Srrs 1493267723Stuexen snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1494267723Stuexen asoc->strmin[strmno].last_sequence_delivered, 1495267723Stuexen tsn, strmno, strmseq); 1496267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1497283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1498267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1499163953Srrs *abort_flag = 1; 1500163953Srrs return (0); 1501163953Srrs } 1502166675Srrs /************************************ 1503166675Srrs * From here down we may find ch-> invalid 1504166675Srrs * so its a good idea NOT to use it. 1505166675Srrs *************************************/ 1506166675Srrs 1507163953Srrs the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1508163953Srrs if (last_chunk == 0) { 1509166023Srrs dmbuf = SCTP_M_COPYM(*m, 1510163953Srrs (offset + sizeof(struct sctp_data_chunk)), 1511243882Sglebius the_len, M_NOWAIT); 1512163953Srrs#ifdef SCTP_MBUF_LOGGING 1513179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1514283708Stuexen sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 1515163953Srrs } 1516163953Srrs#endif 1517163953Srrs } else { 1518163953Srrs /* We can steal the last chunk */ 1519165647Srrs int l_len; 1520165647Srrs 1521163953Srrs dmbuf = *m; 1522163953Srrs /* lop off the top part */ 1523163953Srrs m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1524165647Srrs if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1525165647Srrs l_len = SCTP_BUF_LEN(dmbuf); 1526165647Srrs } else { 1527165647Srrs /* 1528165647Srrs * need to count up the size hopefully does not hit 1529165647Srrs * this to often :-0 1530165647Srrs */ 1531165647Srrs struct 
mbuf *lat; 1532165647Srrs 1533165647Srrs l_len = 0; 1534228907Stuexen for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1535165647Srrs l_len += SCTP_BUF_LEN(lat); 1536165647Srrs } 1537165647Srrs } 1538165647Srrs if (l_len > the_len) { 1539163953Srrs /* Trim the end round bytes off too */ 1540165647Srrs m_adj(dmbuf, -(l_len - the_len)); 1541163953Srrs } 1542163953Srrs } 1543163953Srrs if (dmbuf == NULL) { 1544163953Srrs SCTP_STAT_INCR(sctps_nomem); 1545163953Srrs return (0); 1546163953Srrs } 1547166675Srrs if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1548163953Srrs asoc->fragmented_delivery_inprogress == 0 && 1549163953Srrs TAILQ_EMPTY(&asoc->resetHead) && 1550166675Srrs ((ordered == 0) || 1551216480Stuexen ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1552163953Srrs TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1553163953Srrs /* Candidate for express delivery */ 1554163953Srrs /* 1555163953Srrs * Its not fragmented, No PD-API is up, Nothing in the 1556163953Srrs * delivery queue, Its un-ordered OR ordered and the next to 1557163953Srrs * deliver AND nothing else is stuck on the stream queue, 1558163953Srrs * And there is room for it in the socket buffer. Lets just 1559163953Srrs * stuff it up the buffer.... 
1560163953Srrs */ 1561163953Srrs 1562163953Srrs /* It would be nice to avoid this copy if we could :< */ 1563163953Srrs sctp_alloc_a_readq(stcb, control); 1564163953Srrs sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1565166675Srrs protocol_id, 1566163953Srrs strmno, strmseq, 1567166675Srrs chunk_flags, 1568163953Srrs dmbuf); 1569163953Srrs if (control == NULL) { 1570163953Srrs goto failed_express_del; 1571163953Srrs } 1572212897Stuexen SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1573216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1574212897Stuexen asoc->highest_tsn_inside_nr_map = tsn; 1575212897Stuexen } 1576195918Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 1577195918Srrs control, &stcb->sctp_socket->so_rcv, 1578195918Srrs 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1579185694Srrs 1580166675Srrs if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1581163953Srrs /* for ordered, bump what we delivered */ 1582163953Srrs asoc->strmin[strmno].last_sequence_delivered++; 1583163953Srrs } 1584163953Srrs SCTP_STAT_INCR(sctps_recvexpress); 1585179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1586170744Srrs sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1587170744Srrs SCTP_STR_LOG_FROM_EXPRS_DEL); 1588170744Srrs } 1589163953Srrs control = NULL; 1590206137Stuexen 1591163953Srrs goto finish_express_del; 1592163953Srrs } 1593163953Srrsfailed_express_del: 1594163953Srrs /* If we reach here this is a new chunk */ 1595163953Srrs chk = NULL; 1596163953Srrs control = NULL; 1597163953Srrs /* Express for fragmented delivery? 
*/ 1598163953Srrs if ((asoc->fragmented_delivery_inprogress) && 1599163953Srrs (stcb->asoc.control_pdapi) && 1600163953Srrs (asoc->str_of_pdapi == strmno) && 1601163953Srrs (asoc->ssn_of_pdapi == strmseq) 1602163953Srrs ) { 1603163953Srrs control = stcb->asoc.control_pdapi; 1604166675Srrs if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1605163953Srrs /* Can't be another first? */ 1606163953Srrs goto failed_pdapi_express_del; 1607163953Srrs } 1608163953Srrs if (tsn == (control->sinfo_tsn + 1)) { 1609163953Srrs /* Yep, we can add it on */ 1610163953Srrs int end = 0; 1611163953Srrs 1612166675Srrs if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1613163953Srrs end = 1; 1614163953Srrs } 1615163953Srrs if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1616163953Srrs tsn, 1617163953Srrs &stcb->sctp_socket->so_rcv)) { 1618169420Srrs SCTP_PRINTF("Append fails end:%d\n", end); 1619163953Srrs goto failed_pdapi_express_del; 1620163953Srrs } 1621205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1622216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1623205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1624185694Srrs } 1625163953Srrs SCTP_STAT_INCR(sctps_recvexpressm); 1626163953Srrs asoc->tsn_last_delivered = tsn; 1627166675Srrs asoc->fragment_flags = chunk_flags; 1628163953Srrs asoc->tsn_of_pdapi_last_delivered = tsn; 1629166675Srrs asoc->last_flags_delivered = chunk_flags; 1630163953Srrs asoc->last_strm_seq_delivered = strmseq; 1631163953Srrs asoc->last_strm_no_delivered = strmno; 1632163953Srrs if (end) { 1633163953Srrs /* clean up the flags and such */ 1634163953Srrs asoc->fragmented_delivery_inprogress = 0; 1635166675Srrs if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1636165220Srrs asoc->strmin[strmno].last_sequence_delivered++; 1637165220Srrs } 1638163953Srrs stcb->asoc.control_pdapi = NULL; 1639165647Srrs if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1640165647Srrs /* 1641165647Srrs * There could be another 
message 1642165647Srrs * ready 1643165647Srrs */ 1644165647Srrs need_reasm_check = 1; 1645165647Srrs } 1646163953Srrs } 1647163953Srrs control = NULL; 1648163953Srrs goto finish_express_del; 1649163953Srrs } 1650163953Srrs } 1651163953Srrsfailed_pdapi_express_del: 1652163953Srrs control = NULL; 1653205627Srrs if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1654205627Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1655216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1656205627Srrs asoc->highest_tsn_inside_nr_map = tsn; 1657205627Srrs } 1658205627Srrs } else { 1659205627Srrs SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 1660216825Stuexen if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 1661205627Srrs asoc->highest_tsn_inside_map = tsn; 1662205627Srrs } 1663205627Srrs } 1664166675Srrs if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1665163953Srrs sctp_alloc_a_chunk(stcb, chk); 1666163953Srrs if (chk == NULL) { 1667163953Srrs /* No memory so we drop the chunk */ 1668163953Srrs SCTP_STAT_INCR(sctps_nomem); 1669163953Srrs if (last_chunk == 0) { 1670163953Srrs /* we copied it, free the copy */ 1671163953Srrs sctp_m_freem(dmbuf); 1672163953Srrs } 1673163953Srrs return (0); 1674163953Srrs } 1675163953Srrs chk->rec.data.TSN_seq = tsn; 1676163953Srrs chk->no_fr_allowed = 0; 1677163953Srrs chk->rec.data.stream_seq = strmseq; 1678163953Srrs chk->rec.data.stream_number = strmno; 1679166675Srrs chk->rec.data.payloadtype = protocol_id; 1680163953Srrs chk->rec.data.context = stcb->asoc.context; 1681163953Srrs chk->rec.data.doing_fast_retransmit = 0; 1682166675Srrs chk->rec.data.rcv_flags = chunk_flags; 1683163953Srrs chk->asoc = asoc; 1684163953Srrs chk->send_size = the_len; 1685163953Srrs chk->whoTo = net; 1686163953Srrs atomic_add_int(&net->ref_count, 1); 1687163953Srrs chk->data = dmbuf; 1688163953Srrs } else { 1689163953Srrs sctp_alloc_a_readq(stcb, control); 1690163953Srrs sctp_build_readq_entry_mac(control, stcb, asoc->context, 
net, tsn, 1691166675Srrs protocol_id, 1692163953Srrs strmno, strmseq, 1693166675Srrs chunk_flags, 1694163953Srrs dmbuf); 1695163953Srrs if (control == NULL) { 1696163953Srrs /* No memory so we drop the chunk */ 1697163953Srrs SCTP_STAT_INCR(sctps_nomem); 1698163953Srrs if (last_chunk == 0) { 1699163953Srrs /* we copied it, free the copy */ 1700163953Srrs sctp_m_freem(dmbuf); 1701163953Srrs } 1702163953Srrs return (0); 1703163953Srrs } 1704163953Srrs control->length = the_len; 1705163953Srrs } 1706163953Srrs 1707163953Srrs /* Mark it as received */ 1708163953Srrs /* Now queue it where it belongs */ 1709163953Srrs if (control != NULL) { 1710163953Srrs /* First a sanity check */ 1711163953Srrs if (asoc->fragmented_delivery_inprogress) { 1712163953Srrs /* 1713163953Srrs * Ok, we have a fragmented delivery in progress if 1714163953Srrs * this chunk is next to deliver OR belongs in our 1715163953Srrs * view to the reassembly, the peer is evil or 1716163953Srrs * broken. 1717163953Srrs */ 1718163953Srrs uint32_t estimate_tsn; 1719163953Srrs 1720163953Srrs estimate_tsn = asoc->tsn_last_delivered + 1; 1721163953Srrs if (TAILQ_EMPTY(&asoc->reasmqueue) && 1722163953Srrs (estimate_tsn == control->sinfo_tsn)) { 1723163953Srrs /* Evil/Broke peer */ 1724163953Srrs sctp_m_freem(control->data); 1725163953Srrs control->data = NULL; 1726171158Srrs if (control->whoFrom) { 1727171158Srrs sctp_free_remote_addr(control->whoFrom); 1728171158Srrs control->whoFrom = NULL; 1729171158Srrs } 1730163953Srrs sctp_free_a_readq(stcb, control); 1731267723Stuexen snprintf(msg, sizeof(msg), "Reas. 
queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1732267723Stuexen tsn, strmno, strmseq); 1733267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1734283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1735267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1736163953Srrs *abort_flag = 1; 1737267733Stuexen if (last_chunk) { 1738267733Stuexen *m = NULL; 1739267733Stuexen } 1740163953Srrs return (0); 1741163953Srrs } else { 1742163953Srrs if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1743163953Srrs sctp_m_freem(control->data); 1744163953Srrs control->data = NULL; 1745171158Srrs if (control->whoFrom) { 1746171158Srrs sctp_free_remote_addr(control->whoFrom); 1747171158Srrs control->whoFrom = NULL; 1748171158Srrs } 1749163953Srrs sctp_free_a_readq(stcb, control); 1750267723Stuexen snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1751267723Stuexen tsn, strmno, strmseq); 1752267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1753283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 1754267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1755163953Srrs *abort_flag = 1; 1756267733Stuexen if (last_chunk) { 1757267733Stuexen *m = NULL; 1758267733Stuexen } 1759163953Srrs return (0); 1760163953Srrs } 1761163953Srrs } 1762163953Srrs } else { 1763163953Srrs /* No PDAPI running */ 1764163953Srrs if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1765163953Srrs /* 1766163953Srrs * Reassembly queue is NOT empty validate 1767163953Srrs * that this tsn does not need to be in 1768163953Srrs * reasembly queue. If it does then our peer 1769163953Srrs * is broken or evil. 
1770163953Srrs */ 1771163953Srrs if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1772163953Srrs sctp_m_freem(control->data); 1773163953Srrs control->data = NULL; 1774171158Srrs if (control->whoFrom) { 1775171158Srrs sctp_free_remote_addr(control->whoFrom); 1776171158Srrs control->whoFrom = NULL; 1777171158Srrs } 1778163953Srrs sctp_free_a_readq(stcb, control); 1779267723Stuexen snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1780267723Stuexen tsn, strmno, strmseq); 1781267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1782283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 1783267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1784163953Srrs *abort_flag = 1; 1785267733Stuexen if (last_chunk) { 1786267733Stuexen *m = NULL; 1787267733Stuexen } 1788163953Srrs return (0); 1789163953Srrs } 1790163953Srrs } 1791163953Srrs } 1792163953Srrs /* ok, if we reach here we have passed the sanity checks */ 1793166675Srrs if (chunk_flags & SCTP_DATA_UNORDERED) { 1794163953Srrs /* queue directly into socket buffer */ 1795206137Stuexen sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1796163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 1797163953Srrs control, 1798195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1799163953Srrs } else { 1800163953Srrs /* 1801163953Srrs * Special check for when streams are resetting. We 1802163953Srrs * could be more smart about this and check the 1803163953Srrs * actual stream to see if it is not being reset.. 1804163953Srrs * that way we would not create a HOLB when amongst 1805163953Srrs * streams being reset and those not being reset. 
1806163953Srrs * 1807163953Srrs * We take complete messages that have a stream reset 1808163953Srrs * intervening (aka the TSN is after where our 1809163953Srrs * cum-ack needs to be) off and put them on a 1810163953Srrs * pending_reply_queue. The reassembly ones we do 1811163953Srrs * not have to worry about since they are all sorted 1812163953Srrs * and proceessed by TSN order. It is only the 1813163953Srrs * singletons I must worry about. 1814163953Srrs */ 1815163953Srrs if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1816216825Stuexen SCTP_TSN_GT(tsn, liste->tsn)) { 1817163953Srrs /* 1818163953Srrs * yep its past where we need to reset... go 1819163953Srrs * ahead and queue it. 1820163953Srrs */ 1821163953Srrs if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 1822163953Srrs /* first one on */ 1823163953Srrs TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1824163953Srrs } else { 1825216822Stuexen struct sctp_queued_to_read *ctlOn, 1826216822Stuexen *nctlOn; 1827163953Srrs unsigned char inserted = 0; 1828163953Srrs 1829216822Stuexen TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 1830216825Stuexen if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 1831216822Stuexen continue; 1832163953Srrs } else { 1833163953Srrs /* found it */ 1834163953Srrs TAILQ_INSERT_BEFORE(ctlOn, control, next); 1835163953Srrs inserted = 1; 1836163953Srrs break; 1837163953Srrs } 1838163953Srrs } 1839163953Srrs if (inserted == 0) { 1840163953Srrs /* 1841163953Srrs * must be put at end, use 1842163953Srrs * prevP (all setup from 1843163953Srrs * loop) to setup nextP. 
1844163953Srrs */ 1845163953Srrs TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1846163953Srrs } 1847163953Srrs } 1848163953Srrs } else { 1849163953Srrs sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 1850163953Srrs if (*abort_flag) { 1851267733Stuexen if (last_chunk) { 1852267733Stuexen *m = NULL; 1853267733Stuexen } 1854163953Srrs return (0); 1855163953Srrs } 1856163953Srrs } 1857163953Srrs } 1858163953Srrs } else { 1859163953Srrs /* Into the re-assembly queue */ 1860163953Srrs sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 1861163953Srrs if (*abort_flag) { 1862165220Srrs /* 1863165220Srrs * the assoc is now gone and chk was put onto the 1864165220Srrs * reasm queue, which has all been freed. 1865165220Srrs */ 1866267733Stuexen if (last_chunk) { 1867267733Stuexen *m = NULL; 1868267733Stuexen } 1869163953Srrs return (0); 1870163953Srrs } 1871163953Srrs } 1872163953Srrsfinish_express_del: 1873206840Stuexen if (tsn == (asoc->cumulative_tsn + 1)) { 1874206840Stuexen /* Update cum-ack */ 1875206840Stuexen asoc->cumulative_tsn = tsn; 1876206840Stuexen } 1877163953Srrs if (last_chunk) { 1878163953Srrs *m = NULL; 1879163953Srrs } 1880166675Srrs if (ordered) { 1881163953Srrs SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 1882163953Srrs } else { 1883163953Srrs SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 1884163953Srrs } 1885163953Srrs SCTP_STAT_INCR(sctps_recvdata); 1886163953Srrs /* Set it present please */ 1887179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1888170744Srrs sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 1889170744Srrs } 1890179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1891170744Srrs sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 1892170744Srrs asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 1893170744Srrs } 1894169352Srrs /* check the special flag for stream resets */ 1895169352Srrs if 
(((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1896216825Stuexen SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 1897169352Srrs /* 1898169352Srrs * we have finished working through the backlogged TSN's now 1899169352Srrs * time to reset streams. 1: call reset function. 2: free 1900169352Srrs * pending_reply space 3: distribute any chunks in 1901169352Srrs * pending_reply_queue. 1902169352Srrs */ 1903216822Stuexen struct sctp_queued_to_read *ctl, *nctl; 1904169352Srrs 1905240198Stuexen sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 1906169352Srrs TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 1907294140Stuexen sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 1908170091Srrs SCTP_FREE(liste, SCTP_M_STRESET); 1909169655Srrs /* sa_ignore FREED_MEMORY */ 1910169352Srrs liste = TAILQ_FIRST(&asoc->resetHead); 1911216822Stuexen if (TAILQ_EMPTY(&asoc->resetHead)) { 1912169352Srrs /* All can be removed */ 1913216822Stuexen TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1914169352Srrs TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1915169352Srrs sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1916169352Srrs if (*abort_flag) { 1917169352Srrs return (0); 1918169352Srrs } 1919169352Srrs } 1920216822Stuexen } else { 1921216822Stuexen TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1922216825Stuexen if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 1923216822Stuexen break; 1924216822Stuexen } 1925169352Srrs /* 1926169352Srrs * if ctl->sinfo_tsn is <= liste->tsn we can 1927169352Srrs * process it which is the NOT of 1928169352Srrs * ctl->sinfo_tsn > liste->tsn 1929169352Srrs */ 1930169352Srrs TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1931169352Srrs sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1932169352Srrs if (*abort_flag) { 1933169352Srrs return (0); 1934169352Srrs } 1935169352Srrs } 1936169352Srrs } 1937169352Srrs /* 1938169352Srrs * Now 
service re-assembly to pick up anything that has been
	 * held on reassembly queue?
	 */
	sctp_deliver_reasm_check(stcb, asoc);
	need_reasm_check = 0;
	}
	if (need_reasm_check) {
		/* Another one waits ? */
		sctp_deliver_reasm_check(stcb, asoc);
	}
	return (1);
}

/*
 * Lookup table used when scanning the (nr_)mapping arrays: for a byte
 * value b, sctp_map_lookup_tab[b] is the number of consecutive one-bits
 * starting at the least-significant bit of b (i.e. how far the cum-ack
 * can advance within a partially filled byte; 0xff itself maps to 8).
 */
int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};


/*
 * Recompute the cumulative TSN from the mapping arrays and, when enough
 * leading bytes are fully acked, slide (or fully clear) both the
 * mapping_array and nr_mapping_array forward so the window keeps tracking
 * newly arriving TSNs.
 *
 * Note on the OR below: entries making up the cum-ack position are not
 * necessarily all in the nr-mapping array. For a fragmented message we
 * may not deliver the data until some threshold (or all of it) is in
 * place, so nr_mapping_array and mapping_array must be OR'd together to
 * get a true picture of the cum-ack.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Remember the pre-slide state for the MAP logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * Count the run of consecutive received TSNs from the base of the
	 * window. We could probably improve this a small bit by calculating
	 * the offset of the current cum-ack as the starting point.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/* cum-ack beyond both recorded highs: internal inconsistency */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		/* repair by pulling the highs back to the cum-ack */
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;

#ifdef INVARIANTS
		unsigned int i;

#endif

		/* clear the array */
		clr = ((at + 7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
			    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
			    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
			    SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
				    (uint32_t) asoc->mapping_array_size,
				    SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* copy the kept tail down to the front ... */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			/* ... and zero out the rest of the window */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep the "highest == base - 1" (empty window)
			 * encodings consistent across the base move.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
				    SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}

void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;

	asoc = &stcb->asoc;
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
highest_tsn = asoc->highest_tsn_inside_nr_map; 2165206137Stuexen } else { 2166206137Stuexen highest_tsn = asoc->highest_tsn_inside_map; 2167206137Stuexen } 2168206137Stuexen 2169185694Srrs /* 2170163953Srrs * Now we need to see if we need to queue a sack or just start the 2171163953Srrs * timer (if allowed). 2172163953Srrs */ 2173206137Stuexen if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2174206137Stuexen /* 2175206137Stuexen * Ok special case, in SHUTDOWN-SENT case. here we maker 2176206137Stuexen * sure SACK timer is off and instead send a SHUTDOWN and a 2177206137Stuexen * SACK 2178206137Stuexen */ 2179206137Stuexen if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2180206137Stuexen sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2181283822Stuexen stcb->sctp_ep, stcb, NULL, 2182283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 2183206137Stuexen } 2184224641Stuexen sctp_send_shutdown(stcb, 2185224641Stuexen ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2186221627Stuexen sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2187206137Stuexen } else { 2188206137Stuexen int is_a_gap; 2189163953Srrs 2190206137Stuexen /* is there a gap now ? 
*/ 2191216825Stuexen is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2192163953Srrs 2193206137Stuexen /* 2194206137Stuexen * CMT DAC algorithm: increase number of packets received 2195206137Stuexen * since last ack 2196206137Stuexen */ 2197206137Stuexen stcb->asoc.cmt_dac_pkts_rcvd++; 2198163953Srrs 2199206137Stuexen if ((stcb->asoc.send_sack == 1) || /* We need to send a 2200206137Stuexen * SACK */ 2201206137Stuexen ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2202206137Stuexen * longer is one */ 2203206137Stuexen (stcb->asoc.numduptsns) || /* we have dup's */ 2204206137Stuexen (is_a_gap) || /* is still a gap */ 2205206137Stuexen (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2206206137Stuexen (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2207206137Stuexen ) { 2208163953Srrs 2209216669Stuexen if ((stcb->asoc.sctp_cmt_on_off > 0) && 2210206137Stuexen (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2211206137Stuexen (stcb->asoc.send_sack == 0) && 2212206137Stuexen (stcb->asoc.numduptsns == 0) && 2213206137Stuexen (stcb->asoc.delayed_ack) && 2214206137Stuexen (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2215163953Srrs 2216206137Stuexen /* 2217206137Stuexen * CMT DAC algorithm: With CMT, delay acks 2218206137Stuexen * even in the face of 2219206137Stuexen * 2220206137Stuexen * reordering. Therefore, if acks that do not 2221206137Stuexen * have to be sent because of the above 2222206137Stuexen * reasons, will be delayed. That is, acks 2223206137Stuexen * that would have been sent due to gap 2224206137Stuexen * reports will be delayed with DAC. Start 2225206137Stuexen * the delayed ack timer. 
2226206137Stuexen */ 2227206137Stuexen sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2228206137Stuexen stcb->sctp_ep, stcb, NULL); 2229163953Srrs } else { 2230206137Stuexen /* 2231206137Stuexen * Ok we must build a SACK since the timer 2232206137Stuexen * is pending, we got our first packet OR 2233206137Stuexen * there are gaps or duplicates. 2234206137Stuexen */ 2235206137Stuexen (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2236221627Stuexen sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2237163953Srrs } 2238206137Stuexen } else { 2239206137Stuexen if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2240206137Stuexen sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2241206137Stuexen stcb->sctp_ep, stcb, NULL); 2242206137Stuexen } 2243163953Srrs } 2244163953Srrs } 2245163953Srrs} 2246163953Srrs 2247163953Srrsvoid 2248163953Srrssctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2249163953Srrs{ 2250163953Srrs struct sctp_tmit_chunk *chk; 2251196260Stuexen uint32_t tsize, pd_point; 2252163953Srrs uint16_t nxt_todel; 2253163953Srrs 2254163953Srrs if (asoc->fragmented_delivery_inprogress) { 2255163953Srrs sctp_service_reassembly(stcb, asoc); 2256163953Srrs } 2257163953Srrs /* Can we proceed further, i.e. the PD-API is complete */ 2258163953Srrs if (asoc->fragmented_delivery_inprogress) { 2259163953Srrs /* no */ 2260163953Srrs return; 2261163953Srrs } 2262163953Srrs /* 2263163953Srrs * Now is there some other chunk I can deliver from the reassembly 2264163953Srrs * queue. 
2265163953Srrs */ 2266165647Srrsdoit_again: 2267163953Srrs chk = TAILQ_FIRST(&asoc->reasmqueue); 2268163953Srrs if (chk == NULL) { 2269163953Srrs asoc->size_on_reasm_queue = 0; 2270163953Srrs asoc->cnt_on_reasm_queue = 0; 2271163953Srrs return; 2272163953Srrs } 2273163953Srrs nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2274163953Srrs if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2275163953Srrs ((nxt_todel == chk->rec.data.stream_seq) || 2276163953Srrs (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2277163953Srrs /* 2278163953Srrs * Yep the first one is here. We setup to start reception, 2279163953Srrs * by backing down the TSN just in case we can't deliver. 2280163953Srrs */ 2281163953Srrs 2282163953Srrs /* 2283163953Srrs * Before we start though either all of the message should 2284196509Stuexen * be here or the socket buffer max or nothing on the 2285163953Srrs * delivery queue and something can be delivered. 2286163953Srrs */ 2287196260Stuexen if (stcb->sctp_socket) { 2288255337Stuexen pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, 2289196260Stuexen stcb->sctp_ep->partial_delivery_point); 2290196260Stuexen } else { 2291196260Stuexen pd_point = stcb->sctp_ep->partial_delivery_point; 2292196260Stuexen } 2293196260Stuexen if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { 2294163953Srrs asoc->fragmented_delivery_inprogress = 1; 2295163953Srrs asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2296163953Srrs asoc->str_of_pdapi = chk->rec.data.stream_number; 2297163953Srrs asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2298163953Srrs asoc->pdapi_ppid = chk->rec.data.payloadtype; 2299163953Srrs asoc->fragment_flags = chk->rec.data.rcv_flags; 2300163953Srrs sctp_service_reassembly(stcb, asoc); 2301165647Srrs if (asoc->fragmented_delivery_inprogress == 0) { 2302165647Srrs goto doit_again; 2303165647Srrs } 2304163953Srrs } 2305163953Srrs } 2306163953Srrs} 

/*
 * Process the DATA-chunk region of an incoming SCTP packet.
 *
 * Walks the chunks starting at *offset in *mm, feeding each DATA chunk to
 * sctp_process_a_data_chunk(), validating lengths, and handling non-DATA
 * chunks that appear (illegally) after DATA.  Updates *offset and
 * *high_tsn as it goes.
 *
 * Returns 0 on success, 1 if the first chunk header could not be read,
 * and 2 if the association was aborted (caller must not touch stcb).
 * Called with the TCB lock held (asserted below).
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t * high_tsn)
{
	struct sctp_data_chunk *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int chk_length, break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* Remember whether a gap existed BEFORE this packet is processed. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation. Note for __Panda__ we don't do this
	 * since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had a
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
				    chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
				return (2);
			}
			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * empty data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
					    ch->ch.chunk_type);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct mbuf *op_err;
					struct sctp_gen_error_cause *cause;

					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
					    0, M_NOWAIT, 1, MT_DATA);
					if (op_err != NULL) {
						cause = mtod(op_err, struct sctp_gen_error_cause *);
						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
						cause->length = htons(chk_length + sizeof(struct sctp_gen_error_cause));
						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(op_err) != NULL) {
							sctp_queue_op_err(stcb, op_err);
						} else {
							sctp_m_freem(op_err);
						}
					}
				}
				if ((ch->ch.chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INDATA,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
		sctp_service_queues(stcb, asoc);

	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}

/*
 * Mark every chunk on the sent queue covered by one gap-ack block
 * [frag_strt, frag_end] (offsets relative to last_tsn).  Updates flight
 * size, cwnd/RTO bookkeeping and the CMT (CUCv2/SFR/DAC) trackers along
 * the way.  *p_tp1 carries the queue position between successive blocks.
 * Returns non-zero if a chunk's data was freed (nr-sack case), so the
 * caller can wake the sender.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t * biggest_newly_acked_tsn,
    uint32_t * this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.TSN_seq == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.TSN_seq,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.TSN_seq,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
						}
						/* Chunk leaves flight: shrink per-net and total counters. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?  Only one RTT sample
							 * per SACK is taken (gated by *rto_ok).
							 */
							if (tp1->do_rtt) {
								if (*rto_ok) {
									tp1->whoTo->RTO =
									    sctp_calculate_rto(stcb,
									    &stcb->asoc,
									    tp1->whoTo,
									    &tp1->sent_rcv_time,
									    sctp_align_safe_nocopy,
									    SCTP_RTT_FROM_DATA);
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}
					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.TSN_seq;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
						}
						/*
						 * If a pending stream reset was waiting for
						 * this stream to drain, trigger it now.
						 */
						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/*
							 * sa_ignore
							 * NO_NULL_CHK
							 */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->TSN_seq == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* Wrap around once in case gap blocks arrive out of order. */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}


/*
 * Walk all gap-ack blocks (revocable first, then nr-sack blocks) of a
 * SACK/NR-SACK at *offset in mbuf m, dispatching each to
 * sctp_process_segment_range().  Returns non-zero if any chunk's data
 * was freed.
 */
static int
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
    sctp_association *asoc,
    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
    int num_seg, int num_nr_seg, int *rto_ok)
{
	struct sctp_gap_ack_block *frag, block;
	struct sctp_tmit_chunk *tp1;
	int i;
	int num_frs = 0;
	int chunk_freed;
	int non_revocable;
	uint16_t frag_strt, frag_end, prev_frag_end;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	prev_frag_end = 0;
	chunk_freed = 0;

	for (i = 0; i < (num_seg + num_nr_seg); i++) {
		if (i == num_seg) {
			/* Switching to the nr-sack blocks: rescan from the start. */
			prev_frag_end = 0;
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
		*offset += sizeof(block);
		if (frag == NULL) {
			/* Ran out of mbuf data; stop with what we have. */
			return (chunk_freed);
		}
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);

		if (frag_strt > frag_end) {
			/* This gap report is malformed, skip it. */
			continue;
		}
		if (frag_strt <= prev_frag_end) {
			/* This gap report is not in order, so restart. */
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
			*biggest_tsn_acked = last_tsn + frag_end;
		}
		/* Blocks at index >= num_seg are nr-sack (non-revocable). */
		if (i < num_seg) {
			non_revocable = 0;
		} else {
			non_revocable = 1;
		}
		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
		    non_revocable, &num_frs, biggest_newly_acked_tsn,
		    this_sack_lowest_newack, rto_ok)) {
			chunk_freed = 1;
		}
		prev_frag_end = frag_end;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		if (num_frs)
			sctp_log_fr(*biggest_tsn_acked,
			    *biggest_newly_acked_tsn,
			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
	}
	return (chunk_freed);
}

/*
 * Scan the sent queue for chunks above 'cumack' that were ACKED by an
 * earlier SACK but are NOT covered by this one (TSN <= biggest_tsn_acked
 * yet not re-acked): such chunks have been revoked and must be put back
 * in flight.  Chunks re-acked this time (MARKED) are promoted to ACKED.
 */
static void
sctp_check_for_revoked(struct sctp_tcb *stcb,
    struct sctp_association *asoc, uint32_t cumack,
    uint32_t biggest_tsn_acked)
{
	struct sctp_tmit_chunk *tp1;

	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
			/*
			 * ok this guy is either ACK or MARKED. If it is
			 * ACKED it has been previously acked but not this
			 * time i.e. revoked. If it is MARKED it was ACK'ed
			 * again.
			 */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
				break;
			}
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				/* it has been revoked */
				tp1->sent = SCTP_DATAGRAM_SENT;
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * We must add this stuff back in to assure
				 * timers and such get started.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				/*
				 * We inflate the cwnd to compensate for our
				 * artificial inflation of the flight_size.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					    cumack,
					    tp1->rec.data.TSN_seq,
					    0,
					    0,
					    SCTP_LOG_TSN_REVOKED);
				}
			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
				/* it has been re-acked in this SACK */
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
			break;
	}
}


/*
 * Apply fast-retransmit "strike" accounting to chunks reported missing
 * by the gap-ack blocks of a SACK (RFC 4960 HTNA plus the CMT SFR/DAC
 * variants).  PR-SCTP chunks whose lifetime expired are dropped here.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	int tot_retrans = 0;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.TSN_seq;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.prsctp_supported) {
		/* Sample 'now' once for all PR-SCTP lifetime checks below. */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.prsctp_supported) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
			/* we are beyond the tsn in the sack */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue striking FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were received for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
		    tp1->whoTo->this_sack_highest_newack)) {
			/*
			 * CMT: New acks were received for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
3027163953Srrs */ 3028163953Srrs continue; 3029163953Srrs } 3030163953Srrs /* 3031163953Srrs * Here we check to see if we were have already done a FR 3032163953Srrs * and if so we see if the biggest TSN we saw in the sack is 3033163953Srrs * smaller than the recovery point. If so we don't strike 3034163953Srrs * the tsn... otherwise we CAN strike the TSN. 3035163953Srrs */ 3036163953Srrs /* 3037167598Srrs * @@@ JRI: Check for CMT if (accum_moved && 3038167598Srrs * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3039167598Srrs * 0)) { 3040163953Srrs */ 3041167598Srrs if (accum_moved && asoc->fast_retran_loss_recovery) { 3042163953Srrs /* 3043163953Srrs * Strike the TSN if in fast-recovery and cum-ack 3044163953Srrs * moved. 3045163953Srrs */ 3046179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3047170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3048170744Srrs tp1->rec.data.TSN_seq, 3049170744Srrs tp1->sent, 3050170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3051170744Srrs } 3052168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3053168124Srrs tp1->sent++; 3054168124Srrs } 3055216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3056211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3057163953Srrs /* 3058163953Srrs * CMT DAC algorithm: If SACK flag is set to 3059163953Srrs * 0, then lowest_newack test will not pass 3060163953Srrs * because it would have been set to the 3061163953Srrs * cumack earlier. If not already to be 3062163953Srrs * rtx'd, If not a mixed sack and if tp1 is 3063163953Srrs * not between two sacked TSNs, then mark by 3064168709Srrs * one more. NOTE that we are marking by one 3065168709Srrs * additional time since the SACK DAC flag 3066168709Srrs * indicates that two packets have been 3067168709Srrs * received after this missing TSN. 
3068163953Srrs */ 3069168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3070216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3071179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3072170744Srrs sctp_log_fr(16 + num_dests_sacked, 3073170744Srrs tp1->rec.data.TSN_seq, 3074170744Srrs tp1->sent, 3075170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3076170744Srrs } 3077163953Srrs tp1->sent++; 3078163953Srrs } 3079163953Srrs } 3080211944Stuexen } else if ((tp1->rec.data.doing_fast_retransmit) && 3081211944Stuexen (asoc->sctp_cmt_on_off == 0)) { 3082163953Srrs /* 3083163953Srrs * For those that have done a FR we must take 3084163953Srrs * special consideration if we strike. I.e the 3085163953Srrs * biggest_newly_acked must be higher than the 3086163953Srrs * sending_seq at the time we did the FR. 3087163953Srrs */ 3088168124Srrs if ( 3089163953Srrs#ifdef SCTP_FR_TO_ALTERNATE 3090163953Srrs /* 3091163953Srrs * If FR's go to new networks, then we must only do 3092163953Srrs * this for singly homed asoc's. However if the FR's 3093163953Srrs * go to the same network (Armando's work) then its 3094163953Srrs * ok to FR multiple times. 3095163953Srrs */ 3096168124Srrs (asoc->numnets < 2) 3097163953Srrs#else 3098168124Srrs (1) 3099163953Srrs#endif 3100168124Srrs ) { 3101168124Srrs 3102216825Stuexen if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3103163953Srrs tp1->rec.data.fast_retran_tsn)) { 3104163953Srrs /* 3105163953Srrs * Strike the TSN, since this ack is 3106163953Srrs * beyond where things were when we 3107163953Srrs * did a FR. 
3108163953Srrs */ 3109179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3110170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3111170744Srrs tp1->rec.data.TSN_seq, 3112170744Srrs tp1->sent, 3113170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3114170744Srrs } 3115168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3116168124Srrs tp1->sent++; 3117168124Srrs } 3118163953Srrs strike_flag = 1; 3119216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3120211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3121163953Srrs /* 3122163953Srrs * CMT DAC algorithm: If 3123163953Srrs * SACK flag is set to 0, 3124163953Srrs * then lowest_newack test 3125163953Srrs * will not pass because it 3126163953Srrs * would have been set to 3127163953Srrs * the cumack earlier. If 3128163953Srrs * not already to be rtx'd, 3129163953Srrs * If not a mixed sack and 3130163953Srrs * if tp1 is not between two 3131163953Srrs * sacked TSNs, then mark by 3132168709Srrs * one more. NOTE that we 3133168709Srrs * are marking by one 3134168709Srrs * additional time since the 3135168709Srrs * SACK DAC flag indicates 3136168709Srrs * that two packets have 3137168709Srrs * been received after this 3138168709Srrs * missing TSN. 3139163953Srrs */ 3140168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3141168124Srrs (num_dests_sacked == 1) && 3142216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, 3143216825Stuexen tp1->rec.data.TSN_seq)) { 3144179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3145170744Srrs sctp_log_fr(32 + num_dests_sacked, 3146170744Srrs tp1->rec.data.TSN_seq, 3147170744Srrs tp1->sent, 3148170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3149170744Srrs } 3150168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3151168124Srrs tp1->sent++; 3152168124Srrs } 3153163953Srrs } 3154163953Srrs } 3155163953Srrs } 3156163953Srrs } 3157163953Srrs /* 3158167598Srrs * JRI: TODO: remove code for HTNA algo. CMT's SFR 3159167598Srrs * algo covers HTNA. 
3160163953Srrs */ 3161216825Stuexen } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3162216825Stuexen biggest_tsn_newly_acked)) { 3163163953Srrs /* 3164163953Srrs * We don't strike these: This is the HTNA 3165163953Srrs * algorithm i.e. we don't strike If our TSN is 3166163953Srrs * larger than the Highest TSN Newly Acked. 3167163953Srrs */ 3168163953Srrs ; 3169163953Srrs } else { 3170163953Srrs /* Strike the TSN */ 3171179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3172170744Srrs sctp_log_fr(biggest_tsn_newly_acked, 3173170744Srrs tp1->rec.data.TSN_seq, 3174170744Srrs tp1->sent, 3175170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3176170744Srrs } 3177168124Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3178168124Srrs tp1->sent++; 3179168124Srrs } 3180216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 3181211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3182163953Srrs /* 3183163953Srrs * CMT DAC algorithm: If SACK flag is set to 3184163953Srrs * 0, then lowest_newack test will not pass 3185163953Srrs * because it would have been set to the 3186163953Srrs * cumack earlier. If not already to be 3187163953Srrs * rtx'd, If not a mixed sack and if tp1 is 3188163953Srrs * not between two sacked TSNs, then mark by 3189168709Srrs * one more. NOTE that we are marking by one 3190168709Srrs * additional time since the SACK DAC flag 3191168709Srrs * indicates that two packets have been 3192168709Srrs * received after this missing TSN. 
3193163953Srrs */ 3194168124Srrs if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3195216825Stuexen SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3196179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3197170744Srrs sctp_log_fr(48 + num_dests_sacked, 3198170744Srrs tp1->rec.data.TSN_seq, 3199170744Srrs tp1->sent, 3200170744Srrs SCTP_FR_LOG_STRIKE_CHUNK); 3201170744Srrs } 3202163953Srrs tp1->sent++; 3203163953Srrs } 3204163953Srrs } 3205163953Srrs } 3206163953Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3207163953Srrs struct sctp_nets *alt; 3208163953Srrs 3209191049Srrs /* fix counts and things */ 3210191049Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3211191049Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3212191049Srrs (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3213191049Srrs tp1->book_size, 3214191049Srrs (uintptr_t) tp1->whoTo, 3215191049Srrs tp1->rec.data.TSN_seq); 3216191049Srrs } 3217191049Srrs if (tp1->whoTo) { 3218191049Srrs tp1->whoTo->net_ack++; 3219191049Srrs sctp_flight_size_decrease(tp1); 3220219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3221219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3222219057Srrs tp1); 3223219057Srrs } 3224191049Srrs } 3225191049Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3226191049Srrs sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3227191049Srrs asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3228191049Srrs } 3229191049Srrs /* add back to the rwnd */ 3230191049Srrs asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3231191049Srrs 3232191049Srrs /* remove from the total flight */ 3233191049Srrs sctp_total_flight_decrease(stcb, tp1); 3234191049Srrs 3235270357Stuexen if ((stcb->asoc.prsctp_supported) && 3236207191Stuexen (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3237207191Stuexen /* 
3238207191Stuexen * Has it been retransmitted tv_sec times? - 3239207191Stuexen * we store the retran count there. 3240207191Stuexen */ 3241207191Stuexen if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3242207191Stuexen /* Yes, so drop it */ 3243207191Stuexen if (tp1->data != NULL) { 3244235416Stuexen (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3245207191Stuexen SCTP_SO_NOT_LOCKED); 3246207191Stuexen } 3247207191Stuexen /* Make sure to flag we had a FR */ 3248207191Stuexen tp1->whoTo->net_ack++; 3249207191Stuexen continue; 3250207191Stuexen } 3251207191Stuexen } 3252234995Stuexen /* 3253234995Stuexen * SCTP_PRINTF("OK, we are now ready to FR this 3254234995Stuexen * guy\n"); 3255234995Stuexen */ 3256179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3257170744Srrs sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3258170744Srrs 0, SCTP_FR_MARKED); 3259170744Srrs } 3260163953Srrs if (strike_flag) { 3261163953Srrs /* This is a subsequent FR */ 3262163953Srrs SCTP_STAT_INCR(sctps_sendmultfastretrans); 3263163953Srrs } 3264168124Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3265216669Stuexen if (asoc->sctp_cmt_on_off > 0) { 3266163953Srrs /* 3267163953Srrs * CMT: Using RTX_SSTHRESH policy for CMT. 3268163953Srrs * If CMT is being used, then pick dest with 3269163953Srrs * largest ssthresh for any retransmission. 
3270163953Srrs */ 3271163953Srrs tp1->no_fr_allowed = 1; 3272163953Srrs alt = tp1->whoTo; 3273169655Srrs /* sa_ignore NO_NULL_CHK */ 3274211944Stuexen if (asoc->sctp_cmt_pf > 0) { 3275171440Srrs /* 3276171440Srrs * JRS 5/18/07 - If CMT PF is on, 3277171440Srrs * use the PF version of 3278171440Srrs * find_alt_net() 3279171440Srrs */ 3280171440Srrs alt = sctp_find_alternate_net(stcb, alt, 2); 3281171440Srrs } else { 3282171440Srrs /* 3283171440Srrs * JRS 5/18/07 - If only CMT is on, 3284171440Srrs * use the CMT version of 3285171440Srrs * find_alt_net() 3286171440Srrs */ 3287171531Srrs /* sa_ignore NO_NULL_CHK */ 3288171440Srrs alt = sctp_find_alternate_net(stcb, alt, 1); 3289171440Srrs } 3290169420Srrs if (alt == NULL) { 3291169420Srrs alt = tp1->whoTo; 3292169420Srrs } 3293163953Srrs /* 3294163953Srrs * CUCv2: If a different dest is picked for 3295163953Srrs * the retransmission, then new 3296163953Srrs * (rtx-)pseudo_cumack needs to be tracked 3297163953Srrs * for orig dest. Let CUCv2 track new (rtx-) 3298163953Srrs * pseudo-cumack always. 3299163953Srrs */ 3300169420Srrs if (tp1->whoTo) { 3301169420Srrs tp1->whoTo->find_pseudo_cumack = 1; 3302169420Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 3303169420Srrs } 3304163953Srrs } else {/* CMT is OFF */ 3305163953Srrs 3306163953Srrs#ifdef SCTP_FR_TO_ALTERNATE 3307163953Srrs /* Can we find an alternate? */ 3308163953Srrs alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3309163953Srrs#else 3310163953Srrs /* 3311163953Srrs * default behavior is to NOT retransmit 3312163953Srrs * FR's to an alternate. Armando Caro's 3313163953Srrs * paper details why. 
3314163953Srrs */ 3315163953Srrs alt = tp1->whoTo; 3316163953Srrs#endif 3317163953Srrs } 3318163953Srrs 3319163953Srrs tp1->rec.data.doing_fast_retransmit = 1; 3320163953Srrs tot_retrans++; 3321163953Srrs /* mark the sending seq for possible subsequent FR's */ 3322163953Srrs /* 3323234995Stuexen * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3324163953Srrs * (uint32_t)tpi->rec.data.TSN_seq); 3325163953Srrs */ 3326163953Srrs if (TAILQ_EMPTY(&asoc->send_queue)) { 3327163953Srrs /* 3328163953Srrs * If the queue of send is empty then its 3329163953Srrs * the next sequence number that will be 3330163953Srrs * assigned so we subtract one from this to 3331163953Srrs * get the one we last sent. 3332163953Srrs */ 3333163953Srrs tp1->rec.data.fast_retran_tsn = sending_seq; 3334163953Srrs } else { 3335163953Srrs /* 3336163953Srrs * If there are chunks on the send queue 3337163953Srrs * (unsent data that has made it from the 3338163953Srrs * stream queues but not out the door, we 3339163953Srrs * take the first one (which will have the 3340163953Srrs * lowest TSN) and subtract one to get the 3341163953Srrs * one we last sent. 3342163953Srrs */ 3343163953Srrs struct sctp_tmit_chunk *ttt; 3344163953Srrs 3345163953Srrs ttt = TAILQ_FIRST(&asoc->send_queue); 3346163953Srrs tp1->rec.data.fast_retran_tsn = 3347163953Srrs ttt->rec.data.TSN_seq; 3348163953Srrs } 3349163953Srrs 3350163953Srrs if (tp1->do_rtt) { 3351163953Srrs /* 3352163953Srrs * this guy had a RTO calculation pending on 3353163953Srrs * it, cancel it 3354163953Srrs */ 3355228907Stuexen if ((tp1->whoTo != NULL) && 3356228907Stuexen (tp1->whoTo->rto_needed == 0)) { 3357219397Srrs tp1->whoTo->rto_needed = 1; 3358219397Srrs } 3359163953Srrs tp1->do_rtt = 0; 3360163953Srrs } 3361163953Srrs if (alt != tp1->whoTo) { 3362163953Srrs /* yes, there is an alternate. 
*/ 3363163953Srrs sctp_free_remote_addr(tp1->whoTo); 3364169655Srrs /* sa_ignore FREED_MEMORY */ 3365163953Srrs tp1->whoTo = alt; 3366163953Srrs atomic_add_int(&alt->ref_count, 1); 3367163953Srrs } 3368163953Srrs } 3369216822Stuexen } 3370163953Srrs} 3371163953Srrs 3372163953Srrsstruct sctp_tmit_chunk * 3373163953Srrssctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3374163953Srrs struct sctp_association *asoc) 3375163953Srrs{ 3376163953Srrs struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3377163953Srrs struct timeval now; 3378163953Srrs int now_filled = 0; 3379163953Srrs 3380270357Stuexen if (asoc->prsctp_supported == 0) { 3381163953Srrs return (NULL); 3382163953Srrs } 3383216822Stuexen TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3384163953Srrs if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3385242708Stuexen tp1->sent != SCTP_DATAGRAM_RESEND && 3386243157Stuexen tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3387163953Srrs /* no chance to advance, out of here */ 3388163953Srrs break; 3389163953Srrs } 3390189790Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3391242709Stuexen if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3392243157Stuexen (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3393189790Srrs sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3394189790Srrs asoc->advanced_peer_ack_point, 3395189790Srrs tp1->rec.data.TSN_seq, 0, 0); 3396189790Srrs } 3397189790Srrs } 3398163953Srrs if (!PR_SCTP_ENABLED(tp1->flags)) { 3399163953Srrs /* 3400163953Srrs * We can't fwd-tsn past any that are reliable aka 3401163953Srrs * retransmitted until the asoc fails. 
3402163953Srrs */ 3403163953Srrs break; 3404163953Srrs } 3405163953Srrs if (!now_filled) { 3406169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 3407163953Srrs now_filled = 1; 3408163953Srrs } 3409163953Srrs /* 3410163953Srrs * now we got a chunk which is marked for another 3411163953Srrs * retransmission to a PR-stream but has run out its chances 3412163953Srrs * already maybe OR has been marked to skip now. Can we skip 3413163953Srrs * it if its a resend? 3414163953Srrs */ 3415163953Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND && 3416163953Srrs (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3417163953Srrs /* 3418163953Srrs * Now is this one marked for resend and its time is 3419163953Srrs * now up? 3420163953Srrs */ 3421163953Srrs if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3422163953Srrs /* Yes so drop it */ 3423163953Srrs if (tp1->data) { 3424169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3425235416Stuexen 1, SCTP_SO_NOT_LOCKED); 3426163953Srrs } 3427163953Srrs } else { 3428163953Srrs /* 3429163953Srrs * No, we are done when hit one for resend 3430163953Srrs * whos time as not expired. 3431163953Srrs */ 3432163953Srrs break; 3433163953Srrs } 3434163953Srrs } 3435163953Srrs /* 3436163953Srrs * Ok now if this chunk is marked to drop it we can clean up 3437163953Srrs * the chunk, advance our peer ack point and we can check 3438163953Srrs * the next chunk. 
3439163953Srrs */ 3440242708Stuexen if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3441243157Stuexen (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3442163953Srrs /* advance PeerAckPoint goes forward */ 3443216825Stuexen if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { 3444189790Srrs asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3445189790Srrs a_adv = tp1; 3446189790Srrs } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { 3447189790Srrs /* No update but we do save the chk */ 3448189790Srrs a_adv = tp1; 3449189790Srrs } 3450163953Srrs } else { 3451163953Srrs /* 3452163953Srrs * If it is still in RESEND we can advance no 3453163953Srrs * further 3454163953Srrs */ 3455163953Srrs break; 3456163953Srrs } 3457163953Srrs } 3458163953Srrs return (a_adv); 3459163953Srrs} 3460163953Srrs 3461189790Srrsstatic int 3462168709Srrssctp_fs_audit(struct sctp_association *asoc) 3463168299Srrs{ 3464168299Srrs struct sctp_tmit_chunk *chk; 3465168299Srrs int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3466283821Stuexen int ret; 3467168299Srrs 3468283821Stuexen#ifndef INVARIANTS 3469283821Stuexen int entry_flight, entry_cnt; 3470283821Stuexen 3471283821Stuexen#endif 3472283821Stuexen 3473283821Stuexen ret = 0; 3474283821Stuexen#ifndef INVARIANTS 3475189790Srrs entry_flight = asoc->total_flight; 3476189790Srrs entry_cnt = asoc->total_flight_count; 3477283821Stuexen#endif 3478189790Srrs if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3479189790Srrs return (0); 3480189790Srrs 3481168299Srrs TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3482168299Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 3483234995Stuexen SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n", 3484189790Srrs chk->rec.data.TSN_seq, 3485189790Srrs chk->send_size, 3486234995Stuexen chk->snd_count); 3487168299Srrs inflight++; 3488168299Srrs } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3489168299Srrs resend++; 3490168299Srrs } else if (chk->sent < 
SCTP_DATAGRAM_ACKED) { 3491168299Srrs inbetween++; 3492168299Srrs } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3493168299Srrs above++; 3494168299Srrs } else { 3495168299Srrs acked++; 3496168299Srrs } 3497168299Srrs } 3498168859Srrs 3499168709Srrs if ((inflight > 0) || (inbetween > 0)) { 3500168859Srrs#ifdef INVARIANTS 3501168709Srrs panic("Flight size-express incorrect? \n"); 3502168859Srrs#else 3503234995Stuexen SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n", 3504189790Srrs entry_flight, entry_cnt); 3505189790Srrs 3506189790Srrs SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n", 3507189790Srrs inflight, inbetween, resend, above, acked); 3508189790Srrs ret = 1; 3509168859Srrs#endif 3510168709Srrs } 3511189790Srrs return (ret); 3512168299Srrs} 3513168299Srrs 3514168709Srrs 3515168709Srrsstatic void 3516168709Srrssctp_window_probe_recovery(struct sctp_tcb *stcb, 3517168709Srrs struct sctp_association *asoc, 3518168709Srrs struct sctp_tmit_chunk *tp1) 3519168709Srrs{ 3520189371Srrs tp1->window_probe = 0; 3521189444Srrs if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3522189371Srrs /* TSN's skipped we do NOT move back. */ 3523189371Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3524283731Stuexen tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3525189371Srrs tp1->book_size, 3526189371Srrs (uintptr_t) tp1->whoTo, 3527189371Srrs tp1->rec.data.TSN_seq); 3528189371Srrs return; 3529189371Srrs } 3530189444Srrs /* First setup this by shrinking flight */ 3531219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3532219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3533219057Srrs tp1); 3534219057Srrs } 3535189444Srrs sctp_flight_size_decrease(tp1); 3536189444Srrs sctp_total_flight_decrease(stcb, tp1); 3537189444Srrs /* Now mark for resend */ 3538189444Srrs tp1->sent = SCTP_DATAGRAM_RESEND; 3539208854Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3540208854Srrs 3541179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3542170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3543170744Srrs tp1->whoTo->flight_size, 3544170744Srrs tp1->book_size, 3545170744Srrs (uintptr_t) tp1->whoTo, 3546170744Srrs tp1->rec.data.TSN_seq); 3547170744Srrs } 3548168709Srrs} 3549168709Srrs 3550163953Srrsvoid 3551163953Srrssctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3552218186Srrs uint32_t rwnd, int *abort_now, int ecne_seen) 3553163953Srrs{ 3554163953Srrs struct sctp_nets *net; 3555163953Srrs struct sctp_association *asoc; 3556163953Srrs struct sctp_tmit_chunk *tp1, *tp2; 3557168124Srrs uint32_t old_rwnd; 3558168124Srrs int win_probe_recovery = 0; 3559168709Srrs int win_probe_recovered = 0; 3560169208Srrs int j, done_once = 0; 3561219397Srrs int rto_ok = 1; 3562163953Srrs 3563179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3564170744Srrs sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3565170744Srrs rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3566170744Srrs } 3567163953Srrs SCTP_TCB_LOCK_ASSERT(stcb); 3568171477Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 3569171477Srrs stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3570171477Srrs 
stcb->asoc.cumack_log_at++; 3571171477Srrs if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3572171477Srrs stcb->asoc.cumack_log_at = 0; 3573171477Srrs } 3574171477Srrs#endif 3575163953Srrs asoc = &stcb->asoc; 3576169208Srrs old_rwnd = asoc->peers_rwnd; 3577216825Stuexen if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3578168124Srrs /* old ack */ 3579168124Srrs return; 3580169208Srrs } else if (asoc->last_acked_seq == cumack) { 3581169208Srrs /* Window update sack */ 3582169208Srrs asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3583210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3584169208Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3585169208Srrs /* SWS sender side engages */ 3586169208Srrs asoc->peers_rwnd = 0; 3587169208Srrs } 3588169208Srrs if (asoc->peers_rwnd > old_rwnd) { 3589169208Srrs goto again; 3590169208Srrs } 3591169208Srrs return; 3592168124Srrs } 3593163953Srrs /* First setup for CC stuff */ 3594163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3595218072Srrs if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3596218072Srrs /* Drag along the window_tsn for cwr's */ 3597218072Srrs net->cwr_window_tsn = cumack; 3598218072Srrs } 3599163953Srrs net->prev_cwnd = net->cwnd; 3600163953Srrs net->net_ack = 0; 3601163953Srrs net->net_ack2 = 0; 3602167695Srrs 3603167695Srrs /* 3604167695Srrs * CMT: Reset CUC and Fast recovery algo variables before 3605167695Srrs * SACK processing 3606167695Srrs */ 3607167695Srrs net->new_pseudo_cumack = 0; 3608167695Srrs net->will_exit_fast_recovery = 0; 3609219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3610219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3611219057Srrs } 3612163953Srrs } 3613179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 3614165647Srrs uint32_t send_s; 3615165647Srrs 3616168709Srrs if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3617168709Srrs tp1 = 
TAILQ_LAST(&asoc->sent_queue, 3618168709Srrs sctpchunk_listhead); 3619168709Srrs send_s = tp1->rec.data.TSN_seq + 1; 3620168709Srrs } else { 3621165647Srrs send_s = asoc->sending_seq; 3622165647Srrs } 3623216825Stuexen if (SCTP_TSN_GE(cumack, send_s)) { 3624267723Stuexen struct mbuf *op_err; 3625267723Stuexen char msg[SCTP_DIAG_INFO_LEN]; 3626168709Srrs 3627165647Srrs *abort_now = 1; 3628165647Srrs /* XXX */ 3629283740Stuexen snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 3630267723Stuexen cumack, send_s); 3631267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 3632283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; 3633267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 3634165647Srrs return; 3635165647Srrs } 3636165647Srrs } 3637163953Srrs asoc->this_sack_highest_gap = cumack; 3638179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3639171943Srrs sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3640171943Srrs stcb->asoc.overall_error_count, 3641171943Srrs 0, 3642171943Srrs SCTP_FROM_SCTP_INDATA, 3643171943Srrs __LINE__); 3644171943Srrs } 3645163953Srrs stcb->asoc.overall_error_count = 0; 3646216825Stuexen if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 3647168124Srrs /* process the new consecutive TSN first */ 3648216822Stuexen TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3649216825Stuexen if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { 3650171477Srrs if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3651234995Stuexen SCTP_PRINTF("Warning, an unsent is now acked?\n"); 3652171477Srrs } 3653171477Srrs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3654163953Srrs /* 3655171477Srrs * If it is less than ACKED, it is 3656171477Srrs * now no-longer in flight. 
Higher 3657171477Srrs * values may occur during marking 3658163953Srrs */ 3659171477Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3660179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3661171477Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3662171477Srrs tp1->whoTo->flight_size, 3663171477Srrs tp1->book_size, 3664171477Srrs (uintptr_t) tp1->whoTo, 3665171477Srrs tp1->rec.data.TSN_seq); 3666168124Srrs } 3667171477Srrs sctp_flight_size_decrease(tp1); 3668219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3669219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3670219057Srrs tp1); 3671219057Srrs } 3672172156Srrs /* sa_ignore NO_NULL_CHK */ 3673171477Srrs sctp_total_flight_decrease(stcb, tp1); 3674171477Srrs } 3675171477Srrs tp1->whoTo->net_ack += tp1->send_size; 3676171477Srrs if (tp1->snd_count < 2) { 3677163953Srrs /* 3678171477Srrs * True non-retransmited 3679171477Srrs * chunk 3680163953Srrs */ 3681171477Srrs tp1->whoTo->net_ack2 += 3682171477Srrs tp1->send_size; 3683163953Srrs 3684171477Srrs /* update RTO too? */ 3685171477Srrs if (tp1->do_rtt) { 3686219397Srrs if (rto_ok) { 3687219397Srrs tp1->whoTo->RTO = 3688219397Srrs /* 3689219397Srrs * sa_ignore 3690219397Srrs * NO_NULL_CH 3691219397Srrs * K 3692219397Srrs */ 3693219397Srrs sctp_calculate_rto(stcb, 3694219397Srrs asoc, tp1->whoTo, 3695219397Srrs &tp1->sent_rcv_time, 3696219397Srrs sctp_align_safe_nocopy, 3697219397Srrs SCTP_RTT_FROM_DATA); 3698219397Srrs rto_ok = 0; 3699219397Srrs } 3700219397Srrs if (tp1->whoTo->rto_needed == 0) { 3701219397Srrs tp1->whoTo->rto_needed = 1; 3702219397Srrs } 3703171477Srrs tp1->do_rtt = 0; 3704170744Srrs } 3705168124Srrs } 3706171477Srrs /* 3707171477Srrs * CMT: CUCv2 algorithm. From the 3708171477Srrs * cumack'd TSNs, for each TSN being 3709171477Srrs * acked for the first time, set the 3710171477Srrs * following variables for the 3711171477Srrs * corresp destination. 
3712171477Srrs * new_pseudo_cumack will trigger a 3713171477Srrs * cwnd update. 3714171477Srrs * find_(rtx_)pseudo_cumack will 3715171477Srrs * trigger search for the next 3716171477Srrs * expected (rtx-)pseudo-cumack. 3717171477Srrs */ 3718171477Srrs tp1->whoTo->new_pseudo_cumack = 1; 3719171477Srrs tp1->whoTo->find_pseudo_cumack = 1; 3720171477Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 3721171477Srrs 3722179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3723172156Srrs /* sa_ignore NO_NULL_CHK */ 3724171477Srrs sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3725168124Srrs } 3726163953Srrs } 3727171477Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3728171477Srrs sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3729171477Srrs } 3730171477Srrs if (tp1->rec.data.chunk_was_revoked) { 3731171477Srrs /* deflate the cwnd */ 3732171477Srrs tp1->whoTo->cwnd -= tp1->book_size; 3733171477Srrs tp1->rec.data.chunk_was_revoked = 0; 3734171477Srrs } 3735243157Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3736242714Stuexen if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3737242714Stuexen asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3738242714Stuexen#ifdef INVARIANTS 3739242714Stuexen } else { 3740242714Stuexen panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3741242714Stuexen#endif 3742242714Stuexen } 3743242714Stuexen } 3744294210Stuexen if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 3745294210Stuexen (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 3746294210Stuexen TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 3747294210Stuexen asoc->trigger_reset = 1; 3748294210Stuexen } 3749171477Srrs TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3750171477Srrs if (tp1->data) { 3751172156Srrs /* sa_ignore NO_NULL_CHK */ 3752171477Srrs sctp_free_bufspace(stcb, asoc, tp1, 
1); 3753171477Srrs sctp_m_freem(tp1->data); 3754216822Stuexen tp1->data = NULL; 3755171477Srrs } 3756179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3757171477Srrs sctp_log_sack(asoc->last_acked_seq, 3758171477Srrs cumack, 3759171477Srrs tp1->rec.data.TSN_seq, 3760171477Srrs 0, 3761171477Srrs 0, 3762171477Srrs SCTP_LOG_FREE_SENT); 3763171477Srrs } 3764171477Srrs asoc->sent_queue_cnt--; 3765221627Stuexen sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 3766168124Srrs } else { 3767168124Srrs break; 3768163953Srrs } 3769168124Srrs } 3770171477Srrs 3771163953Srrs } 3772172156Srrs /* sa_ignore NO_NULL_CHK */ 3773163953Srrs if (stcb->sctp_socket) { 3774237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3775172090Srrs struct socket *so; 3776172090Srrs 3777172090Srrs#endif 3778163953Srrs SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 3779179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3780172156Srrs /* sa_ignore NO_NULL_CHK */ 3781228653Stuexen sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 3782170744Srrs } 3783237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3784172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 3785172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 3786172090Srrs SCTP_TCB_UNLOCK(stcb); 3787172090Srrs SCTP_SOCKET_LOCK(so, 1); 3788172090Srrs SCTP_TCB_LOCK(stcb); 3789172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 3790172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3791172090Srrs /* assoc was freed while we were unlocked */ 3792172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 3793172090Srrs return; 3794172090Srrs } 3795172090Srrs#endif 3796163953Srrs sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 3797237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3798172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 3799172090Srrs#endif 3800163953Srrs } else { 3801179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 
3802228653Stuexen sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 3803170744Srrs } 3804163953Srrs } 3805163953Srrs 3806171440Srrs /* JRS - Use the congestion control given in the CC module */ 3807224641Stuexen if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 3808224641Stuexen TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3809224641Stuexen if (net->net_ack2 > 0) { 3810224641Stuexen /* 3811224641Stuexen * Karn's rule applies to clearing error 3812224641Stuexen * count, this is optional. 3813224641Stuexen */ 3814224641Stuexen net->error_count = 0; 3815224641Stuexen if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 3816224641Stuexen /* addr came good */ 3817224641Stuexen net->dest_state |= SCTP_ADDR_REACHABLE; 3818224641Stuexen sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3819235414Stuexen 0, (void *)net, SCTP_SO_NOT_LOCKED); 3820224641Stuexen } 3821224641Stuexen if (net == stcb->asoc.primary_destination) { 3822224641Stuexen if (stcb->asoc.alternate) { 3823224641Stuexen /* 3824224641Stuexen * release the alternate, 3825224641Stuexen * primary is good 3826224641Stuexen */ 3827224641Stuexen sctp_free_remote_addr(stcb->asoc.alternate); 3828224641Stuexen stcb->asoc.alternate = NULL; 3829224641Stuexen } 3830224641Stuexen } 3831224641Stuexen if (net->dest_state & SCTP_ADDR_PF) { 3832224641Stuexen net->dest_state &= ~SCTP_ADDR_PF; 3833283822Stuexen sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 3834283822Stuexen stcb->sctp_ep, stcb, net, 3835283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 3836224641Stuexen sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 3837224641Stuexen asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 3838224641Stuexen /* Done with this net */ 3839224641Stuexen net->net_ack = 0; 3840224641Stuexen } 3841224641Stuexen /* restore any doubled timers */ 3842224641Stuexen net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3843224641Stuexen if (net->RTO < stcb->asoc.minrto) { 3844224641Stuexen net->RTO = 
stcb->asoc.minrto; 3845224641Stuexen } 3846224641Stuexen if (net->RTO > stcb->asoc.maxrto) { 3847224641Stuexen net->RTO = stcb->asoc.maxrto; 3848224641Stuexen } 3849224641Stuexen } 3850224641Stuexen } 3851171440Srrs asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 3852224641Stuexen } 3853163953Srrs asoc->last_acked_seq = cumack; 3854168124Srrs 3855163953Srrs if (TAILQ_EMPTY(&asoc->sent_queue)) { 3856163953Srrs /* nothing left in-flight */ 3857163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3858163953Srrs net->flight_size = 0; 3859163953Srrs net->partial_bytes_acked = 0; 3860163953Srrs } 3861163953Srrs asoc->total_flight = 0; 3862163953Srrs asoc->total_flight_count = 0; 3863163953Srrs } 3864163953Srrs /* RWND update */ 3865163953Srrs asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3866210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3867163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3868163953Srrs /* SWS sender side engages */ 3869163953Srrs asoc->peers_rwnd = 0; 3870163953Srrs } 3871168124Srrs if (asoc->peers_rwnd > old_rwnd) { 3872168124Srrs win_probe_recovery = 1; 3873168124Srrs } 3874163953Srrs /* Now assure a timer where data is queued at */ 3875165220Srrsagain: 3876165220Srrs j = 0; 3877163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3878189444Srrs int to_ticks; 3879189444Srrs 3880168124Srrs if (win_probe_recovery && (net->window_probe)) { 3881168709Srrs win_probe_recovered = 1; 3882168124Srrs /* 3883168124Srrs * Find first chunk that was used with window probe 3884168124Srrs * and clear the sent 3885168124Srrs */ 3886169655Srrs /* sa_ignore FREED_MEMORY */ 3887168124Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3888168124Srrs if (tp1->window_probe) { 3889202526Stuexen /* move back to data send queue */ 3890228653Stuexen sctp_window_probe_recovery(stcb, asoc, tp1); 3891168124Srrs break; 3892168124Srrs } 3893168124Srrs } 
3894168124Srrs } 3895189444Srrs if (net->RTO == 0) { 3896189444Srrs to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 3897189444Srrs } else { 3898189444Srrs to_ticks = MSEC_TO_TICKS(net->RTO); 3899189444Srrs } 3900163953Srrs if (net->flight_size) { 3901165220Srrs j++; 3902169420Srrs (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 3903163953Srrs sctp_timeout_handler, &net->rxt_timer); 3904189444Srrs if (net->window_probe) { 3905189444Srrs net->window_probe = 0; 3906189444Srrs } 3907163953Srrs } else { 3908189444Srrs if (net->window_probe) { 3909189444Srrs /* 3910189444Srrs * In window probes we must assure a timer 3911189444Srrs * is still running there 3912189444Srrs */ 3913189444Srrs net->window_probe = 0; 3914189444Srrs if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 3915189444Srrs SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 3916189444Srrs sctp_timeout_handler, &net->rxt_timer); 3917189444Srrs } 3918189444Srrs } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 3919163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3920165220Srrs stcb, net, 3921283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3922163953Srrs } 3923163953Srrs } 3924163953Srrs } 3925168299Srrs if ((j == 0) && 3926168299Srrs (!TAILQ_EMPTY(&asoc->sent_queue)) && 3927168299Srrs (asoc->sent_queue_retran_cnt == 0) && 3928168709Srrs (win_probe_recovered == 0) && 3929168299Srrs (done_once == 0)) { 3930189790Srrs /* 3931189790Srrs * huh, this should not happen unless all packets are 3932189790Srrs * PR-SCTP and marked to skip of course. 
3933189790Srrs */ 3934189790Srrs if (sctp_fs_audit(asoc)) { 3935189790Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3936202526Stuexen net->flight_size = 0; 3937165220Srrs } 3938189790Srrs asoc->total_flight = 0; 3939189790Srrs asoc->total_flight_count = 0; 3940189790Srrs asoc->sent_queue_retran_cnt = 0; 3941189790Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3942189790Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3943189790Srrs sctp_flight_size_increase(tp1); 3944189790Srrs sctp_total_flight_increase(stcb, tp1); 3945189790Srrs } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3946208854Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3947189790Srrs } 3948189790Srrs } 3949165220Srrs } 3950168299Srrs done_once = 1; 3951165220Srrs goto again; 3952165220Srrs } 3953163953Srrs /**********************************/ 3954163953Srrs /* Now what about shutdown issues */ 3955163953Srrs /**********************************/ 3956163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 3957163953Srrs /* nothing left on sendqueue.. consider done */ 3958163953Srrs /* clean up */ 3959163953Srrs if ((asoc->stream_queue_cnt == 1) && 3960163953Srrs ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 3961163953Srrs (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 3962163953Srrs (asoc->locked_on_sending) 3963163953Srrs ) { 3964163953Srrs struct sctp_stream_queue_pending *sp; 3965163953Srrs 3966163953Srrs /* 3967163953Srrs * I may be in a state where we got all across.. but 3968163953Srrs * cannot write more due to a shutdown... we abort 3969163953Srrs * since the user did not indicate EOR in this case. 3970163953Srrs * The sp will be cleaned during free of the asoc. 
3971163953Srrs */ 3972163953Srrs sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 3973163953Srrs sctp_streamhead); 3974171990Srrs if ((sp) && (sp->length == 0)) { 3975171990Srrs /* Let cleanup code purge it */ 3976171990Srrs if (sp->msg_is_complete) { 3977171990Srrs asoc->stream_queue_cnt--; 3978171990Srrs } else { 3979171990Srrs asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 3980171990Srrs asoc->locked_on_sending = NULL; 3981171990Srrs asoc->stream_queue_cnt--; 3982171990Srrs } 3983163953Srrs } 3984163953Srrs } 3985163953Srrs if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 3986163953Srrs (asoc->stream_queue_cnt == 0)) { 3987163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 3988163953Srrs /* Need to abort here */ 3989267723Stuexen struct mbuf *op_err; 3990163953Srrs 3991163953Srrs abort_out_now: 3992163953Srrs *abort_now = 1; 3993163953Srrs /* XXX */ 3994267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 3995283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26; 3996267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 3997294219Stuexen return; 3998163953Srrs } else { 3999224641Stuexen struct sctp_nets *netp; 4000224641Stuexen 4001166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4002166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4003166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4004166675Srrs } 4005171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4006172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4007163953Srrs sctp_stop_timers_for_shutdown(stcb); 4008224641Stuexen if (asoc->alternate) { 4009224641Stuexen netp = asoc->alternate; 4010224641Stuexen } else { 4011224641Stuexen netp = asoc->primary_destination; 4012224641Stuexen } 4013224641Stuexen sctp_send_shutdown(stcb, netp); 4014163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4015224641Stuexen stcb->sctp_ep, stcb, netp); 
4016163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4017224641Stuexen stcb->sctp_ep, stcb, netp); 4018163953Srrs } 4019163953Srrs } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4020163953Srrs (asoc->stream_queue_cnt == 0)) { 4021224641Stuexen struct sctp_nets *netp; 4022224641Stuexen 4023163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4024163953Srrs goto abort_out_now; 4025163953Srrs } 4026166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4027171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4028172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4029246588Stuexen sctp_stop_timers_for_shutdown(stcb); 4030246588Stuexen if (asoc->alternate) { 4031246588Stuexen netp = asoc->alternate; 4032246588Stuexen } else { 4033246588Stuexen netp = asoc->primary_destination; 4034246588Stuexen } 4035224641Stuexen sctp_send_shutdown_ack(stcb, netp); 4036163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4037224641Stuexen stcb->sctp_ep, stcb, netp); 4038163953Srrs } 4039163953Srrs } 4040189371Srrs /*********************************************/ 4041189371Srrs /* Here we perform PR-SCTP procedures */ 4042189371Srrs /* (section 4.2) */ 4043189371Srrs /*********************************************/ 4044189371Srrs /* C1. update advancedPeerAckPoint */ 4045216825Stuexen if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4046189371Srrs asoc->advanced_peer_ack_point = cumack; 4047189371Srrs } 4048185694Srrs /* PR-Sctp issues need to be addressed too */ 4049270357Stuexen if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4050185694Srrs struct sctp_tmit_chunk *lchk; 4051185694Srrs uint32_t old_adv_peer_ack_point; 4052185694Srrs 4053185694Srrs old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4054185694Srrs lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4055185694Srrs /* C3. 
See if we need to send a Fwd-TSN */ 4056216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4057185694Srrs /* 4058218129Srrs * ISSUE with ECN, see FWD-TSN processing. 4059185694Srrs */ 4060216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4061185694Srrs send_forward_tsn(stcb, asoc); 4062189790Srrs } else if (lchk) { 4063189790Srrs /* try to FR fwd-tsn's that get lost too */ 4064210599Srrs if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4065189790Srrs send_forward_tsn(stcb, asoc); 4066189790Srrs } 4067185694Srrs } 4068185694Srrs } 4069185694Srrs if (lchk) { 4070185694Srrs /* Assure a timer is up */ 4071185694Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4072185694Srrs stcb->sctp_ep, stcb, lchk->whoTo); 4073185694Srrs } 4074185694Srrs } 4075179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4076170744Srrs sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4077170744Srrs rwnd, 4078170744Srrs stcb->asoc.peers_rwnd, 4079170744Srrs stcb->asoc.total_flight, 4080170744Srrs stcb->asoc.total_output_queue_size); 4081170744Srrs } 4082163953Srrs} 4083163953Srrs 4084163953Srrsvoid 4085202526Stuexensctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4086228653Stuexen struct sctp_tcb *stcb, 4087202526Stuexen uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4088202526Stuexen int *abort_now, uint8_t flags, 4089218186Srrs uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4090163953Srrs{ 4091163953Srrs struct sctp_association *asoc; 4092163953Srrs struct sctp_tmit_chunk *tp1, *tp2; 4093202526Stuexen uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4094163953Srrs uint16_t wake_him = 0; 4095168709Srrs uint32_t send_s = 0; 4096163953Srrs long j; 4097163953Srrs int accum_moved = 0; 4098163953Srrs int will_exit_fast_recovery = 0; 4099168124Srrs uint32_t a_rwnd, old_rwnd; 4100168124Srrs int win_probe_recovery = 0; 4101168709Srrs int win_probe_recovered = 0; 
4102163953Srrs struct sctp_nets *net = NULL; 4103168299Srrs int done_once; 4104219397Srrs int rto_ok = 1; 4105163953Srrs uint8_t reneged_all = 0; 4106163953Srrs uint8_t cmt_dac_flag; 4107163953Srrs 4108163953Srrs /* 4109163953Srrs * we take any chance we can to service our queues since we cannot 4110163953Srrs * get awoken when the socket is read from :< 4111163953Srrs */ 4112163953Srrs /* 4113163953Srrs * Now perform the actual SACK handling: 1) Verify that it is not an 4114163953Srrs * old sack, if so discard. 2) If there is nothing left in the send 4115163953Srrs * queue (cum-ack is equal to last acked) then you have a duplicate 4116163953Srrs * too, update any rwnd change and verify no timers are running. 4117163953Srrs * then return. 3) Process any new consequtive data i.e. cum-ack 4118163953Srrs * moved process these first and note that it moved. 4) Process any 4119163953Srrs * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4120163953Srrs * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4121163953Srrs * sync up flightsizes and things, stop all timers and also check 4122163953Srrs * for shutdown_pending state. If so then go ahead and send off the 4123163953Srrs * shutdown. If in shutdown recv, send off the shutdown-ack and 4124163953Srrs * start that timer, Ret. 9) Strike any non-acked things and do FR 4125163953Srrs * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4126163953Srrs * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4127163953Srrs * if in shutdown_recv state. 
4128163953Srrs */ 4129163953Srrs SCTP_TCB_LOCK_ASSERT(stcb); 4130163953Srrs /* CMT DAC algo */ 4131163953Srrs this_sack_lowest_newack = 0; 4132163953Srrs SCTP_STAT_INCR(sctps_slowpath_sack); 4133202526Stuexen last_tsn = cum_ack; 4134202526Stuexen cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4135171477Srrs#ifdef SCTP_ASOCLOG_OF_TSNS 4136171477Srrs stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4137171477Srrs stcb->asoc.cumack_log_at++; 4138171477Srrs if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4139171477Srrs stcb->asoc.cumack_log_at = 0; 4140171477Srrs } 4141171477Srrs#endif 4142169208Srrs a_rwnd = rwnd; 4143163953Srrs 4144202526Stuexen if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4145202526Stuexen sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4146202526Stuexen rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4147202526Stuexen } 4148168124Srrs old_rwnd = stcb->asoc.peers_rwnd; 4149179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4150171943Srrs sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4151171943Srrs stcb->asoc.overall_error_count, 4152171943Srrs 0, 4153171943Srrs SCTP_FROM_SCTP_INDATA, 4154171943Srrs __LINE__); 4155171943Srrs } 4156163953Srrs stcb->asoc.overall_error_count = 0; 4157163953Srrs asoc = &stcb->asoc; 4158179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4159170744Srrs sctp_log_sack(asoc->last_acked_seq, 4160170744Srrs cum_ack, 4161170744Srrs 0, 4162170744Srrs num_seg, 4163170744Srrs num_dup, 4164170744Srrs SCTP_LOG_NEW_SACK); 4165170744Srrs } 4166224641Stuexen if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4167202526Stuexen uint16_t i; 4168170781Srrs uint32_t *dupdata, dblock; 4169163953Srrs 4170202526Stuexen for (i = 0; i < num_dup; i++) { 4171202526Stuexen dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4172170781Srrs sizeof(uint32_t), (uint8_t *) & dblock); 4173202526Stuexen 
if (dupdata == NULL) { 4174202526Stuexen break; 4175163953Srrs } 4176202526Stuexen sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4177163953Srrs } 4178163953Srrs } 4179179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4180168709Srrs /* reality check */ 4181168709Srrs if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4182168709Srrs tp1 = TAILQ_LAST(&asoc->sent_queue, 4183168709Srrs sctpchunk_listhead); 4184168709Srrs send_s = tp1->rec.data.TSN_seq + 1; 4185168709Srrs } else { 4186206137Stuexen tp1 = NULL; 4187168709Srrs send_s = asoc->sending_seq; 4188168709Srrs } 4189216825Stuexen if (SCTP_TSN_GE(cum_ack, send_s)) { 4190267723Stuexen struct mbuf *op_err; 4191267723Stuexen char msg[SCTP_DIAG_INFO_LEN]; 4192168709Srrs 4193163953Srrs /* 4194163953Srrs * no way, we have not even sent this TSN out yet. 4195163953Srrs * Peer is hopelessly messed up with us. 4196163953Srrs */ 4197234995Stuexen SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4198206137Stuexen cum_ack, send_s); 4199206137Stuexen if (tp1) { 4200234995Stuexen SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n", 4201240148Stuexen tp1->rec.data.TSN_seq, (void *)tp1); 4202206137Stuexen } 4203163953Srrs hopeless_peer: 4204163953Srrs *abort_now = 1; 4205163953Srrs /* XXX */ 4206283740Stuexen snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4207267723Stuexen cum_ack, send_s); 4208267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4209283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27; 4210267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4211163953Srrs return; 4212163953Srrs } 4213163953Srrs } 4214163953Srrs /**********************/ 4215163953Srrs /* 1) check the range */ 4216163953Srrs /**********************/ 4217216825Stuexen if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4218163953Srrs /* acking something behind */ 4219163953Srrs return; 4220163953Srrs } 4221163953Srrs /* 
update the Rwnd of the peer */ 4222163953Srrs if (TAILQ_EMPTY(&asoc->sent_queue) && 4223163953Srrs TAILQ_EMPTY(&asoc->send_queue) && 4224202526Stuexen (asoc->stream_queue_cnt == 0)) { 4225163953Srrs /* nothing left on send/sent and strmq */ 4226179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4227170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4228170744Srrs asoc->peers_rwnd, 0, 0, a_rwnd); 4229170744Srrs } 4230163953Srrs asoc->peers_rwnd = a_rwnd; 4231163953Srrs if (asoc->sent_queue_retran_cnt) { 4232163953Srrs asoc->sent_queue_retran_cnt = 0; 4233163953Srrs } 4234163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4235163953Srrs /* SWS sender side engages */ 4236163953Srrs asoc->peers_rwnd = 0; 4237163953Srrs } 4238163953Srrs /* stop any timers */ 4239163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4240163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4241283822Stuexen stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4242163953Srrs net->partial_bytes_acked = 0; 4243163953Srrs net->flight_size = 0; 4244163953Srrs } 4245163953Srrs asoc->total_flight = 0; 4246163953Srrs asoc->total_flight_count = 0; 4247163953Srrs return; 4248163953Srrs } 4249163953Srrs /* 4250163953Srrs * We init netAckSz and netAckSz2 to 0. These are used to track 2 4251163953Srrs * things. The total byte count acked is tracked in netAckSz AND 4252163953Srrs * netAck2 is used to track the total bytes acked that are un- 4253163953Srrs * amibguious and were never retransmitted. We track these on a per 4254163953Srrs * destination address basis. 
4255163953Srrs */ 4256163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4257218072Srrs if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4258218072Srrs /* Drag along the window_tsn for cwr's */ 4259218072Srrs net->cwr_window_tsn = cum_ack; 4260218072Srrs } 4261163953Srrs net->prev_cwnd = net->cwnd; 4262163953Srrs net->net_ack = 0; 4263163953Srrs net->net_ack2 = 0; 4264163953Srrs 4265163953Srrs /* 4266167598Srrs * CMT: Reset CUC and Fast recovery algo variables before 4267167598Srrs * SACK processing 4268163953Srrs */ 4269163953Srrs net->new_pseudo_cumack = 0; 4270163953Srrs net->will_exit_fast_recovery = 0; 4271219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4272219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4273219057Srrs } 4274163953Srrs } 4275163953Srrs /* process the new consecutive TSN first */ 4276216822Stuexen TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4277216825Stuexen if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) { 4278163953Srrs if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4279163953Srrs accum_moved = 1; 4280163953Srrs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4281163953Srrs /* 4282163953Srrs * If it is less than ACKED, it is 4283163953Srrs * now no-longer in flight. Higher 4284163953Srrs * values may occur during marking 4285163953Srrs */ 4286163953Srrs if ((tp1->whoTo->dest_state & 4287163953Srrs SCTP_ADDR_UNCONFIRMED) && 4288163953Srrs (tp1->snd_count < 2)) { 4289163953Srrs /* 4290163953Srrs * If there was no retran 4291163953Srrs * and the address is 4292163953Srrs * un-confirmed and we sent 4293163953Srrs * there and are now 4294163953Srrs * sacked.. its confirmed, 4295163953Srrs * mark it so. 
4296163953Srrs */ 4297163953Srrs tp1->whoTo->dest_state &= 4298163953Srrs ~SCTP_ADDR_UNCONFIRMED; 4299163953Srrs } 4300168709Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4301179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4302170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4303170744Srrs tp1->whoTo->flight_size, 4304170744Srrs tp1->book_size, 4305170744Srrs (uintptr_t) tp1->whoTo, 4306170744Srrs tp1->rec.data.TSN_seq); 4307170744Srrs } 4308168709Srrs sctp_flight_size_decrease(tp1); 4309168709Srrs sctp_total_flight_decrease(stcb, tp1); 4310219057Srrs if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4311219057Srrs (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4312219057Srrs tp1); 4313219057Srrs } 4314163953Srrs } 4315163953Srrs tp1->whoTo->net_ack += tp1->send_size; 4316163953Srrs 4317163953Srrs /* CMT SFR and DAC algos */ 4318163953Srrs this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4319163953Srrs tp1->whoTo->saw_newack = 1; 4320163953Srrs 4321163953Srrs if (tp1->snd_count < 2) { 4322163953Srrs /* 4323163953Srrs * True non-retransmited 4324163953Srrs * chunk 4325163953Srrs */ 4326163953Srrs tp1->whoTo->net_ack2 += 4327163953Srrs tp1->send_size; 4328163953Srrs 4329163953Srrs /* update RTO too? */ 4330163953Srrs if (tp1->do_rtt) { 4331219397Srrs if (rto_ok) { 4332219397Srrs tp1->whoTo->RTO = 4333219397Srrs sctp_calculate_rto(stcb, 4334219397Srrs asoc, tp1->whoTo, 4335219397Srrs &tp1->sent_rcv_time, 4336219397Srrs sctp_align_safe_nocopy, 4337219397Srrs SCTP_RTT_FROM_DATA); 4338219397Srrs rto_ok = 0; 4339219397Srrs } 4340219397Srrs if (tp1->whoTo->rto_needed == 0) { 4341219397Srrs tp1->whoTo->rto_needed = 1; 4342219397Srrs } 4343163953Srrs tp1->do_rtt = 0; 4344163953Srrs } 4345163953Srrs } 4346163953Srrs /* 4347163953Srrs * CMT: CUCv2 algorithm. 
From the 4348163953Srrs * cumack'd TSNs, for each TSN being 4349163953Srrs * acked for the first time, set the 4350163953Srrs * following variables for the 4351163953Srrs * corresp destination. 4352163953Srrs * new_pseudo_cumack will trigger a 4353163953Srrs * cwnd update. 4354163953Srrs * find_(rtx_)pseudo_cumack will 4355163953Srrs * trigger search for the next 4356163953Srrs * expected (rtx-)pseudo-cumack. 4357163953Srrs */ 4358163953Srrs tp1->whoTo->new_pseudo_cumack = 1; 4359163953Srrs tp1->whoTo->find_pseudo_cumack = 1; 4360163953Srrs tp1->whoTo->find_rtx_pseudo_cumack = 1; 4361163953Srrs 4362163953Srrs 4363179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4364170744Srrs sctp_log_sack(asoc->last_acked_seq, 4365170744Srrs cum_ack, 4366170744Srrs tp1->rec.data.TSN_seq, 4367170744Srrs 0, 4368170744Srrs 0, 4369170744Srrs SCTP_LOG_TSN_ACKED); 4370170744Srrs } 4371179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4372170744Srrs sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4373170744Srrs } 4374163953Srrs } 4375163953Srrs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4376163953Srrs sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4377163953Srrs#ifdef SCTP_AUDITING_ENABLED 4378163953Srrs sctp_audit_log(0xB3, 4379163953Srrs (asoc->sent_queue_retran_cnt & 0x000000ff)); 4380163953Srrs#endif 4381163953Srrs } 4382167598Srrs if (tp1->rec.data.chunk_was_revoked) { 4383167598Srrs /* deflate the cwnd */ 4384167598Srrs tp1->whoTo->cwnd -= tp1->book_size; 4385167598Srrs tp1->rec.data.chunk_was_revoked = 0; 4386167598Srrs } 4387243157Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4388243157Stuexen tp1->sent = SCTP_DATAGRAM_ACKED; 4389243157Stuexen } 4390163953Srrs } 4391163953Srrs } else { 4392163953Srrs break; 4393163953Srrs } 4394163953Srrs } 4395163953Srrs biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4396163953Srrs /* always set this up to cum-ack */ 4397163953Srrs 
asoc->this_sack_highest_gap = last_tsn; 4398163953Srrs 4399202526Stuexen if ((num_seg > 0) || (num_nr_seg > 0)) { 4400163953Srrs 4401163953Srrs /* 4402163953Srrs * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4403163953Srrs * to be greater than the cumack. Also reset saw_newack to 0 4404163953Srrs * for all dests. 4405163953Srrs */ 4406163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4407163953Srrs net->saw_newack = 0; 4408163953Srrs net->this_sack_highest_newack = last_tsn; 4409163953Srrs } 4410163953Srrs 4411163953Srrs /* 4412163953Srrs * thisSackHighestGap will increase while handling NEW 4413163953Srrs * segments this_sack_highest_newack will increase while 4414163953Srrs * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4415163953Srrs * used for CMT DAC algo. saw_newack will also change. 4416163953Srrs */ 4417202526Stuexen if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4418202526Stuexen &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4419228653Stuexen num_seg, num_nr_seg, &rto_ok)) { 4420202526Stuexen wake_him++; 4421202526Stuexen } 4422179783Srrs if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4423163953Srrs /* 4424163953Srrs * validate the biggest_tsn_acked in the gap acks if 4425163953Srrs * strict adherence is wanted. 4426163953Srrs */ 4427216825Stuexen if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4428163953Srrs /* 4429163953Srrs * peer is either confused or we are under 4430163953Srrs * attack. We must abort. 4431163953Srrs */ 4432234995Stuexen SCTP_PRINTF("Hopeless peer! 
biggest_tsn_acked:%x largest seq:%x\n", 4433234995Stuexen biggest_tsn_acked, send_s); 4434163953Srrs goto hopeless_peer; 4435163953Srrs } 4436163953Srrs } 4437163953Srrs } 4438163953Srrs /*******************************************/ 4439163953Srrs /* cancel ALL T3-send timer if accum moved */ 4440163953Srrs /*******************************************/ 4441216669Stuexen if (asoc->sctp_cmt_on_off > 0) { 4442163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4443163953Srrs if (net->new_pseudo_cumack) 4444163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4445165220Srrs stcb, net, 4446283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4447163953Srrs 4448163953Srrs } 4449163953Srrs } else { 4450163953Srrs if (accum_moved) { 4451163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4452163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4453283822Stuexen stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4454163953Srrs } 4455163953Srrs } 4456163953Srrs } 4457163953Srrs /********************************************/ 4458216188Stuexen /* drop the acked chunks from the sentqueue */ 4459163953Srrs /********************************************/ 4460163953Srrs asoc->last_acked_seq = cum_ack; 4461163953Srrs 4462216669Stuexen TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4463216825Stuexen if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { 4464163953Srrs break; 4465163953Srrs } 4466243157Stuexen if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4467242714Stuexen if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 4468242714Stuexen asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 4469242714Stuexen#ifdef INVARIANTS 4470242714Stuexen } else { 4471242714Stuexen panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 4472242714Stuexen#endif 4473242714Stuexen } 4474163953Srrs } 4475294210Stuexen if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 4476294210Stuexen 
(asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 4477294210Stuexen TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 4478294210Stuexen asoc->trigger_reset = 1; 4479294210Stuexen } 4480163953Srrs TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4481255190Stuexen if (PR_SCTP_ENABLED(tp1->flags)) { 4482163953Srrs if (asoc->pr_sctp_cnt != 0) 4483163953Srrs asoc->pr_sctp_cnt--; 4484163953Srrs } 4485216669Stuexen asoc->sent_queue_cnt--; 4486163953Srrs if (tp1->data) { 4487172156Srrs /* sa_ignore NO_NULL_CHK */ 4488163953Srrs sctp_free_bufspace(stcb, asoc, tp1, 1); 4489163953Srrs sctp_m_freem(tp1->data); 4490216669Stuexen tp1->data = NULL; 4491270357Stuexen if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4492163953Srrs asoc->sent_queue_cnt_removeable--; 4493163953Srrs } 4494163953Srrs } 4495179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4496170744Srrs sctp_log_sack(asoc->last_acked_seq, 4497170744Srrs cum_ack, 4498170744Srrs tp1->rec.data.TSN_seq, 4499170744Srrs 0, 4500170744Srrs 0, 4501170744Srrs SCTP_LOG_FREE_SENT); 4502170744Srrs } 4503221627Stuexen sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4504163953Srrs wake_him++; 4505216669Stuexen } 4506216669Stuexen if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4507216669Stuexen#ifdef INVARIANTS 4508216669Stuexen panic("Warning flight size is postive and should be 0"); 4509216669Stuexen#else 4510216669Stuexen SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4511216669Stuexen asoc->total_flight); 4512216669Stuexen#endif 4513216669Stuexen asoc->total_flight = 0; 4514216669Stuexen } 4515172156Srrs /* sa_ignore NO_NULL_CHK */ 4516163953Srrs if ((wake_him) && (stcb->sctp_socket)) { 4517237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4518172090Srrs struct socket *so; 4519172090Srrs 4520172090Srrs#endif 4521163953Srrs SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 
4522179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4523228653Stuexen sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4524170744Srrs } 4525237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4526172090Srrs so = SCTP_INP_SO(stcb->sctp_ep); 4527172090Srrs atomic_add_int(&stcb->asoc.refcnt, 1); 4528172090Srrs SCTP_TCB_UNLOCK(stcb); 4529172090Srrs SCTP_SOCKET_LOCK(so, 1); 4530172090Srrs SCTP_TCB_LOCK(stcb); 4531172090Srrs atomic_subtract_int(&stcb->asoc.refcnt, 1); 4532172090Srrs if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4533172090Srrs /* assoc was freed while we were unlocked */ 4534172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 4535172090Srrs return; 4536172090Srrs } 4537172090Srrs#endif 4538163953Srrs sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4539237565Stuexen#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4540172090Srrs SCTP_SOCKET_UNLOCK(so, 1); 4541172090Srrs#endif 4542163953Srrs } else { 4543179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4544228653Stuexen sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4545170744Srrs } 4546163953Srrs } 4547163953Srrs 4548167598Srrs if (asoc->fast_retran_loss_recovery && accum_moved) { 4549216825Stuexen if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4550163953Srrs /* Setup so we will exit RFC2582 fast recovery */ 4551163953Srrs will_exit_fast_recovery = 1; 4552163953Srrs } 4553163953Srrs } 4554163953Srrs /* 4555163953Srrs * Check for revoked fragments: 4556163953Srrs * 4557163953Srrs * if Previous sack - Had no frags then we can't have any revoked if 4558163953Srrs * Previous sack - Had frag's then - If we now have frags aka 4559163953Srrs * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4560163953Srrs * some of them. else - The peer revoked all ACKED fragments, since 4561163953Srrs * we had some before and now we have NONE. 
4562163953Srrs */ 4563163953Srrs 4564216188Stuexen if (num_seg) { 4565168709Srrs sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4566216188Stuexen asoc->saw_sack_with_frags = 1; 4567216188Stuexen } else if (asoc->saw_sack_with_frags) { 4568163953Srrs int cnt_revoked = 0; 4569163953Srrs 4570216822Stuexen /* Peer revoked all dg's marked or acked */ 4571216822Stuexen TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4572216822Stuexen if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4573216822Stuexen tp1->sent = SCTP_DATAGRAM_SENT; 4574216822Stuexen if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4575216822Stuexen sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4576216822Stuexen tp1->whoTo->flight_size, 4577216822Stuexen tp1->book_size, 4578216822Stuexen (uintptr_t) tp1->whoTo, 4579216822Stuexen tp1->rec.data.TSN_seq); 4580163953Srrs } 4581216822Stuexen sctp_flight_size_increase(tp1); 4582216822Stuexen sctp_total_flight_increase(stcb, tp1); 4583216822Stuexen tp1->rec.data.chunk_was_revoked = 1; 4584216822Stuexen /* 4585216822Stuexen * To ensure that this increase in 4586216822Stuexen * flightsize, which is artificial, does not 4587216822Stuexen * throttle the sender, we also increase the 4588216822Stuexen * cwnd artificially. 
4589216822Stuexen */ 4590216822Stuexen tp1->whoTo->cwnd += tp1->book_size; 4591216822Stuexen cnt_revoked++; 4592163953Srrs } 4593163953Srrs } 4594216822Stuexen if (cnt_revoked) { 4595216822Stuexen reneged_all = 1; 4596216822Stuexen } 4597163953Srrs asoc->saw_sack_with_frags = 0; 4598163953Srrs } 4599216188Stuexen if (num_nr_seg > 0) 4600216188Stuexen asoc->saw_sack_with_nr_frags = 1; 4601163953Srrs else 4602216188Stuexen asoc->saw_sack_with_nr_frags = 0; 4603163953Srrs 4604171440Srrs /* JRS - Use the congestion control given in the CC module */ 4605224641Stuexen if (ecne_seen == 0) { 4606224641Stuexen TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4607224641Stuexen if (net->net_ack2 > 0) { 4608224641Stuexen /* 4609224641Stuexen * Karn's rule applies to clearing error 4610224641Stuexen * count, this is optional. 4611224641Stuexen */ 4612224641Stuexen net->error_count = 0; 4613224641Stuexen if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4614224641Stuexen /* addr came good */ 4615224641Stuexen net->dest_state |= SCTP_ADDR_REACHABLE; 4616224641Stuexen sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4617235414Stuexen 0, (void *)net, SCTP_SO_NOT_LOCKED); 4618224641Stuexen } 4619224641Stuexen if (net == stcb->asoc.primary_destination) { 4620224641Stuexen if (stcb->asoc.alternate) { 4621224641Stuexen /* 4622224641Stuexen * release the alternate, 4623224641Stuexen * primary is good 4624224641Stuexen */ 4625224641Stuexen sctp_free_remote_addr(stcb->asoc.alternate); 4626224641Stuexen stcb->asoc.alternate = NULL; 4627224641Stuexen } 4628224641Stuexen } 4629224641Stuexen if (net->dest_state & SCTP_ADDR_PF) { 4630224641Stuexen net->dest_state &= ~SCTP_ADDR_PF; 4631283822Stuexen sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4632283822Stuexen stcb->sctp_ep, stcb, net, 4633283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4634224641Stuexen sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4635224641Stuexen asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 
4636224641Stuexen /* Done with this net */ 4637224641Stuexen net->net_ack = 0; 4638224641Stuexen } 4639224641Stuexen /* restore any doubled timers */ 4640224641Stuexen net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4641224641Stuexen if (net->RTO < stcb->asoc.minrto) { 4642224641Stuexen net->RTO = stcb->asoc.minrto; 4643224641Stuexen } 4644224641Stuexen if (net->RTO > stcb->asoc.maxrto) { 4645224641Stuexen net->RTO = stcb->asoc.maxrto; 4646224641Stuexen } 4647224641Stuexen } 4648224641Stuexen } 4649218186Srrs asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4650224641Stuexen } 4651163953Srrs if (TAILQ_EMPTY(&asoc->sent_queue)) { 4652163953Srrs /* nothing left in-flight */ 4653163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4654163953Srrs /* stop all timers */ 4655163953Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4656283822Stuexen stcb, net, 4657283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 4658163953Srrs net->flight_size = 0; 4659163953Srrs net->partial_bytes_acked = 0; 4660163953Srrs } 4661163953Srrs asoc->total_flight = 0; 4662163953Srrs asoc->total_flight_count = 0; 4663163953Srrs } 4664163953Srrs /**********************************/ 4665163953Srrs /* Now what about shutdown issues */ 4666163953Srrs /**********************************/ 4667163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4668163953Srrs /* nothing left on sendqueue.. 
consider done */ 4669179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4670170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4671170744Srrs asoc->peers_rwnd, 0, 0, a_rwnd); 4672170744Srrs } 4673163953Srrs asoc->peers_rwnd = a_rwnd; 4674163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4675163953Srrs /* SWS sender side engages */ 4676163953Srrs asoc->peers_rwnd = 0; 4677163953Srrs } 4678163953Srrs /* clean up */ 4679163953Srrs if ((asoc->stream_queue_cnt == 1) && 4680163953Srrs ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4681163953Srrs (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4682163953Srrs (asoc->locked_on_sending) 4683163953Srrs ) { 4684163953Srrs struct sctp_stream_queue_pending *sp; 4685163953Srrs 4686163953Srrs /* 4687163953Srrs * I may be in a state where we got all across.. but 4688163953Srrs * cannot write more due to a shutdown... we abort 4689163953Srrs * since the user did not indicate EOR in this case. 4690163953Srrs */ 4691163953Srrs sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4692163953Srrs sctp_streamhead); 4693171990Srrs if ((sp) && (sp->length == 0)) { 4694163953Srrs asoc->locked_on_sending = NULL; 4695171990Srrs if (sp->msg_is_complete) { 4696171990Srrs asoc->stream_queue_cnt--; 4697171990Srrs } else { 4698171990Srrs asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4699171990Srrs asoc->stream_queue_cnt--; 4700171990Srrs } 4701163953Srrs } 4702163953Srrs } 4703163953Srrs if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4704163953Srrs (asoc->stream_queue_cnt == 0)) { 4705163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4706163953Srrs /* Need to abort here */ 4707267723Stuexen struct mbuf *op_err; 4708163953Srrs 4709163953Srrs abort_out_now: 4710163953Srrs *abort_now = 1; 4711163953Srrs /* XXX */ 4712267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4713283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + 
SCTP_LOC_33; 4714267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4715163953Srrs return; 4716163953Srrs } else { 4717224641Stuexen struct sctp_nets *netp; 4718224641Stuexen 4719166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4720166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4721166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4722166675Srrs } 4723171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4724172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4725163953Srrs sctp_stop_timers_for_shutdown(stcb); 4726246588Stuexen if (asoc->alternate) { 4727246588Stuexen netp = asoc->alternate; 4728246588Stuexen } else { 4729246588Stuexen netp = asoc->primary_destination; 4730246588Stuexen } 4731224641Stuexen sctp_send_shutdown(stcb, netp); 4732163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4733224641Stuexen stcb->sctp_ep, stcb, netp); 4734163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4735224641Stuexen stcb->sctp_ep, stcb, netp); 4736163953Srrs } 4737163953Srrs return; 4738163953Srrs } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4739163953Srrs (asoc->stream_queue_cnt == 0)) { 4740224641Stuexen struct sctp_nets *netp; 4741224641Stuexen 4742163953Srrs if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4743163953Srrs goto abort_out_now; 4744163953Srrs } 4745166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4746171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4747172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4748246588Stuexen sctp_stop_timers_for_shutdown(stcb); 4749246588Stuexen if (asoc->alternate) { 4750246588Stuexen netp = asoc->alternate; 4751246588Stuexen } else { 4752246588Stuexen netp = asoc->primary_destination; 4753246588Stuexen } 4754224641Stuexen sctp_send_shutdown_ack(stcb, netp); 4755163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4756224641Stuexen stcb->sctp_ep, stcb, netp); 
4757163953Srrs return; 4758163953Srrs } 4759163953Srrs } 4760163953Srrs /* 4761163953Srrs * Now here we are going to recycle net_ack for a different use... 4762163953Srrs * HEADS UP. 4763163953Srrs */ 4764163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4765163953Srrs net->net_ack = 0; 4766163953Srrs } 4767163953Srrs 4768163953Srrs /* 4769163953Srrs * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4770163953Srrs * to be done. Setting this_sack_lowest_newack to the cum_ack will 4771163953Srrs * automatically ensure that. 4772163953Srrs */ 4773216669Stuexen if ((asoc->sctp_cmt_on_off > 0) && 4774211944Stuexen SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 4775211944Stuexen (cmt_dac_flag == 0)) { 4776163953Srrs this_sack_lowest_newack = cum_ack; 4777163953Srrs } 4778202526Stuexen if ((num_seg > 0) || (num_nr_seg > 0)) { 4779163953Srrs sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4780163953Srrs biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4781163953Srrs } 4782171440Srrs /* JRS - Use the congestion control given in the CC module */ 4783171440Srrs asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4784163953Srrs 4785163953Srrs /* Now are we exiting loss recovery ? 
*/ 4786163953Srrs if (will_exit_fast_recovery) { 4787163953Srrs /* Ok, we must exit fast recovery */ 4788163953Srrs asoc->fast_retran_loss_recovery = 0; 4789163953Srrs } 4790163953Srrs if ((asoc->sat_t3_loss_recovery) && 4791216825Stuexen SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 4792163953Srrs /* end satellite t3 loss recovery */ 4793163953Srrs asoc->sat_t3_loss_recovery = 0; 4794163953Srrs } 4795167598Srrs /* 4796167598Srrs * CMT Fast recovery 4797167598Srrs */ 4798163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4799163953Srrs if (net->will_exit_fast_recovery) { 4800163953Srrs /* Ok, we must exit fast recovery */ 4801163953Srrs net->fast_retran_loss_recovery = 0; 4802163953Srrs } 4803163953Srrs } 4804163953Srrs 4805163953Srrs /* Adjust and set the new rwnd value */ 4806179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4807170744Srrs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4808210599Srrs asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 4809170744Srrs } 4810163953Srrs asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 4811210599Srrs (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4812163953Srrs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4813163953Srrs /* SWS sender side engages */ 4814163953Srrs asoc->peers_rwnd = 0; 4815163953Srrs } 4816168124Srrs if (asoc->peers_rwnd > old_rwnd) { 4817168124Srrs win_probe_recovery = 1; 4818168124Srrs } 4819163953Srrs /* 4820163953Srrs * Now we must setup so we have a timer up for anyone with 4821163953Srrs * outstanding data. 
4822163953Srrs */ 4823168299Srrs done_once = 0; 4824165220Srrsagain: 4825165220Srrs j = 0; 4826163953Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4827168124Srrs if (win_probe_recovery && (net->window_probe)) { 4828168709Srrs win_probe_recovered = 1; 4829168124Srrs /*- 4830168124Srrs * Find first chunk that was used with 4831168124Srrs * window probe and clear the event. Put 4832168124Srrs * it back into the send queue as if has 4833168124Srrs * not been sent. 4834168124Srrs */ 4835168124Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4836168124Srrs if (tp1->window_probe) { 4837228653Stuexen sctp_window_probe_recovery(stcb, asoc, tp1); 4838168124Srrs break; 4839168124Srrs } 4840168124Srrs } 4841168124Srrs } 4842163953Srrs if (net->flight_size) { 4843165220Srrs j++; 4844202526Stuexen if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4845202526Stuexen sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4846202526Stuexen stcb->sctp_ep, stcb, net); 4847202526Stuexen } 4848189444Srrs if (net->window_probe) { 4849202526Stuexen net->window_probe = 0; 4850189444Srrs } 4851168709Srrs } else { 4852189444Srrs if (net->window_probe) { 4853189444Srrs /* 4854189444Srrs * In window probes we must assure a timer 4855189444Srrs * is still running there 4856189444Srrs */ 4857189444Srrs if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4858189444Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4859189444Srrs stcb->sctp_ep, stcb, net); 4860189444Srrs 4861189444Srrs } 4862189444Srrs } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4863168709Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4864168709Srrs stcb, net, 4865283822Stuexen SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); 4866168709Srrs } 4867163953Srrs } 4868163953Srrs } 4869168299Srrs if ((j == 0) && 4870168299Srrs (!TAILQ_EMPTY(&asoc->sent_queue)) && 4871168299Srrs (asoc->sent_queue_retran_cnt == 0) && 4872168709Srrs (win_probe_recovered == 0) && 4873168299Srrs (done_once == 0)) { 4874189790Srrs /* 4875189790Srrs * 
huh, this should not happen unless all packets are 4876189790Srrs * PR-SCTP and marked to skip of course. 4877189790Srrs */ 4878189790Srrs if (sctp_fs_audit(asoc)) { 4879189790Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4880189790Srrs net->flight_size = 0; 4881165220Srrs } 4882189790Srrs asoc->total_flight = 0; 4883189790Srrs asoc->total_flight_count = 0; 4884189790Srrs asoc->sent_queue_retran_cnt = 0; 4885189790Srrs TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4886189790Srrs if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4887189790Srrs sctp_flight_size_increase(tp1); 4888189790Srrs sctp_total_flight_increase(stcb, tp1); 4889189790Srrs } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4890208854Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4891189790Srrs } 4892189790Srrs } 4893165220Srrs } 4894168299Srrs done_once = 1; 4895165220Srrs goto again; 4896165220Srrs } 4897202526Stuexen /*********************************************/ 4898202526Stuexen /* Here we perform PR-SCTP procedures */ 4899202526Stuexen /* (section 4.2) */ 4900202526Stuexen /*********************************************/ 4901202526Stuexen /* C1. update advancedPeerAckPoint */ 4902216825Stuexen if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 4903189371Srrs asoc->advanced_peer_ack_point = cum_ack; 4904189371Srrs } 4905185694Srrs /* C2. try to further move advancedPeerAckPoint ahead */ 4906270357Stuexen if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4907185694Srrs struct sctp_tmit_chunk *lchk; 4908185694Srrs uint32_t old_adv_peer_ack_point; 4909185694Srrs 4910185694Srrs old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4911185694Srrs lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4912185694Srrs /* C3. See if we need to send a Fwd-TSN */ 4913216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 4914185694Srrs /* 4915218129Srrs * ISSUE with ECN, see FWD-TSN processing. 
4916185694Srrs */ 
4917189790Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 
4918189790Srrs sctp_misc_ints(SCTP_FWD_TSN_CHECK, 
4919189790Srrs 0xee, cum_ack, asoc->advanced_peer_ack_point, 
4920189790Srrs old_adv_peer_ack_point); 
4921189790Srrs } 
4922216825Stuexen if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 
4923185694Srrs send_forward_tsn(stcb, asoc); 
4924189790Srrs } else if (lchk) { 
4925189790Srrs /* try to FR fwd-tsn's that get lost too */ 
4926210599Srrs if (lchk->rec.data.fwd_tsn_cnt >= 3) { 
4927189790Srrs send_forward_tsn(stcb, asoc); 
4928189790Srrs } 
4929185694Srrs } 
4930185694Srrs } 
4931185694Srrs if (lchk) { 
4932185694Srrs /* Assure a timer is up */ 
4933185694Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 
4934185694Srrs stcb->sctp_ep, stcb, lchk->whoTo); 
4935185694Srrs } 
4936185694Srrs } 
4937179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 
4938170744Srrs sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 
4939170744Srrs a_rwnd, 
4940170744Srrs stcb->asoc.peers_rwnd, 
4941170744Srrs stcb->asoc.total_flight, 
4942170744Srrs stcb->asoc.total_output_queue_size); 
4943170744Srrs } 
4944163953Srrs} 
4945163953Srrs 
/*
 * sctp_update_acked() - process the cumulative TSN ack carried in a
 * received SHUTDOWN chunk.  A SHUTDOWN acks data like a SACK with no
 * gap reports, so we synthesize an a_rwnd that leaves the peer's
 * advertised window unchanged (current rwnd plus everything still in
 * flight) and hand the cum-ack to the express SACK handler.
 * abort_flag is set by the callee if the association must be aborted.
 */
4946163953Srrsvoid 
4947228653Stuexensctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 
4948163953Srrs{ 
4949163953Srrs /* Copy cum-ack */ 
4950163953Srrs uint32_t cum_ack, a_rwnd; 
4951163953Srrs 
4952163953Srrs cum_ack = ntohl(cp->cumulative_tsn_ack); 
4953163953Srrs /* Arrange so a_rwnd does NOT change */ 
4954163953Srrs a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 
4955163953Srrs 
4956163953Srrs /* Now call the express sack handling */ 
4957218186Srrs sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 
4958163953Srrs} 
4959163953Srrs 
/*
 * sctp_kick_prsctp_reorder_queue() - flush a stream's re-ordering queue
 * after last_sequence_delivered has been moved forward (PR-SCTP skip).
 * Two passes: first deliver everything at or before the new SSN, then
 * deliver any now-contiguous run that follows it, advancing
 * last_sequence_delivered as we go.
 */
4960163953Srrsstatic void 
4961163953Srrssctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 
4962163953Srrs struct sctp_stream_in *strmin) 
4963163953Srrs{ 
4964163953Srrs struct sctp_queued_to_read 
*ctl, *nctl; 
4965163953Srrs struct sctp_association *asoc; 
4966207983Srrs uint16_t tt; 
4967163953Srrs 
4968163953Srrs asoc = &stcb->asoc; 
4969163953Srrs tt = strmin->last_sequence_delivered; 
4970163953Srrs /* 
4971163953Srrs * First deliver anything prior to and including the stream no that 
4972163953Srrs * came in 
4973163953Srrs */ 
4974216822Stuexen TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { 
4975216825Stuexen if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) { 
4976163953Srrs /* this is deliverable now */ 
4977163953Srrs TAILQ_REMOVE(&strmin->inqueue, ctl, next); 
4978163953Srrs /* subtract pending on streams */ 
4979163953Srrs asoc->size_on_all_streams -= ctl->length; 
4980163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 
4981163953Srrs /* deliver it to at least the delivery-q */ 
/*
 * NOTE(review): when sctp_socket is NULL the entry has already been
 * unlinked and uncounted but is not freed or delivered here --
 * presumably the association teardown path reclaims it; confirm.
 */
4982163953Srrs if (stcb->sctp_socket) { 
4983206137Stuexen sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 
4984163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 
4985163953Srrs ctl, 
4986195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 
4987163953Srrs } 
4988163953Srrs } else { 
4989163953Srrs /* no more delivery now. */ 
4990163953Srrs break; 
4991163953Srrs } 
4992163953Srrs } 
4993163953Srrs /* 
4994163953Srrs * now we must deliver things in queue the normal way if any are 
4995163953Srrs * now ready. 
4996163953Srrs */ 
/*
 * Second pass: walk the in-order run starting one past the cum point.
 * Unlike the first pass, this one advances last_sequence_delivered for
 * every chunk handed up.
 */
4997163953Srrs tt = strmin->last_sequence_delivered + 1; 
4998216822Stuexen TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { 
4999163953Srrs if (tt == ctl->sinfo_ssn) { 
5000163953Srrs /* this is deliverable now */ 
5001163953Srrs TAILQ_REMOVE(&strmin->inqueue, ctl, next); 
5002163953Srrs /* subtract pending on streams */ 
5003163953Srrs asoc->size_on_all_streams -= ctl->length; 
5004163953Srrs sctp_ucount_decr(asoc->cnt_on_all_streams); 
5005163953Srrs /* deliver it to at least the delivery-q */ 
5006163953Srrs strmin->last_sequence_delivered = ctl->sinfo_ssn; 
5007163953Srrs if (stcb->sctp_socket) { 
5008206137Stuexen sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 
5009163953Srrs sctp_add_to_readq(stcb->sctp_ep, stcb, 
5010163953Srrs ctl, 
5011195918Srrs &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 
5012205627Srrs 
5013163953Srrs } 
5014163953Srrs tt = strmin->last_sequence_delivered + 1; 
5015163953Srrs } else { 
5016163953Srrs break; 
5017163953Srrs } 
5018163953Srrs } 
5019163953Srrs} 
5020163953Srrs 
/*
 * sctp_flush_reassm_for_str_seq() - drop from the reassembly queue all
 * fragments of the ordered message (stream, seq) that a FWD-TSN tells
 * us to skip, fixing stream and PD-API bookkeeping as we go.
 */
5021190689Srrsstatic void 
5022190689Srrssctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 
5023190689Srrs struct sctp_association *asoc, 
5024190689Srrs uint16_t stream, uint16_t seq) 
5025190689Srrs{ 
5026216822Stuexen struct sctp_tmit_chunk *chk, *nchk; 
5027190689Srrs 
5028216822Stuexen /* For each one on here see if we need to toss it */ 
5029216822Stuexen /* 
5030216822Stuexen * For now large messages held on the reasmqueue that are complete 
5031216822Stuexen * will be tossed too. We could in theory do more work to spin 
5032216822Stuexen * through and stop after dumping one msg aka seeing the start of a 
5033216822Stuexen * new msg at the head, and call the delivery function... to see if 
5034216822Stuexen * it can be delivered... But for now we just dump everything on the 
5035216822Stuexen * queue. 
5036216822Stuexen */ 
5037216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 
5038190689Srrs /* 
5039216822Stuexen * Do not toss it if on a different stream or marked for 
5040216822Stuexen * unordered delivery in which case the stream sequence 
5041216822Stuexen * number has no meaning. 
5042190689Srrs */ 
5043216822Stuexen if ((chk->rec.data.stream_number != stream) || 
5044216822Stuexen ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) { 
5045216822Stuexen continue; 
5046216822Stuexen } 
5047216822Stuexen if (chk->rec.data.stream_seq == seq) { 
5048216822Stuexen /* It needs to be tossed */ 
5049216822Stuexen TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 
/*
 * Track the newest tossed TSN and its stream/SSN/flags in the
 * PD-API fields; the same bookkeeping is done by the reassembly
 * toss loop in sctp_handle_forward_tsn().
 */
5050216825Stuexen if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { 
5051216822Stuexen asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 
5052216822Stuexen asoc->str_of_pdapi = chk->rec.data.stream_number; 
5053216822Stuexen asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 
5054216822Stuexen asoc->fragment_flags = chk->rec.data.rcv_flags; 
5055190689Srrs } 
5056216822Stuexen asoc->size_on_reasm_queue -= chk->send_size; 
5057216822Stuexen sctp_ucount_decr(asoc->cnt_on_reasm_queue); 
5058190689Srrs 
5059216822Stuexen /* Clear up any stream problem */ 
5060216825Stuexen if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && 
5061216825Stuexen SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { 
5062190689Srrs /* 
5063216822Stuexen * We must dump forward this streams 
5064216822Stuexen * sequence number if the chunk is not 
5065216822Stuexen * unordered that is being skipped. There is 
5066216822Stuexen * a chance that if the peer does not 
5067216822Stuexen * include the last fragment in its FWD-TSN 
5068216822Stuexen * we WILL have a problem here since you 
5069216822Stuexen * would have a partial chunk in queue that 
5070216822Stuexen * may not be deliverable. Also if a Partial 
5071216822Stuexen * delivery API as started the user may get 
5072216822Stuexen * a partial chunk. The next read returning 
5073216822Stuexen * a new chunk... really ugly but I see no 
5074216822Stuexen * way around it! Maybe a notify?? 
5075190689Srrs */ 
5076216822Stuexen asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; 
5077190689Srrs } 
5078216822Stuexen if (chk->data) { 
5079216822Stuexen sctp_m_freem(chk->data); 
5080216822Stuexen chk->data = NULL; 
5081216822Stuexen } 
5082221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 
5083216825Stuexen } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) { 
5084216822Stuexen /* 
5085216822Stuexen * If the stream_seq is > than the purging one, we 
5086216822Stuexen * are done 
5087216822Stuexen */ 
5088216822Stuexen break; 
5089190689Srrs } 
5090190689Srrs } 
5091190689Srrs} 
5092190689Srrs 
5093190689Srrs 
/*
 * sctp_handle_forward_tsn() - receiver-side processing of a PR-SCTP
 * FORWARD-TSN chunk (RFC 3758); see the step list in the comment below.
 * Sets *abort_flag when the chunk is hostile/invalid and the
 * association has been aborted.
 */
5094163953Srrsvoid 
5095163953Srrssctp_handle_forward_tsn(struct sctp_tcb *stcb, 
5096206137Stuexen struct sctp_forward_tsn_chunk *fwd, 
5097206137Stuexen int *abort_flag, struct mbuf *m, int offset) 
5098163953Srrs{ 
5099163953Srrs /* The pr-sctp fwd tsn */ 
5100163953Srrs /* 
5101163953Srrs * here we will perform all the data receiver side steps for 
5102163953Srrs * processing FwdTSN, as required in by pr-sctp draft: 
5103163953Srrs * 
5104163953Srrs * Assume we get FwdTSN(x): 
5105163953Srrs * 
5106163953Srrs * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 
5107163953Srrs * others we have 3) examine and update re-ordering queue on 
5108163953Srrs * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 
5109163953Srrs * report where we are. 
5110163953Srrs */ 5111163953Srrs struct sctp_association *asoc; 5112207963Srrs uint32_t new_cum_tsn, gap; 5113228653Stuexen unsigned int i, fwd_sz, m_size; 5114190689Srrs uint32_t str_seq; 5115163953Srrs struct sctp_stream_in *strm; 5116216822Stuexen struct sctp_tmit_chunk *chk, *nchk; 5117190689Srrs struct sctp_queued_to_read *ctl, *sv; 5118163953Srrs 5119163953Srrs asoc = &stcb->asoc; 5120163953Srrs if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5121169420Srrs SCTPDBG(SCTP_DEBUG_INDATA1, 5122169420Srrs "Bad size too small/big fwd-tsn\n"); 5123163953Srrs return; 5124163953Srrs } 5125163953Srrs m_size = (stcb->asoc.mapping_array_size << 3); 5126163953Srrs /*************************************************************/ 5127163953Srrs /* 1. Here we update local cumTSN and shift the bitmap array */ 5128163953Srrs /*************************************************************/ 5129163953Srrs new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5130163953Srrs 5131216825Stuexen if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { 5132163953Srrs /* Already got there ... */ 5133163953Srrs return; 5134163953Srrs } 5135163953Srrs /* 5136163953Srrs * now we know the new TSN is more advanced, let's find the actual 5137163953Srrs * gap 5138163953Srrs */ 5139206137Stuexen SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); 5140205627Srrs asoc->cumulative_tsn = new_cum_tsn; 5141171990Srrs if (gap >= m_size) { 5142163953Srrs if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5143267723Stuexen struct mbuf *op_err; 5144267723Stuexen char msg[SCTP_DIAG_INFO_LEN]; 5145169352Srrs 5146163953Srrs /* 5147163953Srrs * out of range (of single byte chunks in the rwnd I 5148169352Srrs * give out). This must be an attacker. 
5149163953Srrs */ 5150169352Srrs *abort_flag = 1; 5151267723Stuexen snprintf(msg, sizeof(msg), 5152267723Stuexen "New cum ack %8.8x too high, highest TSN %8.8x", 5153267723Stuexen new_cum_tsn, asoc->highest_tsn_inside_map); 5154267723Stuexen op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 5155283822Stuexen stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; 5156267723Stuexen sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5157163953Srrs return; 5158163953Srrs } 5159170091Srrs SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5160205627Srrs 5161171990Srrs memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5162171990Srrs asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5163205627Srrs asoc->highest_tsn_inside_map = new_cum_tsn; 5164205627Srrs 5165206137Stuexen memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 5166205627Srrs asoc->highest_tsn_inside_nr_map = new_cum_tsn; 5167205627Srrs 5168179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5169170744Srrs sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5170170744Srrs } 5171171990Srrs } else { 5172171990Srrs SCTP_TCB_LOCK_ASSERT(stcb); 5173189790Srrs for (i = 0; i <= gap; i++) { 5174207963Srrs if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && 5175207963Srrs !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { 5176207963Srrs SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); 5177216825Stuexen if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { 5178207963Srrs asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; 5179206137Stuexen } 5180206137Stuexen } 5181206137Stuexen } 5182163953Srrs } 5183163953Srrs /*************************************************************/ 5184163953Srrs /* 2. 
Clear up re-assembly queue */ 5185163953Srrs /*************************************************************/ 5186163953Srrs /* 5187163953Srrs * First service it if pd-api is up, just in case we can progress it 5188163953Srrs * forward 5189163953Srrs */ 5190163953Srrs if (asoc->fragmented_delivery_inprogress) { 5191163953Srrs sctp_service_reassembly(stcb, asoc); 5192163953Srrs } 5193216822Stuexen /* For each one on here see if we need to toss it */ 5194216822Stuexen /* 5195216822Stuexen * For now large messages held on the reasmqueue that are complete 5196216822Stuexen * will be tossed too. We could in theory do more work to spin 5197216822Stuexen * through and stop after dumping one msg aka seeing the start of a 5198216822Stuexen * new msg at the head, and call the delivery function... to see if 5199216822Stuexen * it can be delivered... But for now we just dump everything on the 5200216822Stuexen * queue. 5201216822Stuexen */ 5202216822Stuexen TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 5203216825Stuexen if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) { 5204216822Stuexen /* It needs to be tossed */ 5205216822Stuexen TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5206216825Stuexen if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { 5207216822Stuexen asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 5208216822Stuexen asoc->str_of_pdapi = chk->rec.data.stream_number; 5209216822Stuexen asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 5210216822Stuexen asoc->fragment_flags = chk->rec.data.rcv_flags; 5211216822Stuexen } 5212216822Stuexen asoc->size_on_reasm_queue -= chk->send_size; 5213216822Stuexen sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5214163953Srrs 5215216822Stuexen /* Clear up any stream problem */ 5216216825Stuexen if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && 5217216825Stuexen SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { 
5218163953Srrs /* 5219216822Stuexen * We must dump forward this streams 5220216822Stuexen * sequence number if the chunk is not 5221216822Stuexen * unordered that is being skipped. There is 5222216822Stuexen * a chance that if the peer does not 5223216822Stuexen * include the last fragment in its FWD-TSN 5224216822Stuexen * we WILL have a problem here since you 5225216822Stuexen * would have a partial chunk in queue that 5226216822Stuexen * may not be deliverable. Also if a Partial 5227216822Stuexen * delivery API as started the user may get 5228216822Stuexen * a partial chunk. The next read returning 5229216822Stuexen * a new chunk... really ugly but I see no 5230216822Stuexen * way around it! Maybe a notify?? 5231163953Srrs */ 5232216822Stuexen asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; 5233163953Srrs } 5234216822Stuexen if (chk->data) { 5235216822Stuexen sctp_m_freem(chk->data); 5236216822Stuexen chk->data = NULL; 5237216822Stuexen } 5238221627Stuexen sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5239216822Stuexen } else { 5240216822Stuexen /* 5241216822Stuexen * Ok we have gone beyond the end of the fwd-tsn's 5242216822Stuexen * mark. 5243216822Stuexen */ 5244216822Stuexen break; 5245163953Srrs } 5246163953Srrs } 5247190689Srrs /*******************************************************/ 5248190689Srrs /* 3. Update the PR-stream re-ordering queues and fix */ 5249190689Srrs /* delivery issues as needed. */ 5250190689Srrs /*******************************************************/ 5251163953Srrs fwd_sz -= sizeof(*fwd); 5252170992Srrs if (m && fwd_sz) { 5253163953Srrs /* New method. 
*/ 5254170056Srrs unsigned int num_str; 5255170992Srrs struct sctp_strseq *stseq, strseqbuf; 5256163953Srrs 5257170992Srrs offset += sizeof(*fwd); 5258170992Srrs 5259190689Srrs SCTP_INP_READ_LOCK(stcb->sctp_ep); 5260163953Srrs num_str = fwd_sz / sizeof(struct sctp_strseq); 5261163953Srrs for (i = 0; i < num_str; i++) { 5262163953Srrs uint16_t st; 5263163953Srrs 5264170992Srrs stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5265170992Srrs sizeof(struct sctp_strseq), 5266170992Srrs (uint8_t *) & strseqbuf); 5267170992Srrs offset += sizeof(struct sctp_strseq); 5268171990Srrs if (stseq == NULL) { 5269170992Srrs break; 5270171990Srrs } 5271163953Srrs /* Convert */ 5272172091Srrs st = ntohs(stseq->stream); 5273172091Srrs stseq->stream = st; 5274172091Srrs st = ntohs(stseq->sequence); 5275172091Srrs stseq->sequence = st; 5276190689Srrs 5277163953Srrs /* now process */ 5278190689Srrs 5279190689Srrs /* 5280190689Srrs * Ok we now look for the stream/seq on the read 5281190689Srrs * queue where its not all delivered. If we find it 5282190689Srrs * we transmute the read entry into a PDI_ABORTED. 5283190689Srrs */ 5284172091Srrs if (stseq->stream >= asoc->streamincnt) { 5285171990Srrs /* screwed up streams, stop! */ 5286171990Srrs break; 5287163953Srrs } 5288190689Srrs if ((asoc->str_of_pdapi == stseq->stream) && 5289190689Srrs (asoc->ssn_of_pdapi == stseq->sequence)) { 5290190689Srrs /* 5291190689Srrs * If this is the one we were partially 5292190689Srrs * delivering now then we no longer are. 5293190689Srrs * Note this will change with the reassembly 5294190689Srrs * re-write. 
5295190689Srrs */ 5296190689Srrs asoc->fragmented_delivery_inprogress = 0; 5297190689Srrs } 5298190689Srrs sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence); 5299190689Srrs TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) { 5300190689Srrs if ((ctl->sinfo_stream == stseq->stream) && 5301190689Srrs (ctl->sinfo_ssn == stseq->sequence)) { 5302190689Srrs str_seq = (stseq->stream << 16) | stseq->sequence; 5303190689Srrs ctl->end_added = 1; 5304190689Srrs ctl->pdapi_aborted = 1; 5305190689Srrs sv = stcb->asoc.control_pdapi; 5306190689Srrs stcb->asoc.control_pdapi = ctl; 5307196260Stuexen sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5308196260Stuexen stcb, 5309190689Srrs SCTP_PARTIAL_DELIVERY_ABORTED, 5310196260Stuexen (void *)&str_seq, 5311196260Stuexen SCTP_SO_NOT_LOCKED); 5312190689Srrs stcb->asoc.control_pdapi = sv; 5313190689Srrs break; 5314190689Srrs } else if ((ctl->sinfo_stream == stseq->stream) && 5315216825Stuexen SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) { 5316190689Srrs /* We are past our victim SSN */ 5317190689Srrs break; 5318190689Srrs } 5319190689Srrs } 5320172091Srrs strm = &asoc->strmin[stseq->stream]; 5321216825Stuexen if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) { 5322163953Srrs /* Update the sequence number */ 5323216825Stuexen strm->last_sequence_delivered = stseq->sequence; 5324163953Srrs } 5325163953Srrs /* now kick the stream the new way */ 5326172156Srrs /* sa_ignore NO_NULL_CHK */ 5327163953Srrs sctp_kick_prsctp_reorder_queue(stcb, strm); 5328163953Srrs } 5329190689Srrs SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5330163953Srrs } 5331207963Srrs /* 5332207963Srrs * Now slide thing forward. 
5333207963Srrs */ 5334207963Srrs sctp_slide_mapping_arrays(stcb); 5335207963Srrs 5336212711Stuexen if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5337165647Srrs /* now lets kick out and check for more fragmented delivery */ 5338172156Srrs /* sa_ignore NO_NULL_CHK */ 5339165647Srrs sctp_deliver_reasm_check(stcb, &stcb->asoc); 5340165647Srrs } 5341163953Srrs} 5342