/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef _NETINET_SCTP_STRUCTS_H_
#define _NETINET_SCTP_STRUCTS_H_

#include <netinet/sctp_os.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_auth.h>

struct sctp_timer {
	sctp_os_timer_t timer;

	int type;
	/*
	 * Depending on the timer type, these will be set up and cast to the
	 * appropriate entity.
	 */
	void *ep;
	void *tcb;
	void *net;
	void *vnet;

	/* for sanity checking */
	void *self;
	uint32_t ticks;
	uint32_t stopped_from;
};


struct sctp_foo_stuff {
	struct sctp_inpcb *inp;
	uint32_t lineno;
	uint32_t ticks;
	int updown;
};


/*
 * This is the information we track on each interface that we know about from
 * the distant end.
 */
TAILQ_HEAD(sctpnetlisthead, sctp_nets);

struct sctp_stream_reset_list {
	TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
	uint32_t tsn;
	uint32_t number_entries;
	uint16_t list_of_streams[];
};

TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);

/*
 * Users of the iterator need to malloc an iterator with a call to
 * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags, pcb_features,
 *     asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
 *
 * Use the following two defines if you don't care what pcb flags are on the EP
 * and/or you don't care what state the association is in.
 *
 * Note that if you specify an INP as the last argument, then ONLY the
 * associations of that single INP will be operated on. Note that the pcb
 * flags STILL apply, so if the inp you specify has pcb_flags different from
 * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
 * ensure the inp you specify gets processed.
 */
#define SCTP_PCB_ANY_FLAGS	0x00000000
#define SCTP_PCB_ANY_FEATURES	0x00000000
#define SCTP_ASOC_ANY_STATE	0x00000000

typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
         uint32_t val);
typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
typedef void (*end_func) (void *ptr, uint32_t val);
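
/*
 * Illustrative sketch only (not part of the original header): one plausible
 * way to kick off the iterator described above, following the argument
 * order listed in that comment.  The callback names my_inp_check,
 * my_asoc_work and my_done are hypothetical; the authoritative prototype of
 * sctp_initiate_iterator() lives in the PCB code and may differ in detail.
 *
 *	static int
 *	my_inp_check(struct sctp_inpcb *inp, void *ptr, uint32_t val)
 *	{
 *		return (0);
 *	}
 *
 *	static void
 *	my_asoc_work(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
 *	    void *ptr, uint32_t val)
 *	{
 *		... per-association work ...
 *	}
 *
 *	static void
 *	my_done(void *ptr, uint32_t val)
 *	{
 *		... cleanup once the iterator finishes ...
 *	}
 *
 *	(void)sctp_initiate_iterator(my_inp_check, my_asoc_work, NULL,
 *	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
 *	    arg_ptr, 0, my_done, inp);
 */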

#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
/* what's on the mcore control struct */
struct sctp_mcore_queue {
	TAILQ_ENTRY(sctp_mcore_queue) next;
	struct vnet *vn;
	struct mbuf *m;
	int off;
	int v6;
};

TAILQ_HEAD(sctp_mcore_qhead, sctp_mcore_queue);

struct sctp_mcore_ctrl {
	SCTP_PROCESS_STRUCT thread_proc;
	struct sctp_mcore_qhead que;
	struct mtx core_mtx;
	struct mtx que_mtx;
	int running;
	int cpuid;
};


#endif


struct sctp_iterator {
	TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr;
	struct vnet *vn;
	struct sctp_timer tmr;
	struct sctp_inpcb *inp;	/* current endpoint */
	struct sctp_tcb *stcb;	/* current assoc */
	struct sctp_inpcb *next_inp;	/* special hook to skip to */
	asoc_func function_assoc;	/* per assoc function */
	inp_func function_inp;	/* per endpoint function */
	inp_func function_inp_end;	/* end INP function */
	end_func function_atend;/* iterator completion function */
	void *pointer;		/* pointer for apply func to use */
	uint32_t val;		/* value for apply func to use */
	uint32_t pcb_flags;	/* endpoint flags being checked */
	uint32_t pcb_features;	/* endpoint features being checked */
	uint32_t asoc_state;	/* assoc state being checked */
	uint32_t iterator_flags;
	uint8_t no_chunk_output;
	uint8_t done_current_ep;
};

/* iterator_flags values */
#define SCTP_ITERATOR_DO_ALL_INP	0x00000001
#define SCTP_ITERATOR_DO_SINGLE_INP	0x00000002


TAILQ_HEAD(sctpiterators, sctp_iterator);

struct sctp_copy_all {
	struct sctp_inpcb *inp;	/* ep */
	struct mbuf *m;
	struct sctp_sndrcvinfo sndrcv;
	int sndlen;
	int cnt_sent;
	int cnt_failed;
};

struct sctp_asconf_iterator {
	struct sctpladdr list_of_work;
	int cnt;
};

struct iterator_control {
	struct mtx ipi_iterator_wq_mtx;
	struct mtx it_mtx;
	SCTP_PROCESS_STRUCT thread_proc;
	struct sctpiterators iteratorhead;
	struct sctp_iterator *cur_it;
	uint32_t iterator_running;
	uint32_t iterator_flags;
};

#define SCTP_ITERATOR_STOP_CUR_IT	0x00000004
#define SCTP_ITERATOR_STOP_CUR_INP	0x00000008

struct sctp_net_route {
	sctp_rtentry_t *ro_rt;
	void *ro_lle;
	void *ro_ia;
	int ro_flags;
	union sctp_sockstore _l_addr;	/* remote peer addr */
	struct sctp_ifa *_s_addr;	/* our selected src addr */
};

struct htcp {
	uint16_t alpha;		/* Fixed point arith, << 7 */
	uint8_t beta;		/* Fixed point arith, << 7 */
	uint8_t modeswitch;	/* Delay modeswitch until we have had at least
				 * one congestion event */
	uint32_t last_cong;	/* Time since last congestion event end */
	uint32_t undo_last_cong;
	uint16_t bytes_acked;
	uint32_t bytecount;
	uint32_t minRTT;
	uint32_t maxRTT;

	uint32_t undo_maxRTT;
	uint32_t undo_old_maxB;

	/* Bandwidth estimation */
	uint32_t minB;
	uint32_t maxB;
	uint32_t old_maxB;
	uint32_t Bi;
	uint32_t lasttime;
};
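
/*
 * Illustrative sketch only: alpha and beta above are kept in fixed point,
 * shifted left by 7 bits, so consumers scale with an integer shift instead
 * of floating point.  The names ca, cwnd and mtu below are hypothetical.
 *
 *	struct htcp *ca;
 *	uint32_t cwnd, mtu;
 *
 *	cwnd += (ca->alpha * mtu) >> 7;
 *	cwnd = (cwnd * ca->beta) >> 7;
 *
 * The first line grows cwnd by alpha/128 MTUs; the second shrinks it to
 * beta/128 of its previous value.
 */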

struct rtcc_cc {
	struct timeval tls;	/* The time we started the sending */
	uint64_t lbw;		/* Our last estimated bw */
	uint64_t lbw_rtt;	/* RTT at bw estimate */
	uint64_t bw_bytes;	/* The total bytes since this sending began */
	uint64_t bw_tot_time;	/* The total time since sending began */
	uint64_t new_tot_time;	/* temp holding the new value */
	uint64_t bw_bytes_at_last_rttc;	/* What bw_bytes was at last rtt calc */
	uint32_t cwnd_at_bw_set;/* Cwnd at last bw saved - lbw */
	uint32_t vol_reduce;	/* cnt of voluntary reductions */
	uint16_t steady_step;	/* The number required to be in steady state */
	uint16_t step_cnt;	/* The current number */
	uint8_t ret_from_eq;	/* What to return when all things are equal:
				 * 0/1 - 1 means no cc advance */
	uint8_t use_dccc_ecn;	/* Flag to enable DCCC ECN */
	uint8_t tls_needs_set;	/* Flag to indicate we need to set tls: 0 or 1
				 * means set at send, 2 means not */
	uint8_t last_step_state;/* Last state if steady state stepdown is on */
	uint8_t rtt_set_this_sack;	/* Flag saying this sack had RTT calc
					 * on it */
	uint8_t last_inst_ind;	/* Last saved inst indication */
};
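
/*
 * Illustrative sketch only: the RTCC fields above lend themselves to a
 * plain "bytes over elapsed time" bandwidth estimate that can be compared
 * with the previously saved value in lbw.  This is a reading of the fields,
 * not the module's actual algorithm; the name rtcc is hypothetical.
 *
 *	struct rtcc_cc *rtcc;
 *	uint64_t cur_bw = 0;
 *
 *	if (rtcc->bw_tot_time > 0)
 *		cur_bw = rtcc->bw_bytes / rtcc->bw_tot_time;
 *
 * A cur_bw at or below rtcc->lbw suggests the extra cwnd bought no extra
 * bandwidth, which is when vol_reduce would typically be incremented.
 */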


struct sctp_nets {
	TAILQ_ENTRY(sctp_nets) sctp_next;	/* next link */

	/*
	 * Things on the top half may be able to be split into a common
	 * structure shared by all.
	 */
	struct sctp_timer pmtu_timer;
	struct sctp_timer hb_timer;

	/*
	 * The following two in combination equate to a route entry for v6
	 * or v4.
	 */
	struct sctp_net_route ro;

	/* mtu discovered so far */
	uint32_t mtu;
	uint32_t ssthresh;	/* not sure about this one for split */
	uint32_t last_cwr_tsn;
	uint32_t cwr_window_tsn;
	uint32_t ecn_ce_pkt_cnt;
	uint32_t lost_cnt;
	/* smoothed average things for RTT and RTO itself */
	int lastsa;
	int lastsv;
	uint64_t rtt;		/* last measured rtt value in us */
	unsigned int RTO;

	/* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
	struct sctp_timer rxt_timer;

	/* last time in seconds I sent to it */
	struct timeval last_sent_time;
	union cc_control_data {
		struct htcp htcp_ca;	/* JRS - struct used in HTCP algorithm */
		struct rtcc_cc rtcc;	/* rtcc module cc stuff */
	}               cc_mod;
	int ref_count;

	/* Congestion stats per destination */
	/*
	 * flight size variables and such, sorry Vern, I could not avoid
	 * this if I wanted performance :>
	 */
	uint32_t flight_size;
	uint32_t cwnd;		/* actual cwnd */
	uint32_t prev_cwnd;	/* cwnd before any processing */
	uint32_t ecn_prev_cwnd;	/* ECN prev cwnd at first ecn_echo seen in new
				 * window */
	uint32_t partial_bytes_acked;	/* in CA tracks when to incr a MTU */
	/* tracking variables to avoid the alloc/free in sack processing */
	unsigned int net_ack;
	unsigned int net_ack2;

	/*
	 * JRS - 5/8/07 - Variable to track last time a destination was
	 * active for CMT PF
	 */
	uint32_t last_active;

	/*
	 * CMT variables (iyengar@cis.udel.edu)
	 */
	uint32_t this_sack_highest_newack;	/* tracks highest TSN newly
						 * acked for a given dest in
						 * the current SACK. Used in
						 * SFR and HTNA algos */
	uint32_t pseudo_cumack;	/* CMT CUC algorithm. Maintains next expected
				 * pseudo-cumack for this destination */
	uint32_t rtx_pseudo_cumack;	/* CMT CUC algorithm. Maintains next
					 * expected pseudo-cumack for this
					 * destination */

	/* CMT fast recovery variables */
	uint32_t fast_recovery_tsn;
	uint32_t heartbeat_random1;
	uint32_t heartbeat_random2;
#ifdef INET6
	uint32_t flowlabel;
#endif
	uint8_t dscp;

	struct timeval start_time;	/* time when this net was created */
	uint32_t marked_retrans;/* number of DATA chunks marked for timer
				 * based retransmissions */
	uint32_t marked_fastretrans;
	uint32_t heart_beat_delay;	/* Heart Beat delay in ms */

	/* if this guy is ok or not ... status */
	uint16_t dest_state;
	/* number of timeouts to consider the destination unreachable */
	uint16_t failure_threshold;
	/* number of timeouts to consider the destination potentially failed */
	uint16_t pf_threshold;
	/* error stats on the destination */
	uint16_t error_count;
	/* UDP port number in case of UDP tunneling */
	uint16_t port;

	uint8_t fast_retran_loss_recovery;
	uint8_t will_exit_fast_recovery;
	/* Flags that probably can be combined into dest_state */
	uint8_t fast_retran_ip;	/* fast retransmit in progress */
	uint8_t hb_responded;
	uint8_t saw_newack;	/* CMT's SFR algorithm flag */
	uint8_t src_addr_selected;	/* if we split we move */
	uint8_t indx_of_eligible_next_to_use;
	uint8_t addr_is_local;	/* it's a local address (if known); could move
				 * in split */

	/*
	 * CMT variables (iyengar@cis.udel.edu)
	 */
	uint8_t find_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
					 * find a new pseudocumack. This flag
					 * is set after a new pseudo-cumack
					 * has been received and indicates
					 * that the sender should find the
					 * next pseudo-cumack expected for
					 * this destination */
	uint8_t find_rtx_pseudo_cumack;	/* CMT CUCv2 algorithm. Flag used to
					 * find a new rtx-pseudocumack. This
					 * flag is set after a new
					 * rtx-pseudo-cumack has been received
					 * and indicates that the sender
					 * should find the next
					 * rtx-pseudo-cumack expected for this
					 * destination */
	uint8_t new_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
					 * indicate if a new pseudo-cumack or
					 * rtx-pseudo-cumack has been received */
	uint8_t window_probe;	/* Doing a window probe? */
	uint8_t RTO_measured;	/* Have we done the first measurement? */
	uint8_t last_hs_used;	/* index into the last HS table entry we used */
	uint8_t lan_type;
	uint8_t rto_needed;
	uint32_t flowid;
#ifdef INVARIANTS
	uint8_t flowidset;
#endif
};


struct sctp_data_chunkrec {
	uint32_t TSN_seq;	/* the TSN of this transmit */
	uint16_t stream_seq;	/* the stream sequence number of this transmit */
	uint16_t stream_number;	/* the stream number of this guy */
	uint32_t payloadtype;
	uint32_t context;	/* from send */
	uint32_t cwnd_at_send;
	/*
	 * part of the Highest sacked algorithm to be able to stroke counts
	 * on ones that are FR'd.
	 */
	uint32_t fast_retran_tsn;	/* sending_seq at the time of FR */
	struct timeval timetodrop;	/* time we drop it from queue */
	uint8_t doing_fast_retransmit;
	uint8_t rcv_flags;	/* flags pulled from the data chunk on inbound;
				 * for outbound, holds sending flags for
				 * PR-SCTP. */
	uint8_t state_flags;
	uint8_t chunk_was_revoked;
	uint8_t fwd_tsn_cnt;
};

TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);

/* The lower byte is used to enumerate PR_SCTP policies */
#define CHUNK_FLAGS_PR_SCTP_TTL	        SCTP_PR_SCTP_TTL
#define CHUNK_FLAGS_PR_SCTP_BUF	        SCTP_PR_SCTP_BUF
#define CHUNK_FLAGS_PR_SCTP_RTX         SCTP_PR_SCTP_RTX

/* The upper byte is used as a bit mask */
#define CHUNK_FLAGS_FRAGMENT_OK	        0x0100
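
/*
 * Illustrative sketch only: given the split described above, one plausible
 * way to read a chunk's flags word is to mask the low byte for the PR-SCTP
 * policy and test individual bits in the high byte.  The real code may use
 * dedicated macros for this; the name chk is hypothetical.
 *
 *	struct sctp_tmit_chunk *chk;
 *	int is_ttl_policy, may_fragment;
 *
 *	is_ttl_policy = ((chk->flags & 0x00ff) == CHUNK_FLAGS_PR_SCTP_TTL);
 *	may_fragment = ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) != 0);
 */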

struct chk_id {
	uint16_t id;
	uint16_t can_take_data;
};


struct sctp_tmit_chunk {
	union {
		struct sctp_data_chunkrec data;
		struct chk_id chunk_id;
	}     rec;
	struct sctp_association *asoc;	/* bp to asoc this belongs to */
	struct timeval sent_rcv_time;	/* filled in if RTT being calculated */
	struct mbuf *data;	/* pointer to mbuf chain of data */
	struct mbuf *last_mbuf;	/* pointer to last mbuf in chain */
	struct sctp_nets *whoTo;
	          TAILQ_ENTRY(sctp_tmit_chunk) sctp_next;	/* next link */
	int32_t sent;		/* the send status */
	uint16_t snd_count;	/* number of times I sent */
	uint16_t flags;		/* flags, such as FRAGMENT_OK */
	uint16_t send_size;
	uint16_t book_size;
	uint16_t mbcnt;
	uint16_t auth_keyid;
	uint8_t holds_key_ref;	/* flag if auth keyid refcount is held */
	uint8_t pad_inplace;
	uint8_t do_rtt;
	uint8_t book_size_scale;
	uint8_t no_fr_allowed;
	uint8_t copy_by_ref;
	uint8_t window_probe;
};

/*
 * The first part of this structure MUST be the entire sinfo structure. Maybe
 * I should have made it a sub structure... we can circle back later and do
 * that if we want.
 */
struct sctp_queued_to_read {	/* sinfo structure plus more */
	uint16_t sinfo_stream;	/* off the wire */
	uint16_t sinfo_ssn;	/* off the wire */
	uint16_t sinfo_flags;	/* SCTP_UNORDERED from wire; use SCTP_EOF for
				 * EOR */
	uint32_t sinfo_ppid;	/* off the wire */
	uint32_t sinfo_context;	/* pick this up from assoc def context? */
	uint32_t sinfo_timetolive;	/* not used by kernel */
	uint32_t sinfo_tsn;	/* Use this in reassembly as first TSN */
	uint32_t sinfo_cumtsn;	/* Use this in reassembly as last TSN */
	sctp_assoc_t sinfo_assoc_id;	/* our assoc id */
	/* Non sinfo stuff */
	uint32_t length;	/* length of data */
	uint32_t held_length;	/* length held in sb */
	struct sctp_nets *whoFrom;	/* where it came from */
	struct mbuf *data;	/* front of the mbuf chain of data with
				 * PKT_HDR */
	struct mbuf *tail_mbuf;	/* used for multi-part data */
	struct mbuf *aux_data;	/* used to hold/cache control if o/s does not
				 * take it from us */
	struct sctp_tcb *stcb;	/* assoc, used for window update */
	         TAILQ_ENTRY(sctp_queued_to_read) next;
	uint16_t port_from;
	uint16_t spec_flags;	/* Flags to hold the notification field */
	uint8_t do_not_ref_stcb;
	uint8_t end_added;
	uint8_t pdapi_aborted;
	uint8_t some_taken;
};
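
/*
 * Illustrative sketch only: because this structure starts with the exact
 * sinfo layout, code that needs an sctp_sndrcvinfo view of a queued read
 * entry can in principle reuse the leading fields directly.  The names
 * control and sinfo are hypothetical.
 *
 *	struct sctp_queued_to_read *control;
 *	struct sctp_sndrcvinfo *sinfo;
 *
 *	sinfo = (struct sctp_sndrcvinfo *)control;
 *
 * The cast stays valid only while the leading members keep the same order
 * and types as struct sctp_sndrcvinfo, which is what the comment above
 * demands.
 */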

/* This data structure will be on the outbound
 * stream queues. Data will be pulled off from
 * the front of the mbuf data and chunk-ified
 * by the output routines. We will custom
 * fit every chunk we pull to the send/sent
 * queue to make up the next full packet
 * if we can. An entry cannot be removed
 * from the stream_out queue until
 * the msg_is_complete flag is set. This
 * means at times data/tail_mbuf MIGHT
 * be NULL. If that occurs, it happens
 * for one of two reasons: either the user
 * is blocked on a send() call and has not
 * awoken to copy more data down, OR
 * the user is in the explicit MSG_EOR mode
 * and wrote some data, but has not completed
 * sending.
 */
struct sctp_stream_queue_pending {
	struct mbuf *data;
	struct mbuf *tail_mbuf;
	struct timeval ts;
	struct sctp_nets *net;
	          TAILQ_ENTRY(sctp_stream_queue_pending) next;
	          TAILQ_ENTRY(sctp_stream_queue_pending) ss_next;
	uint32_t length;
	uint32_t timetolive;
	uint32_t ppid;
	uint32_t context;
	uint16_t sinfo_flags;
	uint16_t stream;
	uint16_t act_flags;
	uint16_t auth_keyid;
	uint8_t holds_key_ref;
	uint8_t msg_is_complete;
	uint8_t some_taken;
	uint8_t sender_all_done;
	uint8_t put_last_out;
	uint8_t discard_rest;
};
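
/*
 * Illustrative sketch only: per the comment above, output code has to
 * tolerate a pending entry whose mbuf chain is momentarily empty, and may
 * only retire the entry once the whole message has arrived and been moved.
 * One plausible reading, with the hypothetical name sp:
 *
 *	struct sctp_stream_queue_pending *sp;
 *	int have_data, can_remove;
 *
 *	have_data = (sp->data != NULL && sp->length > 0);
 *	can_remove = (sp->msg_is_complete && sp->length == 0);
 */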

/*
 * this struct contains info that is used to track inbound stream data and
 * help with ordering.
 */
TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
struct sctp_stream_in {
	struct sctp_readhead inqueue;
	uint16_t stream_no;
	uint16_t last_sequence_delivered;	/* used for re-order */
	uint8_t delivery_started;
};

TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending);

/* Round-robin schedulers */
struct ss_rr {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
};

/* Priority scheduler */
struct ss_prio {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
	/* priority id */
	uint16_t priority;
};

/* Fair Bandwidth scheduler */
struct ss_fb {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
	/* stores message size */
	int32_t rounds;
};

/*
 * This union holds all data necessary for
 * different stream schedulers.
 */
union scheduling_data {
	struct sctpwheel_listhead out_wheel;
	struct sctplist_listhead out_list;
};

/*
 * This union holds all parameters per stream
 * necessary for different stream schedulers.
 */
union scheduling_parameters {
	struct ss_rr rr;
	struct ss_prio prio;
	struct ss_fb fb;
};

/* This struct is used to track the traffic on outbound streams */
struct sctp_stream_out {
	struct sctp_streamhead outqueue;
	union scheduling_parameters ss_params;
	uint32_t chunks_on_queues;
	uint16_t stream_no;
	uint16_t next_sequence_send;	/* next one I expect to send out */
	uint8_t last_msg_incomplete;
};
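
/*
 * Illustrative sketch only: each scheduler interprets ss_params through its
 * own member of union scheduling_parameters.  A priority scheduler, for
 * example, would keep the per-stream priority there while linking the
 * stream into its wheel via next_spoke.  The name strq is hypothetical.
 *
 *	struct sctp_stream_out *strq;
 *
 *	strq->ss_params.prio.priority = 5;
 */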

/* used to keep track of the addresses yet to try to add/delete */
TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
struct sctp_asconf_addr {
	TAILQ_ENTRY(sctp_asconf_addr) next;
	struct sctp_asconf_addr_param ap;
	struct sctp_ifa *ifa;	/* save the ifa for add/del ip */
	uint8_t sent;		/* has this been sent yet? */
	uint8_t special_del;	/* not to be used in lookup */
};

struct sctp_scoping {
	uint8_t ipv4_addr_legal;
	uint8_t ipv6_addr_legal;
	uint8_t loopback_scope;
	uint8_t ipv4_local_scope;
	uint8_t local_scope;
	uint8_t site_scope;
};

#define SCTP_TSN_LOG_SIZE 40

struct sctp_tsn_log {
	void *stcb;
	uint32_t tsn;
	uint16_t strm;
	uint16_t seq;
	uint16_t sz;
	uint16_t flgs;
	uint16_t in_pos;
	uint16_t in_out;
};

#define SCTP_FS_SPEC_LOG_SIZE 200
struct sctp_fs_spec_log {
	uint32_t sent;
	uint32_t total_flight;
	uint32_t tsn;
	uint16_t book;
	uint8_t incr;
	uint8_t decr;
};

/* This struct is here to cut out the compatibility
 * pad that bulks up both the inp and stcb. The non-pad
 * portion MUST stay in complete sync with
 * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
 * this must be done here too.
 */
struct sctp_nonpad_sndrcvinfo {
	uint16_t sinfo_stream;
	uint16_t sinfo_ssn;
	uint16_t sinfo_flags;
	uint32_t sinfo_ppid;
	uint32_t sinfo_context;
	uint32_t sinfo_timetolive;
	uint32_t sinfo_tsn;
	uint32_t sinfo_cumtsn;
	sctp_assoc_t sinfo_assoc_id;
	uint16_t sinfo_keynumber;
	uint16_t sinfo_keynumber_valid;
};
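
/*
 * Illustrative sketch only: one way to enforce the "MUST stay in sync"
 * rule above would be a compile-time check in a .c file where both the
 * non-pad and the public sctp_sndrcvinfo definitions are visible.  This is
 * not something the header currently does.
 *
 *	#include <stddef.h>
 *
 *	_Static_assert(offsetof(struct sctp_nonpad_sndrcvinfo, sinfo_assoc_id) ==
 *	    offsetof(struct sctp_sndrcvinfo, sinfo_assoc_id),
 *	    "sctp_nonpad_sndrcvinfo drifted from sctp_sndrcvinfo");
 */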

/*
 * JRS - Structure to hold function pointers to the functions responsible
 * for congestion control.
 */

struct sctp_cc_functions {
	void (*sctp_set_initial_cc_param) (struct sctp_tcb *stcb, struct sctp_nets *net);
	void (*sctp_cwnd_update_after_sack) (struct sctp_tcb *stcb,
	         struct sctp_association *asoc,
	         int accum_moved, int reneged_all, int will_exit);
	void (*sctp_cwnd_update_exit_pf) (struct sctp_tcb *stcb, struct sctp_nets *net);
	void (*sctp_cwnd_update_after_fr) (struct sctp_tcb *stcb,
	         struct sctp_association *asoc);
	void (*sctp_cwnd_update_after_timeout) (struct sctp_tcb *stcb,
	         struct sctp_nets *net);
	void (*sctp_cwnd_update_after_ecn_echo) (struct sctp_tcb *stcb,
	         struct sctp_nets *net, int in_window, int num_pkt_lost);
	void (*sctp_cwnd_update_after_packet_dropped) (struct sctp_tcb *stcb,
	         struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
	         uint32_t * bottle_bw, uint32_t * on_queue);
	void (*sctp_cwnd_update_after_output) (struct sctp_tcb *stcb,
	         struct sctp_nets *net, int burst_limit);
	void (*sctp_cwnd_update_packet_transmitted) (struct sctp_tcb *stcb,
	         struct sctp_nets *net);
	void (*sctp_cwnd_update_tsn_acknowledged) (struct sctp_nets *net,
	         struct sctp_tmit_chunk *);
	void (*sctp_cwnd_new_transmission_begins) (struct sctp_tcb *stcb,
	         struct sctp_nets *net);
	void (*sctp_cwnd_prepare_net_for_sack) (struct sctp_tcb *stcb,
	         struct sctp_nets *net);
	int (*sctp_cwnd_socket_option) (struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
	void (*sctp_rtt_calculated) (struct sctp_tcb *, struct sctp_nets *, struct timeval *);
};
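
/*
 * Illustrative sketch only: a congestion control module would normally be
 * expressed as a filled-in instance of the struct above (unused hooks left
 * NULL) and copied into an association's cc_functions.  The names my_cc_*
 * are hypothetical and not part of this header.
 *
 *	static struct sctp_cc_functions my_cc = {
 *		.sctp_set_initial_cc_param = my_cc_set_initial,
 *		.sctp_cwnd_update_after_sack = my_cc_after_sack,
 *		.sctp_cwnd_update_after_timeout = my_cc_after_timeout,
 *	};
 *
 *	stcb->asoc.cc_functions = my_cc;
 */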

/*
 * RS - Structure to hold function pointers to the functions responsible
 * for stream scheduling.
 */
struct sctp_ss_functions {
	void (*sctp_ss_init) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	         int holds_lock);
	void (*sctp_ss_clear) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	         int clear_values, int holds_lock);
	void (*sctp_ss_init_stream) (struct sctp_stream_out *strq, struct sctp_stream_out *with_strq);
	void (*sctp_ss_add_to_stream) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	         struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
	int (*sctp_ss_is_empty) (struct sctp_tcb *stcb, struct sctp_association *asoc);
	void (*sctp_ss_remove_from_stream) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	         struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
	struct sctp_stream_out *(*sctp_ss_select_stream) (struct sctp_tcb *stcb,
	                    struct sctp_nets *net, struct sctp_association *asoc);
	void (*sctp_ss_scheduled) (struct sctp_tcb *stcb, struct sctp_nets *net,
	         struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much);
	void (*sctp_ss_packet_done) (struct sctp_tcb *stcb, struct sctp_nets *net,
	         struct sctp_association *asoc);
	int (*sctp_ss_get_value) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	        struct sctp_stream_out *strq, uint16_t * value);
	int (*sctp_ss_set_value) (struct sctp_tcb *stcb, struct sctp_association *asoc,
	        struct sctp_stream_out *strq, uint16_t value);
};
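
/*
 * Illustrative sketch only: callers are expected to reach the active
 * scheduler indirectly through the hooks above, so swapping schedulers does
 * not change the call sites.  The names stcb, net and strq are hypothetical.
 *
 *	struct sctp_stream_out *strq = NULL;
 *
 *	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc))
 *		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb,
 *		    net, &stcb->asoc);
 */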

/* used to save ASCONF chunks for retransmission */
TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
struct sctp_asconf {
	TAILQ_ENTRY(sctp_asconf) next;
	uint32_t serial_number;
	uint16_t snd_count;
	struct mbuf *data;
	uint16_t len;
};

/* used to save ASCONF-ACK chunks for retransmission */
TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
struct sctp_asconf_ack {
	TAILQ_ENTRY(sctp_asconf_ack) next;
	uint32_t serial_number;
	struct sctp_nets *last_sent_to;
	struct mbuf *data;
	uint16_t len;
};

/*
 * Here we have information about each individual association that we track.
 * In production we would probably be more dynamic, but for ease of
 * implementation we will have a fixed array that we hunt through in a
 * linear fashion.
 */
struct sctp_association {
	/* association state */
	int state;

	/* queue of pending addrs to add/delete */
	struct sctp_asconf_addrhead asconf_queue;

	struct timeval time_entered;	/* time we entered state */
	struct timeval time_last_rcvd;
	struct timeval time_last_sent;
	struct timeval time_last_sat_advance;
	struct sctp_nonpad_sndrcvinfo def_send;

	/* timers and such */
	struct sctp_timer dack_timer;	/* Delayed ack timer */
	struct sctp_timer asconf_timer;	/* asconf */
	struct sctp_timer strreset_timer;	/* stream reset */
	struct sctp_timer shut_guard_timer;	/* shutdown guard */
	struct sctp_timer autoclose_timer;	/* automatic close timer */
	struct sctp_timer delayed_event_timer;	/* timer for delayed events */
	struct sctp_timer delete_prim_timer;	/* deleting primary dst */

	/* list of restricted local addresses */
	struct sctpladdr sctp_restricted_addrs;

	/* last local address pending deletion (waiting for an address add) */
	struct sctp_ifa *asconf_addr_del_pending;
	/* Deleted primary destination (used to stop timer) */
	struct sctp_nets *deleted_primary;

	struct sctpnetlisthead nets;	/* remote address list */

	/* Free chunk list */
	struct sctpchunk_listhead free_chunks;

	/* Control chunk queue */
	struct sctpchunk_listhead control_send_queue;

	/* ASCONF chunk queue */
	struct sctpchunk_listhead asconf_send_queue;

	/*
	 * Once a TSN hits the wire it is moved to the sent_queue. We
	 * maintain two counts here (don't know if any but retran_cnt is
	 * needed). The idea is that the sent_queue_retran_cnt reflects how
	 * many chunks have been marked for retransmission by either T3-rxt
	 * or FR.
	 */
	struct sctpchunk_listhead sent_queue;
	struct sctpchunk_listhead send_queue;

	/* re-assembly queue for fragmented chunks on the inbound path */
	struct sctpchunk_listhead reasmqueue;

	/* Scheduling queues */
	union scheduling_data ss_data;

	/*
	 * This pointer will be set to NULL most of the time. But when we
	 * have a fragmented message, where we could not get out all of the
	 * message at the last send then this will point to the stream to go
	 * get data from.
	 */
	struct sctp_stream_out *locked_on_sending;

	/* If an iterator is looking at me, this is it */
	struct sctp_iterator *stcb_starting_point_for_iterator;

	/* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
	struct sctp_asconf_ackhead asconf_ack_sent;

	/*
	 * pointer to last stream reset queued to control queue by us with
	 * requests.
	 */
	struct sctp_tmit_chunk *str_reset;
	/*
	 * if Source Address Selection is happening, this will rotate through
	 * the link list.
	 */
	struct sctp_laddr *last_used_address;

	/* stream arrays */
	struct sctp_stream_in *strmin;
	struct sctp_stream_out *strmout;
	uint8_t *mapping_array;
	/* primary destination to use */
	struct sctp_nets *primary_destination;
	struct sctp_nets *alternate;	/* If primary is down or PF */
	/* For CMT */
	struct sctp_nets *last_net_cmt_send_started;
	/* last place I got a data chunk from */
	struct sctp_nets *last_data_chunk_from;
	/* last place I got a control from */
	struct sctp_nets *last_control_chunk_from;

	/* circular looking for output selection */
	struct sctp_stream_out *last_out_stream;

	/*
	 * wait until the cum-ack passes req->send_reset_at_tsn for any req
	 * on the list.
	 */
	struct sctp_resethead resetHead;

	/* queue of chunks waiting to be sent into the local stack */
	struct sctp_readhead pending_reply_queue;

	/* JRS - the congestion control functions are in this struct */
	struct sctp_cc_functions cc_functions;
	/*
	 * JRS - value to store the currently loaded congestion control
	 * module
	 */
	uint32_t congestion_control_module;
	/* RS - the stream scheduling functions are in this struct */
	struct sctp_ss_functions ss_functions;
	/* RS - value to store the currently loaded stream scheduling module */
	uint32_t stream_scheduling_module;

	uint32_t vrf_id;

	uint32_t cookie_preserve_req;
	/* ASCONF next seq I am sending out, inits at init-tsn */
	uint32_t asconf_seq_out;
	uint32_t asconf_seq_out_acked;
	/* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
	uint32_t asconf_seq_in;

	/* next seq I am sending in str reset messages */
	uint32_t str_reset_seq_out;
	/* next seq I am expecting in str reset messages */
	uint32_t str_reset_seq_in;

	/* various verification tag information */
	uint32_t my_vtag;	/* The tag to be used. If the assoc is
				 * re-initialized by the remote end, and I
				 * have unlocked, this will be regenerated to
				 * a new random value. */
	uint32_t peer_vtag;	/* The peer's last tag */

	uint32_t my_vtag_nonce;
	uint32_t peer_vtag_nonce;

	uint32_t assoc_id;

	/* This is the SCTP fragmentation threshold */
	uint32_t smallest_mtu;

	/*
	 * Special hook for Fast retransmit, allows us to track the highest
	 * TSN that is NEW in this SACK if gap ack blocks are present.
	 */
	uint32_t this_sack_highest_gap;

	/*
	 * The highest consecutive TSN that has been acked by the peer on my
	 * sends
	 */
	uint32_t last_acked_seq;

	/* The next TSN that I will use in sending. */
	uint32_t sending_seq;

	/* Original seq number I used ??questionable to keep?? */
	uint32_t init_seq_number;


	/* The Advanced Peer Ack Point, as required by the PR-SCTP */
	/* (A1 in Section 4.2) */
	uint32_t advanced_peer_ack_point;

	/*
	 * The highest consecutive TSN at the bottom of the mapping array
	 * (for his sends).
	 */
	uint32_t cumulative_tsn;
	/*
	 * Used to track the mapping array and its offset bits. This MAY be
	 * lower than cumulative_tsn.
	 */
	uint32_t mapping_array_base_tsn;
	/*
	 * used to track the highest TSN we have received and is listed in
	 * the mapping array.
	 */
	uint32_t highest_tsn_inside_map;

	/* EY - new NR variables used for nr_sack based on mapping_array */
	uint8_t *nr_mapping_array;
	uint32_t highest_tsn_inside_nr_map;

	uint32_t fast_recovery_tsn;
	uint32_t sat_t3_recovery_tsn;
	uint32_t tsn_last_delivered;
	/*
	 * For the pd-api we should re-write this a bit more efficiently. We
	 * could have multiple sctp_queued_to_read's that we are building at
	 * once. Now we only do this when we get ready to deliver to the
	 * socket buffer. Note that we depend on the fact that the struct is
	 * "stuck" on the read queue until we finish all the pd-api.
	 */
	struct sctp_queued_to_read *control_pdapi;

	uint32_t tsn_of_pdapi_last_delivered;
	uint32_t pdapi_ppid;
	uint32_t context;
	uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
	uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
	uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
#ifdef SCTP_ASOCLOG_OF_TSNS
	/*
	 * special log - This adds considerable size to the asoc, but
	 * provides a log that you can use to detect problems via kgdb.
	 */
	struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE];
	struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE];
	uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
	uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE];
	uint16_t tsn_in_at;
	uint16_t tsn_out_at;
	uint16_t tsn_in_wrapped;
	uint16_t tsn_out_wrapped;
	uint16_t cumack_log_at;
	uint16_t cumack_log_atsnt;
#endif				/* SCTP_ASOCLOG_OF_TSNS */
#ifdef SCTP_FS_SPEC_LOG
	struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];
	uint16_t fs_index;
#endif

	/*
	 * window state information and smallest MTU that I use to bound
	 * segmentation
	 */
	uint32_t peers_rwnd;
	uint32_t my_rwnd;
	uint32_t my_last_reported_rwnd;
	uint32_t sctp_frag_point;

	uint32_t total_output_queue_size;

	uint32_t sb_cc;		/* shadow of sb_cc */
	uint32_t sb_send_resv;	/* amount reserved on a send */
	uint32_t my_rwnd_control_len;	/* shadow of sb_mbcnt used for rwnd
					 * control */
#ifdef INET6
	uint32_t default_flowlabel;
#endif
	uint32_t pr_sctp_cnt;
	int ctrl_queue_cnt;	/* could be removed  REM - NO IT CAN'T!! RRS */
	/*
	 * All outbound datagrams queue into this list from the individual
	 * stream queue. Here they get assigned a TSN and then await
	 * sending. The stream seq is assigned when the datagram is first
	 * put in the individual str queue.
	 */
	unsigned int stream_queue_cnt;
	unsigned int send_queue_cnt;
	unsigned int sent_queue_cnt;
	unsigned int sent_queue_cnt_removeable;
	/*
	 * Number on the sent queue that are marked for retransmission.
	 * Until this value is 0 we only send one packet of retransmitted
	 * data.
	 */
	unsigned int sent_queue_retran_cnt;

	unsigned int size_on_reasm_queue;
	unsigned int cnt_on_reasm_queue;
	unsigned int fwd_tsn_cnt;
	/* amount of data (bytes) currently in flight (on all destinations) */
	unsigned int total_flight;
	/* Total book size in flight */
	unsigned int total_flight_count;	/* count of chunks used with
						 * book total */
	/* count of destination nets and list of destination nets */
	unsigned int numnets;

	/* Total error count on this association */
	unsigned int overall_error_count;

	unsigned int cnt_msg_on_sb;

	/* All stream count of chunks for delivery */
	unsigned int size_on_all_streams;
	unsigned int cnt_on_all_streams;

	/* Heart Beat delay in ms */
	uint32_t heart_beat_delay;

	/* autoclose */
	unsigned int sctp_autoclose_ticks;

	/* how many preopen streams we have */
	unsigned int pre_open_streams;

	/* How many streams I support coming into me */
	unsigned int max_inbound_streams;

	/* the cookie life I award for any cookie, in seconds */
	unsigned int cookie_life;
	/* time to delay acks for */
	unsigned int delayed_ack;
	unsigned int old_delayed_ack;
	unsigned int sack_freq;
	unsigned int data_pkts_seen;

	unsigned int numduptsns;
	int dup_tsns[SCTP_MAX_DUP_TSNS];
	unsigned int initial_init_rto_max;	/* initial RTO for INIT's */
	unsigned int initial_rto;	/* initial send RTO */
	unsigned int minrto;	/* per assoc RTO-MIN */
	unsigned int maxrto;	/* per assoc RTO-MAX */

	/* authentication fields */
	sctp_auth_chklist_t *local_auth_chunks;
	sctp_auth_chklist_t *peer_auth_chunks;
	sctp_hmaclist_t *local_hmacs;	/* local HMACs supported */
	sctp_hmaclist_t *peer_hmacs;	/* peer HMACs supported */
	struct sctp_keyhead shared_keys;	/* assoc's shared keys */
	sctp_authinfo_t authinfo;	/* randoms, cached keys */
	/*
	 * refcnt to block freeing when a sender or receiver is off copying
	 * user data in.
	 */
	uint32_t refcnt;
	uint32_t chunks_on_out_queue;	/* total chunks floating around,
					 * locked by send socket buffer */
	uint32_t peers_adaptation;
	uint16_t peer_hmac_id;	/* peer HMAC id to send */

	/*
	 * Being that we have no bag to collect stale cookies, and that we
	 * really would not want to anyway.. we will count them in this
	 * counter. We of course feed them to the pigeons right away (I have
	 * always thought of pigeons as flying rats).
	 */
	uint16_t stale_cookie_count;

	/*
	 * For the partial delivery API: if it is invoked, this is the
	 * stream and SSN of the last TSN I delivered.
	 */
	uint16_t str_of_pdapi;
	uint16_t ssn_of_pdapi;

	/* counts of actually built streams; allocation may be larger, however */
	/* could re-arrange to optimize space here. */
	uint16_t streamincnt;
	uint16_t streamoutcnt;
	uint16_t strm_realoutsize;
	uint16_t strm_pending_add_size;
	/* my maximum number of retrans of INIT and SEND */
	/* copied from SCTP but should be individually settable */
	uint16_t max_init_times;
	uint16_t max_send_times;

	uint16_t def_net_failure;

	uint16_t def_net_pf_threshold;

	/*
	 * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
	 * awaiting ACK
	 */
	uint16_t mapping_array_size;

	uint16_t last_strm_seq_delivered;
	uint16_t last_strm_no_delivered;

	uint16_t last_revoke_count;
	int16_t num_send_timers_up;

	uint16_t stream_locked_on;
	uint16_t ecn_echo_cnt_onq;

	uint16_t free_chunk_cnt;
	uint8_t stream_locked;
	uint8_t authenticated;	/* packet authenticated ok */
	/*
	 * This flag indicates that a SACK needs to be sent. Initially this
	 * is 1 to send the first SACK immediately.
	 */
	uint8_t send_sack;

	/* max burst of new packets into the network */
	uint32_t max_burst;
	/* max burst of fast retransmit packets */
	uint32_t fr_max_burst;

	uint8_t sat_network;	/* RTT is in range of sat net or greater */
	uint8_t sat_network_lockout;	/* lockout code */
	uint8_t burst_limit_applied;	/* Burst limit in effect at last send? */
	/* flag goes on when we are doing a partial delivery api */
	uint8_t hb_random_values[4];
	uint8_t fragmented_delivery_inprogress;
	uint8_t fragment_flags;
	uint8_t last_flags_delivered;
	uint8_t hb_ect_randombit;
	uint8_t hb_random_idx;
	uint8_t default_dscp;
	uint8_t asconf_del_pending;	/* asconf delete last addr pending */

	/*
	 * This value, plus all others ack'd but above the cum-ack, is added
	 * together to cross check against the bit that we have yet to
	 * define (probably in the SACK). When the cum-ack is updated, this
	 * sum is updated as well.
	 */

	/* Flag to tell if ECN is allowed */
	uint8_t ecn_allowed;

	/* Did the peer make the stream config (add out) request */
	uint8_t peer_req_out;

	/* flag to indicate if peer can do asconf */
	uint8_t peer_supports_asconf;
	/* EY - flag to indicate if peer can do nr_sack */
	uint8_t peer_supports_nr_sack;
	/* pr-sctp support flag */
	uint8_t peer_supports_prsctp;
	/* peer authentication support flag */
	uint8_t peer_supports_auth;
	/* stream resets are supported by the peer */
	uint8_t peer_supports_strreset;
	uint8_t local_strreset_support;

	uint8_t peer_supports_nat;
	/*
	 * packet drops are supported by the peer; we don't really care
	 * about this but we bookkeep it anyway.
	 */
	uint8_t peer_supports_pktdrop;

	struct sctp_scoping scope;
	/* flags to handle send alternate net tracking */
	uint8_t used_alt_onsack;
	uint8_t used_alt_asconfack;
	uint8_t fast_retran_loss_recovery;
	uint8_t sat_t3_loss_recovery;
	uint8_t dropped_special_cnt;
	uint8_t seen_a_sack_this_pkt;
	uint8_t stream_reset_outstanding;
	uint8_t stream_reset_out_is_outstanding;
	uint8_t delayed_connection;
	uint8_t ifp_had_enobuf;
	uint8_t saw_sack_with_frags;
	uint8_t saw_sack_with_nr_frags;
	uint8_t in_asocid_hash;
	uint8_t assoc_up_sent;
	uint8_t adaptation_needed;
	uint8_t adaptation_sent;
	/* CMT variables */
	uint8_t cmt_dac_pkts_rcvd;
	uint8_t sctp_cmt_on_off;
	uint8_t iam_blocking;
	uint8_t cookie_how[8];
	/* EY 05/05/08 - NR_SACK variable */
	uint8_t sctp_nr_sack_on_off;
	/* JRS 5/21/07 - CMT PF variable */
	uint8_t sctp_cmt_pf;
	uint8_t use_precise_time;
	uint64_t sctp_features;
	uint16_t port;		/* remote UDP encapsulation port */
	/*
	 * The mapping array is used to track out of order sequences above
	 * last_acked_seq. 0 indicates a packet is missing, 1 indicates a
	 * packet was received. We slide it up every time we raise
	 * last_acked_seq and zero the trailing locations out. If I get a
	 * TSN above the array mappingArraySz, I discard the datagram and
	 * let retransmit happen.
	 */
	uint32_t marked_retrans;
	uint32_t timoinit;
	uint32_t timodata;
	uint32_t timosack;
	uint32_t timoshutdown;
	uint32_t timoheartbeat;
	uint32_t timocookie;
	uint32_t timoshutdownack;
	struct timeval start_time;
	struct timeval discontinuity_time;
};
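
/*
 * Illustrative sketch only: the mapping_array comment above implies a
 * simple TSN-to-bit translation relative to mapping_array_base_tsn (the
 * real code uses its own gap macros, which also deal with TSN wraparound).
 * The names asoc, tsn and gap are hypothetical.
 *
 *	struct sctp_association *asoc;
 *	uint32_t tsn, gap;
 *	int tsn_seen = 0;
 *
 *	gap = tsn - asoc->mapping_array_base_tsn;
 *	if (gap < (uint32_t)(asoc->mapping_array_size << 3))
 *		tsn_seen = (asoc->mapping_array[gap >> 3] >> (gap & 7)) & 1;
 */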

#endif
