1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
5 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 *    this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in
16 *    the documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <netinet/sctp_os.h>
32#include <netinet/sctp_pcb.h>
33
34/*
35 * Default simple round-robin algorithm.
36 * Just iterates the streams in the order they appear.
37 */
38
/*
 * Forward declarations: sctp_ss_default_init() invokes the scheduler's
 * add callback (via ss_functions) before its definition appears below.
 */
static void
sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
    struct sctp_stream_out *,
    struct sctp_stream_queue_pending *);

static void
sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
    struct sctp_stream_out *,
    struct sctp_stream_queue_pending *);
48
49static void
50sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
51{
52	uint16_t i;
53
54	SCTP_TCB_LOCK_ASSERT(stcb);
55
56	asoc->ss_data.locked_on_sending = NULL;
57	asoc->ss_data.last_out_stream = NULL;
58	TAILQ_INIT(&asoc->ss_data.out.wheel);
59	/*
60	 * If there is data in the stream queues already, the scheduler of
61	 * an existing association has been changed. We need to add all
62	 * stream queues to the wheel.
63	 */
64	for (i = 0; i < asoc->streamoutcnt; i++) {
65		stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
66		    &asoc->strmout[i],
67		    NULL);
68	}
69	return;
70}
71
72static void
73sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
74    bool clear_values SCTP_UNUSED)
75{
76	SCTP_TCB_LOCK_ASSERT(stcb);
77
78	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
79		struct sctp_stream_out *strq;
80
81		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
82		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
83		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
84		strq->ss_params.scheduled = false;
85	}
86	asoc->ss_data.last_out_stream = NULL;
87	return;
88}
89
90static void
91sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
92{
93	SCTP_TCB_LOCK_ASSERT(stcb);
94
95	if (with_strq != NULL) {
96		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
97			stcb->asoc.ss_data.locked_on_sending = strq;
98		}
99		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
100			stcb->asoc.ss_data.last_out_stream = strq;
101		}
102	}
103	strq->ss_params.scheduled = false;
104	return;
105}
106
107static void
108sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
109    struct sctp_stream_out *strq,
110    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
111{
112	SCTP_TCB_LOCK_ASSERT(stcb);
113
114	/* Add to wheel if not already on it and stream queue not empty */
115	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
116		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
117		    strq, ss_params.ss.rr.next_spoke);
118		strq->ss_params.scheduled = true;
119	}
120	return;
121}
122
123static bool
124sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
125{
126	SCTP_TCB_LOCK_ASSERT(stcb);
127
128	return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
129}
130
static void
sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		/*
		 * If strq is the last stream sent on, move the marker to
		 * its predecessor so round-robin selection resumes in the
		 * right place after the removal.
		 */
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.rr.next_spoke);
			/* No predecessor: wrap around to the wheel's tail. */
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			/* strq was the only entry; no last stream remains. */
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		/* Drop the send lock if it was held on this stream. */
		if (asoc->ss_data.locked_on_sending == strq) {
			asoc->ss_data.locked_on_sending = NULL;
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}
163
/*
 * Pick the next stream to send from: round-robin over the wheel,
 * starting after the last stream sent on. Returns NULL if no scheduled
 * stream is eligible for the requested net.
 */
static struct sctp_stream_out *
sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/* While locked on a stream, keep returning it. */
	if (asoc->ss_data.locked_on_sending != NULL) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("locked_on_sending %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	strqt = asoc->ss_data.last_out_stream;
	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
	    ("last_out_stream %p not scheduled", (void *)strqt));
default_again:
	/* Find the next stream to use, wrapping to the wheel's head. */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
		if (strq == NULL) {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			/* Full lap with no eligible stream: give up. */
			if (strq == asoc->ss_data.last_out_stream) {
				return (NULL);
			} else {
				strqt = strq;
				goto default_again;
			}
		}
	}
	return (strq);
}
218
219static void
220sctp_ss_default_scheduled(struct sctp_tcb *stcb,
221    struct sctp_nets *net SCTP_UNUSED,
222    struct sctp_association *asoc,
223    struct sctp_stream_out *strq,
224    int moved_how_much SCTP_UNUSED)
225{
226	struct sctp_stream_queue_pending *sp;
227
228	KASSERT(strq != NULL, ("strq is NULL"));
229	KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
230	SCTP_TCB_LOCK_ASSERT(stcb);
231
232	asoc->ss_data.last_out_stream = strq;
233	if (asoc->idata_supported == 0) {
234		sp = TAILQ_FIRST(&strq->outqueue);
235		if ((sp != NULL) && (sp->some_taken == 1)) {
236			asoc->ss_data.locked_on_sending = strq;
237		} else {
238			asoc->ss_data.locked_on_sending = NULL;
239		}
240	} else {
241		asoc->ss_data.locked_on_sending = NULL;
242	}
243	return;
244}
245
246static void
247sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
248    struct sctp_association *asoc SCTP_UNUSED)
249{
250	SCTP_TCB_LOCK_ASSERT(stcb);
251
252	/* Nothing to be done here */
253	return;
254}
255
256static int
257sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
258    struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
259{
260	SCTP_TCB_LOCK_ASSERT(stcb);
261
262	/* Nothing to be done here */
263	return (-1);
264}
265
266static int
267sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
268    struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
269{
270	SCTP_TCB_LOCK_ASSERT(stcb);
271
272	/* Nothing to be done here */
273	return (-1);
274}
275
276static bool
277sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
278{
279	struct sctp_stream_out *strq;
280	struct sctp_stream_queue_pending *sp;
281
282	SCTP_TCB_LOCK_ASSERT(stcb);
283
284	if (asoc->stream_queue_cnt != 1) {
285		return (false);
286	}
287	strq = asoc->ss_data.locked_on_sending;
288	if (strq == NULL) {
289		return (false);
290	}
291	sp = TAILQ_FIRST(&strq->outqueue);
292	if (sp == NULL) {
293		return (false);
294	}
295	return (sp->msg_is_complete == 0);
296}
297
298/*
299 * Real round-robin algorithm.
300 * Always iterates the streams in ascending order.
301 */
302static void
303sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
304    struct sctp_stream_out *strq,
305    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
306{
307	struct sctp_stream_out *strqt;
308
309	SCTP_TCB_LOCK_ASSERT(stcb);
310
311	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
312		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
313			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
314		} else {
315			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
316			while (strqt != NULL && (strqt->sid < strq->sid)) {
317				strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
318			}
319			if (strqt != NULL) {
320				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
321			} else {
322				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
323			}
324		}
325		strq->ss_params.scheduled = true;
326	}
327	return;
328}
329
330/*
331 * Real round-robin per packet algorithm.
332 * Always iterates the streams in ascending order and
333 * only fills messages of the same stream in a packet.
334 */
335static struct sctp_stream_out *
336sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
337    struct sctp_association *asoc)
338{
339	SCTP_TCB_LOCK_ASSERT(stcb);
340
341	return (asoc->ss_data.last_out_stream);
342}
343
/*
 * A packet is complete: advance last_out_stream to the next eligible
 * stream on the wheel so the next packet is filled from it.
 */
static void
sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt;

	SCTP_TCB_LOCK_ASSERT(stcb);

	strqt = asoc->ss_data.last_out_stream;
	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
	    ("last_out_stream %p not scheduled", (void *)strqt));
rrp_again:
	/* Find the next stream to use, wrapping to the wheel's head. */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
		if (strq == NULL) {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			/* Full lap with no eligible stream: select nothing. */
			if (strq == asoc->ss_data.last_out_stream) {
				strq = NULL;
			} else {
				strqt = strq;
				goto rrp_again;
			}
		}
	}
	asoc->ss_data.last_out_stream = strq;
	return;
}
393
394/*
395 * Priority algorithm.
396 * Always prefers streams based on their priority id.
397 */
398static void
399sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
400    bool clear_values)
401{
402	SCTP_TCB_LOCK_ASSERT(stcb);
403
404	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
405		struct sctp_stream_out *strq;
406
407		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
408		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
409		if (clear_values) {
410			strq->ss_params.ss.prio.priority = 0;
411		}
412		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
413		strq->ss_params.scheduled = false;
414	}
415	asoc->ss_data.last_out_stream = NULL;
416	return;
417}
418
419static void
420sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
421{
422	SCTP_TCB_LOCK_ASSERT(stcb);
423
424	if (with_strq != NULL) {
425		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
426			stcb->asoc.ss_data.locked_on_sending = strq;
427		}
428		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
429			stcb->asoc.ss_data.last_out_stream = strq;
430		}
431	}
432	strq->ss_params.scheduled = false;
433	if (with_strq != NULL) {
434		strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
435	} else {
436		strq->ss_params.ss.prio.priority = 0;
437	}
438	return;
439}
440
441static void
442sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
443    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
444{
445	struct sctp_stream_out *strqt;
446
447	SCTP_TCB_LOCK_ASSERT(stcb);
448
449	/* Add to wheel if not already on it and stream queue not empty */
450	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
451		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
452			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
453		} else {
454			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
455			while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
456				strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
457			}
458			if (strqt != NULL) {
459				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
460			} else {
461				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
462			}
463		}
464		strq->ss_params.scheduled = true;
465	}
466	return;
467}
468
static void
sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		/*
		 * If strq is the last stream sent on, move the marker to
		 * its predecessor (wrapping to the tail, or NULL if strq
		 * was the only entry) so selection resumes correctly.
		 */
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.prio.next_spoke);
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		/* Drop the send lock if it was held on this stream. */
		if (asoc->ss_data.locked_on_sending == strq) {
			asoc->ss_data.locked_on_sending = NULL;
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}
500
/*
 * Select the next stream: round-robin among wheel neighbors of equal
 * priority, otherwise restart from the head of the (priority-sorted)
 * wheel, i.e. the highest-priority (lowest-value) streams.
 */
static struct sctp_stream_out *
sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt, *strqn;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/* While locked on a stream, keep returning it. */
	if (asoc->ss_data.locked_on_sending != NULL) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("locked_on_sending %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	strqt = asoc->ss_data.last_out_stream;
	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
	    ("last_out_stream %p not scheduled", (void *)strqt));
prio_again:
	/* Find the next stream to use */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		/* Continue with the neighbor only at equal priority. */
		strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
		if (strqn != NULL &&
		    strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
			strq = strqn;
		} else {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			/* Full lap with no eligible stream: give up. */
			if (strq == asoc->ss_data.last_out_stream) {
				return (NULL);
			} else {
				strqt = strq;
				goto prio_again;
			}
		}
	}
	return (strq);
}
558
559static int
560sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
561    struct sctp_stream_out *strq, uint16_t *value)
562{
563	SCTP_TCB_LOCK_ASSERT(stcb);
564
565	if (strq == NULL) {
566		return (-1);
567	}
568	*value = strq->ss_params.ss.prio.priority;
569	return (1);
570}
571
572static int
573sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
574    struct sctp_stream_out *strq, uint16_t value)
575{
576	SCTP_TCB_LOCK_ASSERT(stcb);
577
578	if (strq == NULL) {
579		return (-1);
580	}
581	strq->ss_params.ss.prio.priority = value;
582	sctp_ss_prio_remove(stcb, asoc, strq, NULL);
583	sctp_ss_prio_add(stcb, asoc, strq, NULL);
584	return (1);
585}
586
587/*
588 * Fair bandwidth algorithm.
589 * Maintains an equal throughput per stream.
590 */
591static void
592sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
593    bool clear_values)
594{
595	SCTP_TCB_LOCK_ASSERT(stcb);
596
597	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
598		struct sctp_stream_out *strq;
599
600		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
601		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
602		if (clear_values) {
603			strq->ss_params.ss.fb.rounds = -1;
604		}
605		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
606		strq->ss_params.scheduled = false;
607	}
608	asoc->ss_data.last_out_stream = NULL;
609	return;
610}
611
612static void
613sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
614{
615	SCTP_TCB_LOCK_ASSERT(stcb);
616
617	if (with_strq != NULL) {
618		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
619			stcb->asoc.ss_data.locked_on_sending = strq;
620		}
621		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
622			stcb->asoc.ss_data.last_out_stream = strq;
623		}
624	}
625	strq->ss_params.scheduled = false;
626	if (with_strq != NULL) {
627		strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
628	} else {
629		strq->ss_params.ss.fb.rounds = -1;
630	}
631	return;
632}
633
634static void
635sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
636    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
637{
638	SCTP_TCB_LOCK_ASSERT(stcb);
639
640	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
641		if (strq->ss_params.ss.fb.rounds < 0)
642			strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
643		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
644		strq->ss_params.scheduled = true;
645	}
646	return;
647}
648
static void
sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		/*
		 * If strq is the last stream sent on, move the marker to
		 * its predecessor (wrapping to the tail, or NULL if strq
		 * was the only entry) so selection resumes correctly.
		 */
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.fb.next_spoke);
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		/* Drop the send lock if it was held on this stream. */
		if (asoc->ss_data.locked_on_sending == strq) {
			asoc->ss_data.locked_on_sending = NULL;
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}
680
/*
 * Select the eligible stream with the smallest deficit counter, scanning
 * the whole wheel starting just after the last stream sent on. Returns
 * NULL if no scheduled stream is eligible.
 */
static struct sctp_stream_out *
sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq = NULL, *strqt;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/* While locked on a stream, keep returning it. */
	if (asoc->ss_data.locked_on_sending != NULL) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("locked_on_sending %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	/* Pick the scan start: after last_out_stream, or the wheel head. */
	if (asoc->ss_data.last_out_stream == NULL ||
	    TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
		strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
	}
	do {
		/*
		 * strqt is eligible when CMT is on, or when CMT is off and
		 * its head message is not bound to a different net than the
		 * one requested by the caller.
		 */
		if ((strqt != NULL) &&
		    ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
		    (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
		    (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
		    (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
			/* Track the seeded stream with the fewest rounds. */
			if ((strqt->ss_params.ss.fb.rounds >= 0) &&
			    ((strq == NULL) ||
			    (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
				strq = strqt;
			}
		}
		/* Advance, wrapping around the wheel; stop after one lap. */
		if (strqt != NULL) {
			strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
		} else {
			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	} while (strqt != strq);
	return (strq);
}
722
723static void
724sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
725    struct sctp_association *asoc, struct sctp_stream_out *strq,
726    int moved_how_much SCTP_UNUSED)
727{
728	struct sctp_stream_queue_pending *sp;
729	struct sctp_stream_out *strqt;
730	int subtract;
731
732	SCTP_TCB_LOCK_ASSERT(stcb);
733
734	if (asoc->idata_supported == 0) {
735		sp = TAILQ_FIRST(&strq->outqueue);
736		if ((sp != NULL) && (sp->some_taken == 1)) {
737			asoc->ss_data.locked_on_sending = strq;
738		} else {
739			asoc->ss_data.locked_on_sending = NULL;
740		}
741	} else {
742		asoc->ss_data.locked_on_sending = NULL;
743	}
744	subtract = strq->ss_params.ss.fb.rounds;
745	TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
746		strqt->ss_params.ss.fb.rounds -= subtract;
747		if (strqt->ss_params.ss.fb.rounds < 0)
748			strqt->ss_params.ss.fb.rounds = 0;
749	}
750	if (TAILQ_FIRST(&strq->outqueue)) {
751		strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
752	} else {
753		strq->ss_params.ss.fb.rounds = -1;
754	}
755	asoc->ss_data.last_out_stream = strq;
756	return;
757}
758
759/*
760 * First-come, first-serve algorithm.
761 * Maintains the order provided by the application.
762 */
/* Forward declaration: sctp_ss_fcfs_init() needs the add callback below. */
static void
sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq SCTP_UNUSED,
    struct sctp_stream_queue_pending *sp);
767
/*
 * Initialize the FCFS list. Data already queued (scheduler changed on a
 * live association) is picked up in interleaved round-robin order, since
 * the true arrival order across streams is no longer known.
 */
static void
sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t x, n = 0, add_more = 1;
	struct sctp_stream_queue_pending *sp;
	uint16_t i;

	SCTP_TCB_LOCK_ASSERT(stcb);

	TAILQ_INIT(&asoc->ss_data.out.list);
	/*
	 * If there is data in the stream queues already, the scheduler of
	 * an existing association has been changed. We can only cycle
	 * through the stream queues and add everything to the FCFS queue.
	 * Pass n adds the n-th pending message of each stream; stop after
	 * a full pass that finds nothing more to add.
	 */
	while (add_more) {
		add_more = 0;
		for (i = 0; i < asoc->streamoutcnt; i++) {
			sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
			x = 0;
			/* Find n. message in current stream queue */
			while (sp != NULL && x < n) {
				sp = TAILQ_NEXT(sp, next);
				x++;
			}
			if (sp != NULL) {
				sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
				add_more = 1;
			}
		}
		n++;
	}
	return;
}
802
803static void
804sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
805    bool clear_values SCTP_UNUSED)
806{
807	struct sctp_stream_queue_pending *sp;
808
809	SCTP_TCB_LOCK_ASSERT(stcb);
810
811	while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
812		sp = TAILQ_FIRST(&asoc->ss_data.out.list);
813		KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
814		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
815		sp->scheduled = false;
816	}
817	asoc->ss_data.last_out_stream = NULL;
818	return;
819}
820
821static void
822sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
823{
824	SCTP_TCB_LOCK_ASSERT(stcb);
825
826	if (with_strq != NULL) {
827		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
828			stcb->asoc.ss_data.locked_on_sending = strq;
829		}
830		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
831			stcb->asoc.ss_data.last_out_stream = strq;
832		}
833	}
834	strq->ss_params.scheduled = false;
835	return;
836}
837
838static void
839sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
840    struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
841{
842	SCTP_TCB_LOCK_ASSERT(stcb);
843
844	if (!sp->scheduled) {
845		TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
846		sp->scheduled = true;
847	}
848	return;
849}
850
851static bool
852sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
853{
854	SCTP_TCB_LOCK_ASSERT(stcb);
855
856	return (TAILQ_EMPTY(&asoc->ss_data.out.list));
857}
858
859static void
860sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
861    struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
862{
863	SCTP_TCB_LOCK_ASSERT(stcb);
864
865	if (sp->scheduled) {
866		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
867		sp->scheduled = false;
868	}
869	return;
870}
871
/*
 * Select the stream owning the oldest eligible message on the FCFS list.
 * Returns NULL when the list is exhausted without an eligible entry.
 */
static struct sctp_stream_out *
sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq;
	struct sctp_stream_queue_pending *sp;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/* While locked on a stream, keep returning it. */
	if (asoc->ss_data.locked_on_sending) {
		return (asoc->ss_data.locked_on_sending);
	}
	sp = TAILQ_FIRST(&asoc->ss_data.out.list);
default_again:
	if (sp != NULL) {
		strq = &asoc->strmout[sp->sid];
	} else {
		strq = NULL;
	}

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			/* Try the next message on the list. */
			sp = TAILQ_NEXT(sp, ss_next);
			goto default_again;
		}
	}
	return (strq);
}
912
913static void
914sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
915    struct sctp_nets *net SCTP_UNUSED,
916    struct sctp_association *asoc,
917    struct sctp_stream_out *strq,
918    int moved_how_much SCTP_UNUSED)
919{
920	struct sctp_stream_queue_pending *sp;
921
922	KASSERT(strq != NULL, ("strq is NULL"));
923	asoc->ss_data.last_out_stream = strq;
924	if (asoc->idata_supported == 0) {
925		sp = TAILQ_FIRST(&strq->outqueue);
926		if ((sp != NULL) && (sp->some_taken == 1)) {
927			asoc->ss_data.locked_on_sending = strq;
928		} else {
929			asoc->ss_data.locked_on_sending = NULL;
930		}
931	} else {
932		asoc->ss_data.locked_on_sending = NULL;
933	}
934	return;
935}
936
/*
 * Dispatch table of the available stream schedulers, indexed by the
 * SCTP_SS_* constants noted above each entry. Schedulers share the
 * default callbacks wherever they need no specialized behavior.
 */
const struct sctp_ss_functions sctp_ss_functions[] = {
/* SCTP_SS_DEFAULT */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_default_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_default_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
/* SCTP_SS_RR */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_rr_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_default_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
/* SCTP_SS_RR_PKT */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_rr_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_rrp_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_rrp_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
/* SCTP_SS_PRIO */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_prio_clear,
		.sctp_ss_init_stream = sctp_ss_prio_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_prio_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_prio_remove,
		.sctp_ss_select_stream = sctp_ss_prio_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_prio_get_value,
		.sctp_ss_set_value = sctp_ss_prio_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
/* SCTP_SS_FB */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_fb_clear,
		.sctp_ss_init_stream = sctp_ss_fb_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_fb_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_fb_remove,
		.sctp_ss_select_stream = sctp_ss_fb_select,
		.sctp_ss_scheduled = sctp_ss_fb_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
/* SCTP_SS_FCFS */
	{
		.sctp_ss_init = sctp_ss_fcfs_init,
		.sctp_ss_clear = sctp_ss_fcfs_clear,
		.sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_fcfs_add,
		.sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
		.sctp_ss_select_stream = sctp_ss_fcfs_select,
		.sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	}
};
1029