/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _NETINET_SCTP_LOCK_BSD_H_
#define _NETINET_SCTP_LOCK_BSD_H_

/*
 * General locking concepts: The goal of our locking is to provide
 * consistency while minimizing overhead. We attempt to use non-recursive
 * locks, which are supposed to be quite inexpensive. To make this work,
 * most functions are not aware of locking: once we have a TCB we lock it
 * and unlock it when we are through. This means that the TCB lock acts as
 * a kind of "global" lock while working on an association. Caution must be
 * used when taking the TCB lock, since recursing on it will deadlock.
 *
 * Most other locks (INP and INFO) attempt to localize the locking, i.e. we
 * try to contain the lock and unlock within the function that needs them.
 * This sometimes means we do extra locks and unlocks and lose a bit of
 * efficiency, but if the performance statements about non-recursive locks
 * are true this should not be a problem. One issue that arises from this
 * lock-only-when-needed approach is implicit association setup: if the
 * association lookup returns a NULL TCB, some other processor may create
 * the association before we get around to creating it ourselves. This is
 * what the CREATE lock on the endpoint is for. Places where we implicitly
 * create an association, OR simply create one (the connect call), take the
 * CREATE_INP lock. This assures us that if another creator is concurrently
 * looking up the INP and INFO structures, the two are gated and
 * synchronized. The CREATE_INP lock is therefore another lock that must be
 * taken with extreme caution to avoid re-entrancy issues.
 */

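/*
 * A minimal sketch of the TCB/CREATE locking pattern on implicit setup, as
 * described above. The helper names and argument lists shown
 * (sctp_findassociation_ep_addr(), sctp_aloc_assoc()) are illustrative
 * assumptions with error handling elided; the real call sites live in
 * sctp_pcb.c and sctp_usrreq.c.
 *
 *	SCTP_ASOC_CREATE_LOCK(inp);
 *	stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
 *	if (stcb == NULL) {
 *		// Nobody raced us; safe to build the association now.
 *		stcb = sctp_aloc_assoc(inp, addr, &error, ...);
 *	}
 *	SCTP_ASOC_CREATE_UNLOCK(inp);
 *	if (stcb != NULL) {
 *		// The TCB lock is now the "global" lock for this association.
 *		SCTP_TCB_LOCK_ASSERT(stcb);
 *		...
 *		SCTP_TCB_UNLOCK(stcb);
 *	}
 */
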
/*
 * When working with the global SCTP lists we lock and unlock the INP_INFO
 * lock. So when we go to look up an association we take the
 * SCTP_INP_INFO_RLOCK(), and when we want to add a new association to the
 * SCTP_BASE_INFO() lists we take the SCTP_INP_INFO_WLOCK().
 */

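/*
 * A minimal usage sketch of the read/write split described above. The
 * lookup and insert bodies are placeholders; only the lock pairing is the
 * point here.
 *
 *	SCTP_INP_INFO_RLOCK();
 *	// ... hash-table lookup of an existing association ...
 *	SCTP_INP_INFO_RUNLOCK();
 *
 *	SCTP_INP_INFO_WLOCK();
 *	// ... insert a newly created association into the global lists ...
 *	SCTP_INP_INFO_WUNLOCK();
 */
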
#define SCTP_IPI_COUNT_INIT()

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_DESTROY()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()

#define SCTP_INP_INFO_LOCK_INIT() do {					\
	rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info");		\
} while (0)

#define SCTP_INP_INFO_LOCK_DESTROY() do {				\
	if (rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) {			\
		rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx));		\
	}								\
	rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_INP_INFO_RLOCK() do {					\
	rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx));				\
} while (0)

#define SCTP_INP_INFO_WLOCK() do {					\
	rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx));				\
} while (0)

#define SCTP_INP_INFO_RUNLOCK() do {					\
	rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_INP_INFO_WUNLOCK() do {					\
	rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_INP_INFO_LOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_LOCKED);		\
} while (0)

#define SCTP_INP_INFO_RLOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_RLOCKED);		\
} while (0)

#define SCTP_INP_INFO_WLOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_WLOCKED);		\
} while (0)

#define SCTP_MCORE_QLOCK_INIT(cpstr) do {				\
	mtx_init(&(cpstr)->que_mtx, "sctp-mcore_queue", "queue_lock",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_MCORE_QDESTROY(cpstr) do {					\
	if (mtx_owned(&(cpstr)->que_mtx)) {				\
		mtx_unlock(&(cpstr)->que_mtx);				\
	}								\
	mtx_destroy(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_QLOCK(cpstr) do {					\
	mtx_lock(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_QUNLOCK(cpstr) do {					\
	mtx_unlock(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_LOCK_INIT(cpstr) do {				\
	mtx_init(&(cpstr)->core_mtx, "sctp-cpulck", "cpu_proc_lock",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_MCORE_DESTROY(cpstr) do {					\
	if (mtx_owned(&(cpstr)->core_mtx)) {				\
		mtx_unlock(&(cpstr)->core_mtx);				\
	}								\
	mtx_destroy(&(cpstr)->core_mtx);				\
} while (0)

#define SCTP_MCORE_LOCK(cpstr) do {					\
	mtx_lock(&(cpstr)->core_mtx);					\
} while (0)

#define SCTP_MCORE_UNLOCK(cpstr) do {					\
	mtx_unlock(&(cpstr)->core_mtx);					\
} while (0)

#define SCTP_IPI_ADDR_INIT() do {					\
	rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr");		\
} while (0)

#define SCTP_IPI_ADDR_DESTROY() do {					\
	if (rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) {			\
		rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx));		\
	}								\
	rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_RLOCK() do {					\
	rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_WLOCK() do {					\
	rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_RUNLOCK() do {					\
	rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_WUNLOCK() do {					\
	rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_LOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_LOCKED);		\
} while (0)

#define SCTP_IPI_ADDR_WLOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_WLOCKED);		\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_INIT() do {				\
	mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq",	\
	         "sctp_it_wq", MTX_DEF);				\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() do {				\
	mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_LOCK() do {				\
	mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK() do {				\
	mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IP_PKTLOG_INIT() do {					\
	mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog",	\
	         "packetlog", MTX_DEF);					\
} while (0)

#define SCTP_IP_PKTLOG_DESTROY() do {					\
	mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

#define SCTP_IP_PKTLOG_LOCK() do {					\
	mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

#define SCTP_IP_PKTLOG_UNLOCK() do {					\
	mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

/*
 * The INP locks are used for locking an SCTP endpoint. For example, if we
 * want to change something at the endpoint level, such as the random_store
 * or the cookie secrets, we take the INP lock.
 */

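/*
 * A minimal sketch of an endpoint-level update under the INP write lock.
 * The update body is a placeholder; real call sites also deal with
 * refcounts and socket state, which is elided here.
 *
 *	SCTP_INP_WLOCK(inp);
 *	// ... modify endpoint-level state, e.g. regenerate cookie secrets ...
 *	SCTP_INP_WUNLOCK(inp);
 */
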
#define SCTP_INP_READ_LOCK_INIT(_inp) do {				\
	mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr",		\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_INP_READ_LOCK_DESTROY(_inp) do {				\
	mtx_destroy(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_READ_LOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_READ_UNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_READ_LOCK_ASSERT(_inp) do {				\
	KASSERT(mtx_owned(&(_inp)->inp_rdata_mtx),			\
	        ("Don't own INP read queue lock"));			\
} while (0)

#define SCTP_INP_LOCK_INIT(_inp) do {					\
	mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp",			\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_INP_LOCK_DESTROY(_inp) do {				\
	mtx_destroy(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_LOCK_CONTENDED(_inp)					\
	((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_INP_READ_CONTENDED(_inp)					\
	((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);		\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WLOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);		\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WLOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WUNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_RLOCK_ASSERT(_inp) do {				\
	KASSERT(mtx_owned(&(_inp)->inp_mtx),				\
	        ("Don't own INP read lock"));				\
} while (0)

#define SCTP_INP_WLOCK_ASSERT(_inp) do {				\
	KASSERT(mtx_owned(&(_inp)->inp_mtx),				\
	        ("Don't own INP write lock"));				\
} while (0)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) do {				\
	mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) do {			\
	mtx_destroy(&(_inp)->inp_create_mtx);				\
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);	\
	mtx_lock(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	mtx_lock(&(_inp)->inp_create_mtx);				\
} while (0)
#endif

#define SCTP_ASOC_CREATE_UNLOCK(_inp) do {				\
	mtx_unlock(&(_inp)->inp_create_mtx);				\
} while (0)

#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp)				\
	((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)

/*
 * For the majority of things (once we have found the association) we lock
 * the actual association mutex. This protects all the association-level
 * queues, streams and the like. We still need to lock the socket layer when
 * we stuff data up into the receiving sb_mb, i.e. we must take an extra
 * SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

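/*
 * A minimal sketch of delivering data to the socket while holding the TCB
 * lock, as described above. The append step is a placeholder for what
 * sctp_add_to_readq() and friends actually do; only the lock nesting
 * (TCB lock held, SOCKBUF_LOCK taken in addition) is the point here.
 *
 *	SCTP_TCB_LOCK_ASSERT(stcb);
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	// ... append the mbuf chain to the receive buffer ...
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	sorwakeup(so);
 */
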
#define SCTP_TCB_LOCK_INIT(_tcb) do {					\
	mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb",			\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_TCB_LOCK_DESTROY(_tcb) do {				\
	mtx_destroy(&(_tcb)->tcb_mtx);					\
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);	\
	mtx_lock(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) do {					\
	mtx_lock(&(_tcb)->tcb_mtx);					\
} while (0)
#endif

#define SCTP_TCB_TRYLOCK(_tcb)						\
	mtx_trylock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK(_tcb) do {					\
	mtx_unlock(&(_tcb)->tcb_mtx);					\
} while (0)

#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do {				\
	if (mtx_owned(&(_tcb)->tcb_mtx))				\
		mtx_unlock(&(_tcb)->tcb_mtx);				\
} while (0)

#define SCTP_TCB_LOCK_ASSERT(_tcb) do {					\
	KASSERT(mtx_owned(&(_tcb)->tcb_mtx),				\
	        ("Don't own TCB lock"));				\
} while (0)

#define SCTP_ITERATOR_LOCK_INIT() do {					\
	mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF);	\
} while (0)

#define SCTP_ITERATOR_LOCK_DESTROY() do {				\
	mtx_destroy(&sctp_it_ctl.it_mtx);				\
} while (0)

#define SCTP_ITERATOR_LOCK() do {					\
	KASSERT(!mtx_owned(&sctp_it_ctl.it_mtx),			\
	        ("Own the iterator lock"));				\
	mtx_lock(&sctp_it_ctl.it_mtx);					\
} while (0)

#define SCTP_ITERATOR_UNLOCK() do {					\
	mtx_unlock(&sctp_it_ctl.it_mtx);				\
} while (0)

#define SCTP_WQ_ADDR_INIT() do {					\
	mtx_init(&SCTP_BASE_INFO(wq_addr_mtx),				\
	         "sctp-addr-wq", "sctp_addr_wq", MTX_DEF);		\
} while (0)

#define SCTP_WQ_ADDR_DESTROY() do {					\
	if (mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) {			\
		mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx));		\
	}								\
	mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx));			\
} while (0)

#define SCTP_WQ_ADDR_LOCK() do {					\
	mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx));				\
} while (0)

#define SCTP_WQ_ADDR_UNLOCK() do {					\
	mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx));			\
} while (0)

#define SCTP_WQ_ADDR_LOCK_ASSERT() do {					\
	KASSERT(mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx)),		\
	        ("Don't own the ADDR-WQ lock"));			\
} while (0)

#define SCTP_INCR_EP_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1);		\
} while (0)

#define SCTP_DECR_EP_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1);		\
} while (0)

#define SCTP_INCR_ASOC_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1);		\
} while (0)

#define SCTP_DECR_ASOC_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1);	\
} while (0)

#define SCTP_INCR_LADDR_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1);		\
} while (0)

#define SCTP_DECR_LADDR_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1);	\
} while (0)

#define SCTP_INCR_RADDR_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1);		\
} while (0)

#define SCTP_DECR_RADDR_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1);	\
} while (0)

#define SCTP_INCR_CHK_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1);		\
} while (0)

#define SCTP_DECR_CHK_COUNT() do {					\
	KASSERT(SCTP_BASE_INFO(ipi_count_chunk) > 0,			\
	        ("ipi_count_chunk would become negative"));		\
	if (SCTP_BASE_INFO(ipi_count_chunk) != 0)			\
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk),	\
		                    1);					\
} while (0)

#define SCTP_INCR_READQ_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1);		\
} while (0)

#define SCTP_DECR_READQ_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1);	\
} while (0)

#define SCTP_INCR_STRMOQ_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1);		\
} while (0)

#define SCTP_DECR_STRMOQ_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1);	\
} while (0)

#if defined(SCTP_SO_LOCK_TESTING)
#define SCTP_INP_SO(sctpinp)						\
	(sctpinp)->ip_inp.inp.inp_socket
#define SCTP_SOCKET_LOCK(so, refcnt)
#define SCTP_SOCKET_UNLOCK(so, refcnt)
#endif

#endif