/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _FBSD_COMPAT_SYS_MBUF_FBSD_H_
#define _FBSD_COMPAT_SYS_MBUF_FBSD_H_

/*
 * Return the address of the start of the buffer associated with an mbuf,
 * handling external storage, packet-header mbufs, and regular data mbufs.
 */
#define	M_START(m)							\
	(((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf :			\
	 ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] :		\
	 &(m)->m_dat[0])

/*
 * Return the size of the buffer associated with an mbuf, handling external
 * storage, packet-header mbufs, and regular data mbufs.
 */
#define	M_SIZE(m)							\
	(((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size :			\
	 ((m)->m_flags & M_PKTHDR) ? MHLEN :				\
	 MLEN)

/*
 * Set the m_data pointer of a newly allocated mbuf to place an object of the
 * specified size at the end of the mbuf, longword aligned.
 *
 * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
 * separate macros, each asserting that it was called at the proper moment.
 * This required callers to themselves test the storage type and call the
 * right one.  Rather than require callers to be aware of those layout
 * decisions, we centralize here.
 */
static __inline void
m_align(struct mbuf *m, int len)
{
#ifdef INVARIANTS
	const char *msg = "%s: not a virgin mbuf";
#endif
	int adjust;

	KASSERT(m->m_data == M_START(m), (msg, __func__));

	adjust = M_SIZE(m) - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}

#define	M_ALIGN(m, len)		m_align(m, len)
#define	MH_ALIGN(m, len)	m_align(m, len)
#define	MEXT_ALIGN(m, len)	m_align(m, len)
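
/*
 * Usage sketch (illustrative only, not compiled): align a freshly allocated
 * packet-header mbuf so that a header structure lands at the end of the
 * buffer, longword aligned.  The use of struct ip here is just an example;
 * any fixed-size header works the same way.
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL) {
 *		M_ALIGN(m, sizeof(struct ip));
 *		m->m_len = sizeof(struct ip);
 *	}
 */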

/*
 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can
 * be either the local data payload or an external buffer area, depending on
 * whether M_EXT is set).
 */
#define	M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) &&			\
			 (!(((m)->m_flags & M_EXT)) ||			\
			 (m_extrefcnt(m) == 1)))

/*
 * Compute the amount of space available before the current start of data in
 * an mbuf.
 *
 * The M_WRITABLE() check is a temporary, conservative safety measure: the
 * burden of checking writability of the mbuf data area rests solely with
 * the caller.
 *
 * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE()
 * for mbufs with external storage.  We now allow mbuf-embedded data to be
 * read-only as well.
 */
#define	M_LEADINGSPACE(m)						\
	(M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 *
 * The M_WRITABLE() check is a temporary, conservative safety measure: the
 * burden of checking writability of the mbuf data area rests solely with
 * the caller.
 *
 * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE()
 * for mbufs with external storage.  We now allow mbuf-embedded data to be
 * read-only as well.
 */
#define	M_TRAILINGSPACE(m)						\
	(M_WRITABLE(m) ?						\
		((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0)
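
/*
 * Usage sketch (illustrative only, not compiled): append bytes in place when
 * the mbuf has writable room after the current data, and fall back otherwise.
 * The buffer "src" and length "srclen" are hypothetical caller-supplied
 * values.
 *
 *	if (M_TRAILINGSPACE(m) >= srclen) {
 *		bcopy(src, mtod(m, caddr_t) + m->m_len, srclen);
 *		m->m_len += srclen;
 *	} else {
 *		// not enough writable room; something like m_append() or
 *		// allocating another mbuf would be needed here
 *	}
 */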

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If the allocation fails, the original mbuf chain is freed and m is
 * set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	MBUF_CHECKSLEEP(how);						\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
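
/*
 * Usage sketch (illustrative only, not compiled): the typical pattern in a
 * protocol output path, making room for a header in front of the payload.
 * Because M_PREPEND() may free the chain on allocation failure, the result
 * must be checked before use.
 *
 *	M_PREPEND(m, sizeof(struct udphdr), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	uh = mtod(m, struct udphdr *);
 */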

/* Clear the protocol-layer (M_PROTOFLAGS) flags on every mbuf in the chain. */
static __inline void
m_clrprotoflags(struct mbuf *m)
{
	while (m) {
		m->m_flags &= ~M_PROTOFLAGS;
		m = m->m_next;
	}
}

/* Return the reference count of the mbuf's external storage. */
static inline u_int
m_extrefcnt(struct mbuf *m)
{
	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT missing", __func__));

	return ((m->m_ext.ext_flags & EXT_FLAG_EMBREF) ? m->m_ext.ext_count :
		*m->m_ext.ext_cnt);
}

/* Map a cluster size to the corresponding external storage type. */
static __inline int
m_gettype(int size)
{
	int type = 0;

	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		break;
	default:
		panic("%s: invalid cluster size %d", __func__, size);
	}

	return (type);
}

/*
 * XXX: m_cljset() is a dangerous API.  One must attach only a new,
 * unreferenced cluster to an mbuf(9).  It is not possible to assert
 * that, so care can be taken only by users of the API.
 */
static __inline void
m_cljset(struct mbuf *m, void *cl, int type)
{
	int size = 0;

	switch (type) {
	case EXT_CLUSTER:
		size = MCLBYTES;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case EXT_JUMBOP:
		size = MJUMPAGESIZE;
		break;
#endif
	case EXT_JUMBO9:
		size = MJUM9BYTES;
		break;
	default:
		panic("%s: unknown cluster type %d", __func__, type);
		break;
	}

	m->m_data = m->m_ext.ext_buf = (caddr_t)cl;
	m->m_ext.ext_size = size;
	m->m_ext.ext_type = type;
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_flags |= M_EXT;
}
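
/*
 * Usage sketch (illustrative only, not compiled): attaching a freshly
 * allocated, unreferenced receive buffer "buf" of size MCLBYTES to a new
 * mbuf, as a driver receive path might do.  "buf" is a hypothetical
 * caller-owned cluster; m_gettype() translates its size into the EXT_* type
 * that m_cljset() expects.
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL)
 *		m_cljset(m, buf, m_gettype(MCLBYTES));
 */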

/* These are for OpenBSD compatibility. */
#define	MTAG_ABI_COMPAT		0		/* compatibility ABI */

static __inline struct m_tag *
m_tag_find(struct mbuf *m, uint16_t type, struct m_tag *start)
{
	return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
		m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
}

/* mbufq */

struct mbufq {
	STAILQ_HEAD(, mbuf)	mq_head;
	int			mq_len;
	int			mq_maxlen;
};

static inline void
mbufq_init(struct mbufq *mq, int maxlen)
{
	STAILQ_INIT(&mq->mq_head);
	mq->mq_maxlen = maxlen;
	mq->mq_len = 0;
}

/* Detach and return the queued packet chain without freeing it. */
static inline struct mbuf *
mbufq_flush(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	STAILQ_INIT(&mq->mq_head);
	mq->mq_len = 0;
	return (m);
}

/* Free every packet on the queue and leave it empty. */
static inline void
mbufq_drain(struct mbufq *mq)
{
	struct mbuf *m, *n;

	n = mbufq_flush(mq);
	while ((m = n) != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m_freem(m);
	}
}

static inline struct mbuf *
mbufq_first(const struct mbufq *mq)
{
	return (STAILQ_FIRST(&mq->mq_head));
}

static inline struct mbuf *
mbufq_last(const struct mbufq *mq)
{
	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
}

static inline int
mbufq_full(const struct mbufq *mq)
{
	return (mq->mq_len >= mq->mq_maxlen);
}

static inline int
mbufq_len(const struct mbufq *mq)
{
	return (mq->mq_len);
}

static inline int
mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
{

	if (mbufq_full(mq))
		return (ENOBUFS);
	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
	return (0);
}

static inline struct mbuf *
mbufq_dequeue(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	if (m) {
		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
		m->m_nextpkt = NULL;
		mq->mq_len--;
	}
	return (m);
}

static inline void
mbufq_prepend(struct mbufq *mq, struct mbuf *m)
{

	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
}

/*
 * Note: this doesn't enforce the maximum list size for dst.
 */
static inline void
mbufq_concat(struct mbufq *mq_dst, struct mbufq *mq_src)
{

	mq_dst->mq_len += mq_src->mq_len;
	STAILQ_CONCAT(&mq_dst->mq_head, &mq_src->mq_head);
	mq_src->mq_len = 0;
}
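
/*
 * Usage sketch (illustrative only, not compiled): a bounded holding queue of
 * the kind a driver might keep for deferred transmit packets.  The queue
 * "txq", the limit of 128 packets, and start_transmit() are hypothetical.
 *
 *	struct mbufq txq;
 *
 *	mbufq_init(&txq, 128);
 *	if (mbufq_enqueue(&txq, m) != 0)
 *		m_freem(m);			// queue full
 *	...
 *	while ((m = mbufq_dequeue(&txq)) != NULL)
 *		(void)start_transmit(m);
 *	mbufq_drain(&txq);			// free anything left over
 */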

#endif	/* _FBSD_COMPAT_SYS_MBUF_FBSD_H_ */