/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

/*
 * Lock-free multi-producer, multi-consumer ring buffer.  The producer
 * and consumer fields are placed on separate cache lines to avoid
 * false sharing between the two sides.
 */
struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};
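
/*
 * Allocation sketch (illustrative only; this mirrors what
 * buf_ring_alloc(), declared at the bottom of this file, is expected
 * to do, not the canonical implementation): the ring array hangs off
 * the end of the structure, and "count" must be a power of two so the
 * masks implement cheap modular arithmetic.
 *
 *	br = malloc(sizeof(struct buf_ring) + count * sizeof(void *),
 *	    type, flags | M_ZERO);
 *	br->br_prod_size = br->br_cons_size = count;
 *	br->br_prod_mask = br->br_cons_mask = count - 1;
 *	br->br_prod_head = br->br_cons_head = 0;
 *	br->br_prod_tail = br->br_cons_tail = 0;
 */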

/*
 * Multi-producer safe lock-free ring buffer enqueue.
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
	int i;

	/*
	 * Note: It is possible to encounter an mbuf that was removed
	 * via drbr_peek() and then re-added via drbr_putback(), which
	 * would trigger a spurious panic here.
	 */
	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueued at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		prod_next = (prod_head + 1) & br->br_prod_mask;
		cons_tail = br->br_cons_tail;

		if (prod_next == cons_tail) {
			rmb();
			if (prod_head == br->br_prod_head &&
			    cons_tail == br->br_cons_tail) {
				br->br_drops++;
				critical_exit();
				return (ENOBUFS);
			}
			continue;
		}
	} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head,
	    prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * If there are other enqueues in progress that preceded us,
	 * we need to wait for them to complete before the new tail
	 * can be published.
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	atomic_store_rel_int(&br->br_prod_tail, prod_next);
	critical_exit();
	return (0);
}
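
/*
 * Example producer-side usage (a sketch; "sc" and the mbuf "m" are
 * hypothetical driver state, not part of this API).  On ENOBUFS the
 * caller still owns the buffer and is responsible for freeing it:
 *
 *	if (buf_ring_enqueue(sc->br, m) == ENOBUFS) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */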

/*
 * Multi-consumer safe lock-free dequeue.
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	void *buf;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == br->br_prod_tail) {
			critical_exit();
			return (NULL);
		}
	} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head,
	    cons_next));

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	/*
	 * If there are other dequeues in progress that preceded us,
	 * we need to wait for them to complete before the new tail
	 * can be published.
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	atomic_store_rel_int(&br->br_cons_tail, cons_next);
	critical_exit();

	return (buf);
}
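
/*
 * Example consumer-side usage (a sketch; process_buf() is a
 * hypothetical callback): several threads may drain the ring
 * concurrently without holding a lock.
 *
 *	void *buf;
 *
 *	while ((buf = buf_ring_dequeue_mc(br)) != NULL)
 *		process_buf(buf);
 */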

/*
 * Single-consumer dequeue.  Use only where dequeue is protected by
 * a lock, e.g. a network driver's tx queue lock.
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	/*
	 * This is a workaround to allow using buf_ring on ARM and ARM64.
	 * ARM64TODO: Fix buf_ring in a generic way.
	 * REMARKS: It is suspected that br_cons_head does not require a
	 *   load_acq operation, but this change was extensively tested
	 *   and confirmed to work.  To be reviewed once again in
	 *   FreeBSD-12.
	 *
	 * This prevents the following situation:
	 *
	 * Core(0) - buf_ring_enqueue()        Core(1) - buf_ring_dequeue_sc()
	 * ----------------------------        -------------------------------
	 *
	 *                                     cons_head = br->br_cons_head;
	 * atomic_cmpset_acq_32(&br->br_prod_head, ...);
	 *                                     buf = br->br_ring[cons_head];  <1>
	 * br->br_ring[prod_head] = buf;
	 * atomic_store_rel_32(&br->br_prod_tail, ...);
	 *                                     prod_tail = br->br_prod_tail;
	 *                                     if (cons_head == prod_tail)
	 *                                             return (NULL);
	 *                                     <the check is false and the code
	 *                                      uses the invalid (old) buf>
	 *
	 * <1> The load (on core 1) from br->br_ring[cons_head] can be
	 *     reordered (speculatively read) by the CPU.
	 */
#if defined(__arm__) || defined(__aarch64__)
	cons_head = atomic_load_acq_32(&br->br_cons_head);
#else
	cons_head = br->br_cons_head;
#endif
	prod_tail = atomic_load_acq_32(&br->br_prod_tail);

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}
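
/*
 * Example single-consumer usage (a sketch; "sc", "sc->tx_mtx", and
 * drv_start_xmit() are hypothetical driver state, not part of this
 * API).  The lock serializes consumers; producers may still enqueue
 * concurrently without taking it.
 *
 *	struct mbuf *m;
 *
 *	mtx_lock(&sc->tx_mtx);
 *	while ((m = buf_ring_dequeue_sc(sc->br)) != NULL)
 *		drv_start_xmit(sc, m);
 *	mtx_unlock(&sc->tx_mtx);
 */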

/*
 * Single-consumer advance after a peek.  Use only where the consumer
 * side is protected by a lock, e.g. a network driver's tx queue lock.
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there) to the top of
 * the ring.  The caller should *not* have used any dequeue to pull it
 * out of the ring, but instead should have used the peek() function.
 * This is normally used where the transmit queue of a driver is full,
 * and an mbuf must be returned.  Most likely what is in the ring
 * buffer is what is being put back (since it was not removed), but
 * sometimes the lower transmit function may have done a pullup or
 * other operation that changed it.  As an optimization we always put
 * it back (since jhb says the store is probably cheaper); if we have
 * to do a multi-queue version we will need the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}
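
/*
 * The peek/putback/advance pattern this supports (a sketch; "sc" and
 * drv_start_xmit() are hypothetical, and the consumer lock is assumed
 * held): peek at the head entry, try to transmit it, and either put
 * it back on failure or advance past it on success.
 *
 *	m = buf_ring_peek(sc->br);
 *	if (m == NULL)
 *		return;
 *	if (drv_start_xmit(sc, m) != 0) {
 *		buf_ring_putback_sc(sc->br, m);
 *		return;
 *	}
 *	buf_ring_advance_sc(sc->br);
 */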

/*
 * Return a pointer to the first entry in the ring without modifying
 * it, or NULL if the ring is empty.  Race-prone if not protected by
 * a lock.
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier here
	 * because we control cons, and tail is at worst a lagging
	 * indicator, so in the worst case we might return NULL
	 * immediately after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}

/*
 * Single-consumer peek that, under DEBUG_BUFRING, also clears the
 * slot.  Return the first entry in the ring, or NULL if the ring is
 * empty.  The caller must hold the consumer lock.
 */
static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
	void *ret;

	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif

	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

#if defined(__arm__) || defined(__aarch64__)
	/*
	 * The barrier is required here on ARM and ARM64 to ensure that
	 * br->br_ring[br->br_cons_head] is not fetched before the above
	 * condition is checked.
	 * Without the barrier, it is possible that the buffer is fetched
	 * before the enqueue puts the mbuf into br.  The enqueue then
	 * updates the array and br_prod_tail in the meantime, the
	 * emptiness check sees the new br_prod_tail and does not return
	 * NULL, and we return the previously fetched (and invalid)
	 * buffer.
	 */
	atomic_thread_fence_acq();
#endif

#ifdef DEBUG_BUFRING
	/*
	 * Single consumer, i.e. cons_head will not move while we are
	 * running, so atomic_swap_ptr() is not necessary here.
	 */
	ret = br->br_ring[br->br_cons_head];
	br->br_ring[br->br_cons_head] = NULL;
	return (ret);
#else
	return (br->br_ring[br->br_cons_head]);
#endif
}

/*
 * Return 1 if the ring is full.  The result is only a snapshot and
 * may already be stale by the time the caller acts on it.
 */
static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

/*
 * Return 1 if the ring is empty.  As above, this is only a snapshot.
 */
static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

/*
 * Return the number of entries in the ring.  Adding br_prod_size
 * keeps the difference non-negative when the producer index has
 * wrapped past the consumer index; masking then reduces the result
 * modulo the ring size.
 */
static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}
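
/*
 * A worked example of the count arithmetic above (illustrative values):
 * with a ring of size 8 (mask 7), br_prod_tail = 2, and br_cons_tail = 6,
 * the producer index has wrapped, and
 *
 *	(8 + 2 - 6) & 7 = 4
 *
 * correctly reports the four entries at slots 6, 7, 0, and 1.
 */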

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);

#endif