// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

/*
 * Fence errors are negative errno values, so the positive value 1 can
 * never be a real fence error and works as a sentinel for "no error
 * reported yet".
 */
#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Clear the error flag if not actually set. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}
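
/*
 * Error state machine (informational): array->base.error starts out as
 * PENDING_ERROR when the array is created, the first fence to signal with
 * an error replaces the sentinel via the cmpxchg() above, and the sentinel
 * is cleared back to 0 before the array reports itself signaled if no
 * fence ever set an error.
 */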

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	dma_fence_array_clear_pending_error(array);
	return true;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

static void dma_fence_array_set_deadline(struct dma_fence *fence,
					 ktime_t deadline)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_set_deadline(array->fences[i], deadline);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
	.set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on allocation failure.
 *
 * The caller should allocate the @fences array with @num_fences entries
 * and fill it with the fences it wants to add to the object. Ownership of
 * the array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	WARN_ON(!num_fences || !fences);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	/* For signal_on_any a single signaled fence brings num_pending to zero. */
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	/*
	 * dma_fence_array objects should never contain any other fence
	 * containers or otherwise we run into recursion and potential kernel
	 * stack overflow on operations on the dma_fence_array.
	 *
	 * The correct way of handling this is to flatten out the array by the
	 * caller instead.
	 *
	 * Enforce this here by checking that we don't create a dma_fence_array
	 * with any container inside.
	 */
	while (num_fences--)
		WARN_ON(dma_fence_is_container(fences[num_fences]));

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
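
/*
 * Usage example (a minimal sketch, not taken from any driver; @a and @b
 * stand for fences the caller already holds references to): build an
 * array that signals once both fences have signaled. The @fences buffer
 * must be heap-allocated because the array takes ownership of it and
 * kfree()s it on release, together with dropping one reference per fence:
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *
 *	fences[0] = dma_fence_get(a);
 *	fences[1] = dma_fence_get(b);
 *	array = dma_fence_array_create(2, fences,
 *				       dma_fence_context_alloc(1), 1,
 *				       false);
 *	if (!array) {
 *		dma_fence_put(fences[0]);
 *		dma_fence_put(fences[1]);
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 * The aggregate is then used through &array->base like any other fence;
 * a real driver would allocate its fence context once, not per array.
 */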

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
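
/*
 * Usage example (a sketch; @fence and @ctx are placeholders for values
 * the caller owns): a driver that submits all its work on a single
 * timeline can skip waiting when every fence comes from that timeline,
 * since execution within one fence context is ordered anyway:
 *
 *	if (!dma_fence_match_context(fence, ctx)) {
 *		ret = dma_fence_wait(fence, true);
 *		if (ret)
 *			return ret;
 *	}
 */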

/**
 * dma_fence_array_first - return the first fence in the array
 * @head: fence which may or may not be a dma_fence_array
 *
 * If @head is not a dma_fence_array, return @head itself. Otherwise return
 * the first fence of the array, or NULL when the array is empty. No extra
 * reference is taken.
 */
struct dma_fence *dma_fence_array_first(struct dma_fence *head)
{
	struct dma_fence_array *array;

	if (!head)
		return NULL;

	array = to_dma_fence_array(head);
	if (!array)
		return head;

	if (!array->num_fences)
		return NULL;

	return array->fences[0];
}
EXPORT_SYMBOL(dma_fence_array_first);

/**
 * dma_fence_array_next - return the fence at the given index
 * @head: fence which may or may not be a dma_fence_array
 * @index: index into the array of @head
 *
 * Return the fence at position @index if @head is a dma_fence_array with
 * at least that many fences, NULL otherwise. No extra reference is taken.
 */
struct dma_fence *dma_fence_array_next(struct dma_fence *head,
				       unsigned int index)
{
	struct dma_fence_array *array = to_dma_fence_array(head);

	if (!array || index >= array->num_fences)
		return NULL;

	return array->fences[index];
}
EXPORT_SYMBOL(dma_fence_array_next);
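
/*
 * Iteration example (a sketch): dma_fence_array_first() and
 * dma_fence_array_next() are the building blocks of the
 * dma_fence_array_for_each() helper in <linux/dma-fence-array.h>.
 * Walking all backing fences of @fence, which may or may not be an
 * array, looks like this; for a plain fence the loop visits just
 * @fence itself:
 *
 *	struct dma_fence *f;
 *	unsigned int index;
 *
 *	dma_fence_array_for_each(f, index, fence)
 *		pr_info("fence %llu#%llu\n", f->context, f->seqno);
 */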