/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
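#include "lib_sw_fence.h" /* assumed selftest helper header: struct timed_fence, timed_fence_init/fini */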

static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
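		/* The tests poll completion via i915_sw_fence_done(), nothing to do on signal */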
		break;

	case FENCE_FREE:
		/* Leave the fence for the caller to free it after testing */
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}

static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}

static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}

static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	/* Test i915_sw_fence signaling and completion testing */
	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}

static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

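	/* Any await that would close a cycle must be rejected with -EINVAL */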
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		ret = -EINVAL;

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

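	/* Nothing in the chain should signal until the first fence is committed */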
	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}

struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};

static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

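	/* Block until "in" fires, then publish the value before committing "out" */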
	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}

static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	struct workqueue_struct *wq;
	int ret = 0;

	wq = alloc_workqueue("i915-selftest", 0, 0);
	if (wq == NULL)
		return -ENOMEM;

	/* Test use of i915_sw_fence as an interprocess signaling mechanism */
	ipc.in = alloc_fence();
	if (!ipc.in) {
		ret = -ENOMEM;
		goto err_work;
	}
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	queue_work(wq, &ipc.work);

	wait_for_completion(&ipc.started);

	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
err_work:
	destroy_workqueue(wq);

	return ret;
}

static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

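	/* Keep the window between arming the timer and checking it small */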
	preempt_disable();
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	preempt_enable();
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		preempt_disable();
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}
		preempt_enable();

		i915_sw_fence_wait(&tf.fence);

		preempt_disable();
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}
		preempt_enable();
		timed_fence_fini(&tf);
	}

	return 0;

err:
	preempt_enable();
	timed_fence_fini(&tf);
	return -EINVAL;
}

static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}

static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

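	/* Commit now so only the dma-fence await (or its timeout) holds the fence */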
	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}

static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

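	/* "timeout" should autosignal once its delay elapses; "not" only when dma is signaled */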
	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}

int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}