/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

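/* Kick off one DMA transfer: write the buffer's physical address, spin until
 * the command engine reports ready (a GCOMMANDSTATUS value of 4 appears to be
 * the idle indication), then write the length in 32-bit words to start the
 * transfer.
 */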
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
				      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address));
	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		;
	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

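/* Wait for a single-MX board to go quiescent: let the outstanding DMA count
 * drain, wait for input FIFO space, request a sync, and read the output FIFO
 * until the sync tag comes back.
 */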
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

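/* Same idea as gamma_dma_quiescent_single(), but for dual-MX boards: the sync
 * is broadcast to both MX chips and each output FIFO is drained until the
 * sync tag appears (the second MX is reached at a 0x10000 register offset).
 */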
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

				/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

				/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

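/* Busy-wait until the outstanding DMA word count drops to zero. */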
void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
}

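/* Non-blocking check: true when no DMA words remain outstanding. */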
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	return !GAMMA_READ(GAMMA_DMACOUNT);
}

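/* Interrupt handler: count the IRQ, reload the delay timer and write the
 * interrupt flag registers (apparently to acknowledge the interrupt), then,
 * if the engine is idle, retire the buffer that just completed and queue the
 * next dispatch on the immediate task queue so it runs outside interrupt
 * context.
 */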
void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t	    *dev      = (drm_device_t *)device;
	drm_device_dma_t    *dma      = dev->dma;
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05s */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
				/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag)) return;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

				/* Dispatch new buffer */
		queue_task(&dev->tq, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}
}

/* Only called by gamma_dma_schedule. */
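/* Dispatch dma->next_buffer: take the hardware lock unless the buffer was
 * queued "while locked" or the caller already holds it, perform any needed
 * context switch, start the transfer, and retire the previously dispatched
 * buffer.  Returns -EBUSY when the engine or the lock is unavailable.
 */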
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	 address;
	unsigned long	 length;
	drm_buf_t	 *buf;
	int		 retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 dma_start, dma_stop;
#endif

	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf	= dma->next_buffer;
	address = (unsigned long)buf->address;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
					      DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
				/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending	 = 1;
	buf->waiting	 = 0;
	buf->list	 = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:

	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}

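/* Bottom-half entry points: both simply re-run the scheduler outside
 * interrupt context.
 */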
static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}

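/* Main scheduling loop.  If a buffer was previously selected but could not be
 * sent, retry it; otherwise pick a queue via gamma_select_queue(), pull the
 * next buffer from its waitlist, and dispatch it.  The loop repeats (up to a
 * bounded retry count) while the engine remains ready, so several buffers may
 * be dispatched per call.
 */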
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int		 next;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 retcode   = 0;
	int		 processed = 0;
	int		 missed;
	int		 expire	   = 20;
	drm_device_dma_t *dma	   = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 schedule_start;
#endif

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
				/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return -EBUSY;
	}
	missed = atomic_read(&dev->counts[10]);

#if DRM_DMA_HISTOGRAM
	schedule_start = get_cycles();
#endif

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
				/* Unsent buffer that was previously
				   selected, but that couldn't be sent
				   because the lock could not be obtained
				   or the DMA engine wasn't ready.  Try
				   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue	 = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
							   - schedule_start)]);
#endif
	return retcode;
}

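/* Synchronous high-priority send path (the _DRM_DMA_PRIORITY flag): lock out
 * the interrupt handler, take the hardware lock unless the caller passed
 * _DRM_DMA_WHILE_LOCKED, and dispatch the requested buffers directly and in
 * order, sleeping across any context switch that is needed.
 */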
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

				/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  current->pid, buf->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
				/* This isn't a race condition on
				   buf->list, since our concern is the
				   buffer reclaim during the time the
				   process closes the /dev/drm? handle, so
				   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
				/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}


cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}

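/* Normal (queued) send path: enqueue the buffers, kick the scheduler, and,
 * when _DRM_DMA_BLOCK is set, sleep until the last buffer of the request has
 * completed.
 */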
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t	  *last_buf = NULL;
	int		  retcode   = 0;
	drm_device_dma_t  *dma	    = dev->dma;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = gamma_dma_enqueue(dev, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
			if (!waitqueue_active(&last_buf->dma_wait)) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  last_buf->pid,
				  current->pid);
		}
	}
	return retcode;
}

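/* ioctl entry point for DMA requests: copy the request from user space,
 * validate the send and request counts, send any buffers via the priority or
 * queued path, optionally hand free buffers back to the caller, and copy the
 * updated request back out.
 */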
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t	  *priv	    = filp->private_data;
	drm_device_t	  *dev	    = priv->dev;
	drm_device_dma_t  *dma	    = dev->dma;
	int		  retcode   = 0;
	drm_dma_t	  d;

	if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
		return -EFAULT;

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(dev, &d);
		else
			retcode = gamma_dma_send_buffers(dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(dev, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
		return -EFAULT;

	return retcode;
}