/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

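/*
 * This GEM init callback is not used on this port and must never be
 * reached; panic if it is.
 */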
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	panic("radeon_gem_object_init() must not be called");

	return 0;
}

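/*
 * Release the driver's reference on the backing radeon_bo when the GEM
 * object goes away; prime imports are not handled yet (FREEBSD_WIP).
 */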
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef FREEBSD_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* FREEBSD_WIP */
		radeon_bo_unref(&robj);
	}
}

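/*
 * Allocate a radeon_bo and return its embedded GEM object.  The alignment is
 * rounded up to at least one page and the size is capped at
 * min(visible VRAM, GTT).  A failed VRAM allocation is retried with GTT added
 * to the initial domain.
 */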
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		DRM_ERROR("%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	sx_xlock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	sx_xunlock(&rdev->gem.mutex);

	return 0;
}

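/*
 * Validate a buffer into the requested domain.  Currently this only waits
 * for the bo to become idle when CPU access is requested.
 */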
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		DRM_ERROR("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access; wait for the object to become idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			DRM_ERROR("Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and open
 * ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

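/*
 * Called when a GEM handle is closed.  On CAYMAN and newer, drop the
 * per-file VM reference on the buffer and remove the mapping once the last
 * reference is gone.
 */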
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

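/*
 * Turn an -EDEADLK (GPU lockup detected) into a GPU reset and tell the
 * caller to retry with -EAGAIN when the reset succeeds.
 */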
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
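/*
 * Report VRAM and GTT sizes to userspace, minus the space already consumed
 * by stolen VGA memory, the fbdev framebuffer and the ring/IB allocations.
 */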
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

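/*
 * GEM_CREATE: allocate a buffer object of the requested size, alignment and
 * initial domain and return a handle to it.
 */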
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	sx_slock(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	sx_sunlock(&rdev->exclusive_lock);
	return 0;
}

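/*
 * GEM_SET_DOMAIN: move a buffer object into the requested domain; for the
 * CPU domain this currently just waits for the bo to become idle.
 */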
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	sx_slock(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		sx_sunlock(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	sx_sunlock(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

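/*
 * Look up a handle and return the mmap offset userspace should use to map
 * the buffer.
 */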
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

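/*
 * GEM_BUSY: do a non-blocking wait on the buffer and report its current
 * placement (VRAM, GTT or CPU) back to userspace.
 */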
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

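/*
 * GEM_WAIT_IDLE: block until the buffer is idle, then give the ASIC-specific
 * ioctl_wait_idle hook a chance to run if one is provided.
 */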
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

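/*
 * GEM_VA: map or unmap a buffer object in the per-file virtual address
 * space.  Requires the VM manager to be enabled, rejects vm_id and offsets
 * inside the reserved area, and only accepts snooped mappings for now.
 */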
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; moving
	 * forward we can then use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(dev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

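/*
 * Create a "dumb" buffer in VRAM for the generic KMS dumb-buffer interface,
 * computing the pitch and page-aligned size from the requested
 * width/height/bpp.
 */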
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}