// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which the iteration should start
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
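
/*
 * Typical use of the iterator, as a minimal sketch (assuming the inline
 * helpers vmw_piter_next() and vmw_piter_dma_addr() declared in
 * vmwgfx_drv.h):
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *		...hand addr to the device...
 *	}
 *
 * Note the advance before the first dereference, matching the
 * __sg_page_iter_start convention described above.
 */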

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
			vsgt->sgt = vmw_tt->dma_ttm.sg;
		} else {
			vsgt->sgt = &vmw_tt->sgt;
			ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
				vsgt->pages, vsgt->num_pages, 0,
				(unsigned long)vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->drm.dev),
				GFP_KERNEL);
			if (ret)
				goto out_sg_alloc_fail;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	drm_warn(&dev_priv->drm, "VSG table map failed!");
	sg_free_table(vsgt->sgt);
	vsgt->sgt = NULL;
out_sg_alloc_fail:
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}


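/**
 * vmw_ttm_bind - Bind a TTM tt to a device-visible placement
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to bind.
 * @bo_mem: The resource describing the placement being bound to.
 *
 * Maps the backing pages for DMA if needed, then binds them as a GMR or a
 * MOB depending on @bo_mem->mem_type, creating the MOB on first use.
 * Binding to the driver-private system placement requires no device action.
 */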
static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
		/* Nothing to be done for a system bind */
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

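/**
 * vmw_ttm_unbind - Undo a binding previously set up by vmw_ttm_bind
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to unbind.
 *
 * Releases the GMR or MOB binding and, in the dma_map_bind mode, also tears
 * down the DMA mappings that were created for the bind.
 */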
static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


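/**
 * vmw_ttm_populate - Allocate or import backing pages for a TTM tt
 *
 * @bdev: Pointer to the TTM device.
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: TTM operation context for the allocation.
 *
 * For externally owned (prime) objects only the DMA address array is filled
 * in from the supplied sg table; otherwise pages are allocated from the TTM
 * pool.
 */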
static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (external && ttm->sg)
		return  drm_prime_sg_to_dma_addr_array(ttm->sg,
						       ttm->dma_address,
						       ttm->num_pages);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (external)
		return;

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	ttm_pool_free(&bdev->pool, ttm);
}

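/**
 * vmw_ttm_tt_create - TTM ttm_tt_create callback
 *
 * @bo: The buffer object that needs a struct ttm_tt.
 * @page_flags: TTM page flags for the new tt.
 *
 * Allocates a struct vmw_ttm_tt and initializes the embedded ttm_tt, using
 * ttm_sg_tt_init() when DMA addresses must be tracked (coherent mapping mode
 * or imported sg objects) and ttm_tt_init() otherwise.
 */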
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;
	bool external = bo->type == ttm_bo_type_sg;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
	vmw_be->mob = NULL;

	if (external)
		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached, 0);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

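/**
 * vmw_ttm_io_mem_reserve - TTM io_mem_reserve callback
 *
 * @bdev: Pointer to the TTM device.
 * @mem: The resource that needs bus placement information.
 *
 * Only VRAM is accessed through an io aperture: its bus offset is the
 * resource start in pages converted to bytes plus the start of the VRAM
 * aperture. System, GMR and MOB placements need no io-space setup.
 */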
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}

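/**
 * vmw_move - TTM move callback
 *
 * @bo: The buffer object being moved.
 * @evict: Whether the move is an eviction.
 * @ctx: TTM operation context.
 * @new_mem: The resource describing the destination placement.
 * @hop: Intermediate-hop placement (not used by this driver).
 *
 * Binds the tt to the new placement when that placement is tt-backed,
 * notifies the resource and query code of the move, and either completes
 * the move without copying (when both placements are tt-backed) or falls
 * back to ttm_bo_move_memcpy().
 */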
static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *new_man;
	struct ttm_resource_manager *old_man = NULL;
	int ret = 0;

	new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	if (bo->resource)
		old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
			      bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man && old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

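/**
 * vmw_bo_create_and_populate - Create a pinned, populated kernel buffer object
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object in bytes.
 * @domain: Placement domain to create and pin the buffer in.
 * @bo_p: On success, set to point at the new buffer object.
 *
 * Creates a pinned kernel buffer object, populates its backing pages and
 * sets up its DMA mappings so that the buffer is immediately usable by the
 * device. A rough caller-side sketch (the teardown helpers named below are
 * the usual vmw_bo ones and are shown for illustration only):
 *
 *	struct vmw_bo *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE,
 *					 VMW_BO_DOMAIN_SYS, &vbo);
 *	if (ret)
 *		return ret;
 *	...use the buffer...
 *	vmw_bo_unpin(vbo);
 *	vmw_bo_unreference(&vbo);
 */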
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size, u32 domain,
			       struct vmw_bo **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct vmw_bo *vbo;
	int ret;
	struct vmw_bo_params bo_params = {
		.domain = domain,
		.busy_domain = domain,
		.bo_type = ttm_bo_type_kernel,
		.size = bo_size,
		.pin = true
	};

	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(&vbo->tbo);

	if (likely(ret == 0))
		*bo_p = vbo;
	return ret;
}