/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_api.h>
#include <dev/drm2/ttm/ttm_memory.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/drm_global.h>
#include <sys/rwlock.h>
#include <sys/tree.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from the callback or you will cause an
	 * infinite loop.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
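
/*
 * A minimal sketch of a backend implementation (illustrative only), assuming
 * a hypothetical driver type "struct mydrv_ttm_tt" that embeds a struct
 * ttm_tt as its first member and programs its own GART through hypothetical
 * mydrv_gart_bind()/mydrv_gart_unbind() helpers; the M_MYDRV malloc type is
 * likewise hypothetical:
 *
 *	static int
 *	mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		struct mydrv_ttm_tt *gtt = (struct mydrv_ttm_tt *)ttm;
 *
 *		return (mydrv_gart_bind(gtt, bo_mem->start << PAGE_SHIFT,
 *		    ttm->num_pages, ttm->pages));
 *	}
 *
 *	static int
 *	mydrv_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		return (mydrv_gart_unbind((struct mydrv_ttm_tt *)ttm));
 *	}
 *
 *	static void
 *	mydrv_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *gtt = (struct mydrv_ttm_tt *)ttm;
 *
 *		ttm_tt_fini(ttm);
 *		free(gtt, M_MYDRV);
 *	}
 *
 *	static struct ttm_backend_func mydrv_backend_func = {
 *		.bind = mydrv_ttm_bind,
 *		.unbind = mydrv_ttm_unbind,
 *		.destroy = mydrv_ttm_destroy,
 *	};
 */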

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter/gather table for SG objects imported via dma-buf.
 * @glob: Pointer to the struct ttm_bo_global.
 * @swap_storage: Pointer to the VM object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct vm_page *dummy_read_page;
	struct vm_page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};
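
/*
 * Drivers typically embed struct ttm_tt (or struct ttm_dma_tt when they need
 * per-page DMA addresses) as the first member of their own tt structure, so
 * the struct ttm_tt pointer handed to the backend callbacks can simply be
 * cast back; an illustrative sketch with a hypothetical driver structure:
 *
 *	struct mydrv_dma_tt {
 *		struct ttm_dma_tt ttm_dma;	-- must be first, so casts work
 *		uint64_t gart_offset;		-- driver-private state
 *	};
 */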

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory region can't accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
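
/*
 * Most drivers simply plug in the generic range manager exported below as
 * ttm_bo_manager_func; a driver-private manager is just a table of the
 * callbacks above. A skeletal sketch, with hypothetical mydrv_* helpers:
 *
 *	static const struct ttm_mem_type_manager_func mydrv_gtt_mgr_func = {
 *		.init = mydrv_gtt_mgr_init,
 *		.takedown = mydrv_gtt_mgr_takedown,
 *		.get_node = mydrv_gtt_mgr_get_node,
 *		.put_node = mydrv_gtt_mgr_put_node,
 *		.debug = mydrv_gtt_mgr_debug,
 *	};
 */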

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct sx io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backed by system pages.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device:
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct vm_page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 * FreeBSD: use devfs_get_cdevpriv etc.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * see the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* Hook to notify the driver about a move so it
	 * can do tiling things. */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* Notify the driver we are taking a fault on this BO
	 * and have reserved it. */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback for when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve and io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
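
/*
 * A driver ties the callbacks above together in one static table; a sketch
 * assuming hypothetical mydrv_* implementations (unused optional hooks such
 * as move_notify may simply be left unset):
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create = mydrv_ttm_tt_create,
 *		.ttm_tt_populate = mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
 *		.invalidate_caches = mydrv_invalidate_caches,
 *		.init_mem_type = mydrv_init_mem_type,
 *		.evict_flags = mydrv_evict_flags,
 *		.move = mydrv_bo_move,
 *		.verify_access = mydrv_verify_access,
 *		.sync_obj_signaled = mydrv_fence_signaled,
 *		.sync_obj_wait = mydrv_fence_wait,
 *		.sync_obj_flush = mydrv_fence_flush,
 *		.sync_obj_unref = mydrv_fence_unref,
 *		.sync_obj_ref = mydrv_fence_ref,
 *		.io_mem_reserve = mydrv_io_mem_reserve,
 *		.io_mem_free = mydrv_io_mem_free,
 *	};
 */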

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};
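
/*
 * The global object is normally registered through the drm_global machinery,
 * with ttm_bo_global_init() and ttm_bo_global_release() (declared further
 * below) as the init and release hooks; an illustrative sketch, assuming the
 * driver keeps a struct ttm_bo_global_ref named bo_global_ref and already
 * holds a ttm_mem_global reference:
 *
 *	mydrv->bo_global_ref.mem_glob = mydrv->mem_global_ref.object;
 *	global_ref = &mydrv->bo_global_ref.ref;
 *	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 *	global_ref->size = sizeof(struct ttm_bo_global);
 *	global_ref->init = &ttm_bo_global_init;
 *	global_ref->release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(global_ref);
 */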

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {
	u_int kobj_ref;

	/**
	 * Constant after init.
	 */

	struct ttm_mem_global *mem_glob;
	struct vm_page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct sx device_list_mutex;
	struct mtx lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct set up by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct rwlock vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	struct mtx fence_lock;
	/*
	 * Protected by the vm lock.
	 */
	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global::lru_lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct timeout_task wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
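
/*
 * For example, to replace only the caching bits of a set of placement flags
 * (TTM_PL_FLAG_XX and TTM_PL_MASK_CACHING as defined in ttm_placement.h)
 * while leaving the memory-type bits untouched:
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *
 * afterwards @flags selects VRAM with write-combined caching.
 */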

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * 0: Success.
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
			unsigned long size, uint32_t page_flags,
			struct vm_page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct vm_page *dummy_read_page);
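
/*
 * A driver's ttm_bo_driver::ttm_tt_create callback typically allocates its
 * own tt structure and hands it to ttm_tt_init(); a sketch, where the
 * mydrv_ttm_tt type (struct ttm_tt as its first member), the
 * mydrv_backend_func table and the M_MYDRV malloc type are hypothetical:
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 *	    uint32_t page_flags, struct vm_page *dummy_read_page)
 *	{
 *		struct mydrv_ttm_tt *gtt;
 *
 *		gtt = malloc(sizeof(*gtt), M_MYDRV, M_WAITOK | M_ZERO);
 *		gtt->ttm.func = &mydrv_backend_func;
 *		if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags,
 *		    dummy_read_page) != 0) {
 *			free(gtt, M_MYDRV);
 *			return (NULL);
 *		}
 *		return (&gtt->ttm);
 *	}
 */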

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of the ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct vm_page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct vm_object *persistent_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				bool interruptible,
				bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);
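
/*
 * A typical bring-up sequence once the global references are in place; a
 * sketch, where mydrv, mydrv_bo_driver and MYDRV_FILE_PAGE_OFFSET are
 * hypothetical and error handling is omitted:
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev,
 *	    mydrv->bo_global_ref.ref.object, &mydrv_bo_driver,
 *	    MYDRV_FILE_PAGE_OFFSET, mydrv->need_dma32);
 */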

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);


/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence);
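
/*
 * For a single buffer, the common pattern is simply to pair this with
 * ttm_bo_unreserve() (declared below); an illustrative sketch without the
 * multi-buffer sequence handling described above:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret != 0)
 *		return (ret);
 *	...inspect or validate bo...
 *	ttm_bo_unreserve(bo);
 */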

/**
 * ttm_bo_reserve_slowpath_nolru:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @sequence: Set (@bo)->sequence to this value after lock
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve_slowpath.
 */
extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
					 bool interruptible,
					 uint32_t sequence);


/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @sequence: Set (@bo)->sequence to this value after lock
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
				   bool interruptible, uint32_t sequence);

/**
 * ttm_bo_reserve_nolru:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_sequence,
				 uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unreserve_locked
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 * Needs to be called with struct ttm_bo_global::lru_lock held.
 */
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
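
/*
 * Drivers commonly implement their ttm_bo_driver::move callback by trying an
 * accelerated copy first and falling back to the memcpy path; a sketch with a
 * hypothetical mydrv_bo_move_blit() helper:
 *
 *	static int
 *	mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *	    bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 *	{
 *		int ret;
 *
 *		ret = mydrv_bo_move_blit(bo, evict, no_wait_gpu, new_mem);
 *		if (ret != 0)
 *			ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu,
 *			    new_mem);
 *		return (ret);
 *	}
 */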

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags, TTM_PL_FLAG_XX, of the map.
 *
 * Utility function that returns the memory attribute (vm_memattr_t) that
 * should be used for setting up a mapping with the caching model indicated
 * by @caching_flags.
 */
extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
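
/*
 * The generic range manager above is what most drivers plug into their
 * init_mem_type callback for a TT or VRAM range; an illustrative fragment
 * (placement flags as defined in ttm_placement.h, values driver-specific):
 *
 *	man->func = &ttm_bo_manager_func;
 *	man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 *	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 *	man->default_caching = TTM_PL_FLAG_WC;
 */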

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct vm_page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

int	ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
	    struct ttm_buffer_object *b);

RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

#endif