1// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
5
6#include <linux/slab.h>
7
8#include <drm/ttm/ttm_placement.h>
9#include <drm/ttm/ttm_bo.h>
10
11#include <drm/drm_buddy.h>
12
13#include "i915_ttm_buddy_manager.h"
14
15#include "i915_gem.h"
16
/*
 * struct i915_ttm_buddy_manager - TTM resource manager backed by a drm_buddy
 * allocator, with extra tracking for the CPU-visible (mappable) portion of
 * the managed region (relevant on small-BAR configurations).
 */
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager; /* embedded base; container_of() to get back here */
	struct drm_buddy mm; /* underlying buddy allocator state */
	struct list_head reserved; /* blocks taken via i915_ttm_buddy_man_reserve(); freed at fini */
	struct mutex lock; /* protects mm, reserved and the visible_* counters */
	unsigned long visible_size; /* size of the CPU-visible window, in pages */
	unsigned long visible_avail; /* CPU-visible pages still available */
	unsigned long visible_reserved; /* CPU-visible pages consumed by the reserved list */
	u64 default_page_size; /* min page size (bytes) when the BO sets no page_alignment */
};
27
/* Upcast from the embedded ttm_resource_manager to our manager type. */
static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}
33
/*
 * i915_ttm_buddy_man_alloc - ttm_resource_manager_func.alloc hook.
 *
 * Carve the backing store for @bo out of the buddy allocator, honouring the
 * placement flags (TOPDOWN, CONTIGUOUS, and an optional [fpfn, lpfn) range
 * restriction) and charge the CPU-visible accounting for any pages that land
 * inside the visible window.
 *
 * Returns 0 on success, -ENOMEM if the resource wrapper can't be allocated,
 * -E2BIG if the request can't possibly fit below lpfn, -ENOSPC when the
 * visible window is exhausted, or the error from drm_buddy_alloc_blocks().
 */
static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

	/* lpfn == 0 means "no upper limit", i.e. the whole region. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	/* Translate TTM placement flags into drm_buddy allocation flags. */
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
		bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;

	/* Only request a range-restricted search if one was actually asked for. */
	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.size);
	size = bman_res->base.size;

	/* The BO page_alignment, when set, overrides the manager default. */
	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);
	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));

	/*
	 * NOTE(review): lpfn is unsigned long, so lpfn << PAGE_SHIFT could
	 * wrap on 32-bit for very large regions before being compared with
	 * the u64 size — confirm whether such region sizes are reachable.
	 */
	if (size > lpfn << PAGE_SHIFT) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	mutex_lock(&bman->lock);
	/*
	 * If the whole allocation must land inside the CPU-visible window,
	 * fail fast when we already know there isn't enough visible space.
	 */
	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
		mutex_unlock(&bman->lock);
		err = -ENOSPC;
		goto err_free_res;
	}

	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	if (unlikely(err))
		goto err_free_blocks;

	if (lpfn <= bman->visible_size) {
		/* Entire allocation lies below visible_size by construction. */
		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
	} else {
		/*
		 * Otherwise walk the blocks and count only the pages that
		 * fall inside the CPU-visible window.
		 */
		struct drm_buddy_block *block;

		list_for_each_entry(block, &bman_res->blocks, link) {
			unsigned long start =
				drm_buddy_block_offset(block) >> PAGE_SHIFT;

			if (start < bman->visible_size) {
				unsigned long end = start +
					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

				bman_res->used_visible_size +=
					min(end, bman->visible_size) - start;
			}
		}
	}

	if (bman_res->used_visible_size)
		bman->visible_avail -= bman_res->used_visible_size;

	mutex_unlock(&bman->lock);

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	/* Blocks were allocated under the lock; free them before dropping it. */
	drm_buddy_free_list(mm, &bman_res->blocks, 0);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}
136
/*
 * i915_ttm_buddy_man_free - ttm_resource_manager_func.free hook.
 *
 * Return the resource's buddy blocks to the allocator and give back the
 * CPU-visible pages it was charged for in i915_ttm_buddy_man_alloc().
 */
static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/* Lock covers both the buddy state and the visible accounting. */
	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
	bman->visible_avail += bman_res->used_visible_size;
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}
151
/*
 * i915_ttm_buddy_man_intersects - ttm_resource_manager_func.intersects hook.
 *
 * Report whether any part of @res overlaps the [fpfn, lpfn) window described
 * by @place. Used by TTM to decide whether evicting @res could make room for
 * a pending range-restricted allocation.
 */
static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
					  struct ttm_resource *res,
					  const struct ttm_place *place,
					  size_t size)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	struct drm_buddy_block *block;

	/* No range restriction: everything in the region intersects. */
	if (!place->fpfn && !place->lpfn)
		return true;

	GEM_BUG_ON(!place->lpfn);

	/*
	 * If we just want something mappable then we can quickly check
	 * if the current victim resource is using any of the CPU
	 * visible portion.
	 */
	if (!place->fpfn &&
	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
		return bman_res->used_visible_size > 0;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &bman_res->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		/* Half-open interval overlap test against the place window. */
		if (place->fpfn < lpfn && place->lpfn > fpfn)
			return true;
	}

	return false;
}
189
/*
 * i915_ttm_buddy_man_compatible - ttm_resource_manager_func.compatible hook.
 *
 * Report whether @res already lies entirely within the [fpfn, lpfn) window
 * described by @place, i.e. whether the resource can stay where it is and
 * still satisfy the placement.
 */
static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
					  struct ttm_resource *res,
					  const struct ttm_place *place,
					  size_t size)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	struct drm_buddy_block *block;

	/* No range restriction: anything in the region is compatible. */
	if (!place->fpfn && !place->lpfn)
		return true;

	GEM_BUG_ON(!place->lpfn);

	/*
	 * Mappable-only placement: compatible iff every page of the resource
	 * is counted as CPU visible.
	 */
	if (!place->fpfn &&
	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
		return bman_res->used_visible_size == PFN_UP(res->size);

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &bman_res->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		/* Any block poking outside the window makes it incompatible. */
		if (fpfn < place->fpfn || lpfn > place->lpfn)
			return false;
	}

	return true;
}
222
/*
 * i915_ttm_buddy_man_debug - ttm_resource_manager_func.debug hook.
 *
 * Dump the manager configuration, visible-window accounting, buddy allocator
 * state and the reserved block list to @printer.
 */
static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_reserved: %lluMiB\n",
		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}
246
/* vfunc table wiring this manager into the TTM resource manager framework. */
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.intersects = i915_ttm_buddy_man_intersects,
	.compatible = i915_ttm_buddy_man_compatible,
	.debug = i915_ttm_buddy_man_debug,
};
254
255/**
256 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
257 * @bdev: The ttm device
258 * @type: Memory type we want to manage
259 * @use_tt: Set use_tt for the manager
260 * @size: The size in bytes to manage
261 * @visible_size: The CPU visible size in bytes to manage
262 * @default_page_size: The default minimum page size in bytes for allocations,
263 * this must be at least as large as @chunk_size, and can be overridden by
264 * setting the BO page_alignment, to be larger or smaller as needed.
265 * @chunk_size: The minimum page size in bytes for our allocations i.e
266 * order-zero
267 *
268 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property that allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
271 * power-of-two, or naturally start from zero, then this should be fine.  Also
272 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
273 * if say there is some unusable range from the start of the region. We can
274 * revisit this in the future and make the interface accept an actual starting
275 * offset and let it take care of the rest.
276 *
277 * Note that if the @size is not aligned to the @chunk_size then we perform the
278 * required rounding to get the usable size. The final size in pages can be
279 * taken from &ttm_resource_manager.size.
280 *
281 * Return: 0 on success, negative error code on failure.
282 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	/* drm_buddy_init() rounds @size down to a @chunk_size multiple. */
	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	/* visible_* accounting is kept in pages, not bytes. */
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	/* Publish the manager to TTM last, once fully set up. */
	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
321
322/**
323 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
324 * @bdev: The ttm device
325 * @type: Memory type we want to manage
326 *
327 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
328 * also be freed for us here.
329 *
330 * Return: 0 on success, negative error code on failure.
331 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

	/* Stop new allocations, then evict everything still resident. */
	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved, 0);
	drm_buddy_fini(mm);
	/*
	 * With everything evicted and the reserved list returned, the
	 * visible accounting must balance out to the full window again.
	 */
	bman->visible_avail += bman->visible_reserved;
	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}
359
360/**
361 * i915_ttm_buddy_man_reserve - Reserve address range
362 * @man: The buddy allocator ttm manager
363 * @start: The offset in bytes, where the region start is assumed to be zero
364 * @size: The size in bytes
365 *
366 * Note that the starting address for the region is always assumed to be zero.
367 *
368 * Return: 0 on success, negative error code on failure.
369 */
370int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
371			       u64 start, u64 size)
372{
373	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
374	struct drm_buddy *mm = &bman->mm;
375	unsigned long fpfn = start >> PAGE_SHIFT;
376	unsigned long flags = 0;
377	int ret;
378
379	flags |= DRM_BUDDY_RANGE_ALLOCATION;
380
381	mutex_lock(&bman->lock);
382	ret = drm_buddy_alloc_blocks(mm, start,
383				     start + size,
384				     size, mm->chunk_size,
385				     &bman->reserved,
386				     flags);
387
388	if (fpfn < bman->visible_size) {
389		unsigned long lpfn = fpfn + (size >> PAGE_SHIFT);
390		unsigned long visible = min(lpfn, bman->visible_size) - fpfn;
391
392		bman->visible_reserved += visible;
393		bman->visible_avail -= visible;
394	}
395	mutex_unlock(&bman->lock);
396
397	return ret;
398}
399
400/**
401 * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion
402 * in pages.
403 * @man: The buddy allocator ttm manager
404 */
u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/*
	 * Lockless read: visible_size is set at init and only changed by the
	 * selftest-only force_visible_size() helper — presumably while the
	 * manager is otherwise quiescent (TODO confirm).
	 */
	return bman->visible_size;
}
411
412/**
413 * i915_ttm_buddy_man_avail - Query the avail tracking for the manager.
414 *
415 * @man: The buddy allocator ttm manager
416 * @avail: The total available memory in pages for the entire manager.
417 * @visible_avail: The total available memory in pages for the CPU visible
418 * portion. Note that this will always give the same value as @avail on
419 * configurations that don't have a small BAR.
420 */
void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man,
			      u64 *avail, u64 *visible_avail)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/* Take the lock so both counters form a consistent snapshot. */
	mutex_lock(&bman->lock);
	*avail = bman->mm.avail >> PAGE_SHIFT;
	*visible_avail = bman->visible_avail;
	mutex_unlock(&bman->lock);
}
431
432#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
/*
 * Selftest-only: override the CPU-visible window size (in pages).
 * NOTE(review): written without bman->lock and without touching
 * visible_avail/visible_reserved — assumes the caller guarantees the
 * manager is idle and restores the original value afterwards; confirm
 * against the selftest users.
 */
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
					   u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	bman->visible_size = size;
}
440#endif
441