// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw_mips.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_mips.h"
#include "pvr_vm.h"
#include "pvr_vm_mips.h"

#include <drm/drm_managed.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/**
 * pvr_vm_mips_init() - Initialise MIPS FW pagetable
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL if the page table is too large or the physical bus width
 *    cannot be queried, or
 *  * -%ENOMEM on allocation or DMA-mapping failure.
 */
int
pvr_vm_mips_init(struct pvr_device *pvr_dev)
{
	u32 pt_size = 1 << ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev);
	struct device *dev = from_pvr_device(pvr_dev)->dev;
	struct pvr_fw_mips_data *mips_data;
	u32 phys_bus_width;
	int page_nr;
	int err;

	/* Page table size must be at most ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * 4k pages. */
	if (pt_size > ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * SZ_4K)
		return -EINVAL;

	if (PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width))
		return -EINVAL;

	mips_data = drmm_kzalloc(from_pvr_device(pvr_dev), sizeof(*mips_data), GFP_KERNEL);
	if (!mips_data)
		return -ENOMEM;

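	/*
	 * Allocate and DMA-map each MIPS page table page individually; a
	 * contiguous CPU view of the table is created below with vmap().
	 */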
	for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
		mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!mips_data->pt_pages[page_nr]) {
			err = -ENOMEM;
			goto err_free_pages;
		}

		mips_data->pt_dma_addr[page_nr] = dma_map_page(dev, mips_data->pt_pages[page_nr], 0,
							       PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
			err = -ENOMEM;
			__free_page(mips_data->pt_pages[page_nr]);
			goto err_free_pages;
		}
	}

	mips_data->pt = vmap(mips_data->pt_pages, pt_size >> PAGE_SHIFT, VM_MAP,
			     pgprot_writecombine(PAGE_KERNEL));
	if (!mips_data->pt) {
		err = -ENOMEM;
		goto err_free_pages;
	}

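	/*
	 * When the physical bus is wider than 32 bits the EntryLo PFN field
	 * and cached-policy encoding differ, so pick the matching constants.
	 */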
	mips_data->pfn_mask = (phys_bus_width > 32) ? ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT :
						      ROGUE_MIPSFW_ENTRYLO_PFN_MASK;

	mips_data->cache_policy = (phys_bus_width > 32) ? ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT :
							  ROGUE_MIPSFW_CACHED_POLICY;

	pvr_dev->fw_dev.processor_data.mips_data = mips_data;

	return 0;

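	/* Unwind in reverse order, releasing only the pages mapped so far. */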
err_free_pages:
	while (--page_nr >= 0) {
		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);

		__free_page(mips_data->pt_pages[page_nr]);
	}

	return err;
}

/**
 * pvr_vm_mips_fini() - Release MIPS FW pagetable
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
	int page_nr;

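	/* Tear down in the reverse order of pvr_vm_mips_init(). */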
	vunmap(mips_data->pt);
	for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);

		__free_page(mips_data->pt_pages[page_nr]);
	}

	fw_dev->processor_data.mips_data = NULL;
}

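/*
 * Build the EntryLo flag bits for a MIPS FW page table entry from the
 * requested access permissions and cache policy. Entries are always marked
 * valid and global.
 */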
static u32
get_mips_pte_flags(bool read, bool write, u32 cache_policy)
{
	u32 flags = 0;

	if (read && write) /* Read/write. */
		flags |= ROGUE_MIPSFW_ENTRYLO_DIRTY_EN;
	else if (write)    /* Write only. */
		flags |= ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN;
	else
		WARN_ON(!read);

	flags |= cache_policy << ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;

	flags |= ROGUE_MIPSFW_ENTRYLO_VALID_EN | ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN;

	return flags;
}

/**
 * pvr_vm_mips_map() - Map a FW object into MIPS address space
 * @pvr_dev: Target PowerVR device.
 * @fw_obj: FW object to map.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL if the object does not reside within the FW address space, or
 *  * Any error returned by pvr_fw_object_get_dma_addr().
 */
int
pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	const u64 start = fw_obj->fw_mm_node.start;
	const u64 size = fw_obj->fw_mm_node.size;
	u64 end;
	u32 cache_policy;
	u32 pte_flags;
	s32 start_pfn;
	s32 end_pfn;
	s32 pfn;
	int err;

	if (check_add_overflow(start, size - 1, &end))
		return -EINVAL;

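	/*
	 * The object must lie entirely within the FW heap and be aligned to
	 * 4K page boundaries at both ends.
	 */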
	if (start < ROGUE_FW_HEAP_BASE ||
	    start >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
	    end < ROGUE_FW_HEAP_BASE ||
	    end >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
	    (start & ROGUE_MIPSFW_PAGE_MASK_4K) ||
	    ((end + 1) & ROGUE_MIPSFW_PAGE_MASK_4K))
		return -EINVAL;

	start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
	end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;

	if (pvr_obj->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
		cache_policy = ROGUE_MIPSFW_UNCACHED_CACHE_POLICY;
	else
		cache_policy = mips_data->cache_policy;

	pte_flags = get_mips_pte_flags(true, true, cache_policy);

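	/*
	 * Write one page table entry per 4K FW page, pointing at the object's
	 * DMA address for the corresponding offset.
	 */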
	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		dma_addr_t dma_addr;
		u32 pte;

		err = pvr_fw_object_get_dma_addr(fw_obj,
						 (pfn - start_pfn) <<
						 ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K,
						 &dma_addr);
		if (err)
			goto err_unmap_pages;

		pte = ((dma_addr >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
		       << ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT) & mips_data->pfn_mask;
		pte |= pte_flags;

		WRITE_ONCE(mips_data->pt[pfn], pte);
	}

	pvr_mmu_flush_request_all(pvr_dev);

	return 0;

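	/* Unwind: clear any PTEs written so far, then flush the MMU caches. */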
err_unmap_pages:
	while (--pfn >= start_pfn)
		WRITE_ONCE(mips_data->pt[pfn], 0);

	pvr_mmu_flush_request_all(pvr_dev);
	WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));

	return err;
}

/**
 * pvr_vm_mips_unmap() - Unmap a FW object from MIPS address space
 * @pvr_dev: Target PowerVR device.
 * @fw_obj: FW object to unmap.
 */
void
pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
	const u64 start = fw_obj->fw_mm_node.start;
	const u64 size = fw_obj->fw_mm_node.size;
	const u64 end = start + size;

	const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>
			      ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
	const u32 end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >>
			    ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;

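	/* Clear every PTE covering the object, then flush the MMU caches. */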
	for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)
		WRITE_ONCE(mips_data->pt[pfn], 0);

	pvr_mmu_flush_request_all(pvr_dev);
	WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
}