// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_device_info.h"

#include "pvr_fw.h"
#include "pvr_params.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_stream.h"
#include "pvr_vm.h"

#include <drm/drm_print.h>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Major number for the supported version of the firmware. */
#define PVR_FW_VERSION_MAJOR 1

/**
 * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
 * control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->regs.
 *
 * This method of mapping the device control registers into memory ensures that
 * they are unmapped when the driver is detached (i.e. no explicit cleanup is
 * required).
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by devm_platform_get_and_ioremap_resource().
 */
static int
pvr_device_reg_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
	struct resource *regs_resource;
	void __iomem *regs;

	pvr_dev->regs_resource = NULL;
	pvr_dev->regs = NULL;

	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
	if (IS_ERR(regs))
		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
				     "failed to ioremap gpu registers\n");

	pvr_dev->regs = regs;
	pvr_dev->regs_resource = regs_resource;

	return 0;
}

/**
 * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
 * struct pvr_device->mem_clk.
 *
 * Three clocks are required by the PowerVR device: core, sys and mem. On
 * return, this function guarantees that the clocks are in one of the following
 * states:
 *
 *  * All successfully initialized,
 *  * Core errored, sys and mem uninitialized,
 *  * Core deinitialized, sys errored, mem uninitialized, or
 *  * Core and sys deinitialized, mem errored.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by devm_clk_get(), or
 *  * Any error returned by devm_clk_get_optional().
 */
static int pvr_device_clk_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct clk *core_clk;
	struct clk *sys_clk;
	struct clk *mem_clk;

	core_clk = devm_clk_get(drm_dev->dev, "core");
	if (IS_ERR(core_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
				     "failed to get core clock\n");

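	/*
	 * The sys and mem clocks are optional; devm_clk_get_optional() returns
	 * NULL rather than an error if they are not provided.
	 */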
	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
	if (IS_ERR(sys_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
				     "failed to get sys clock\n");

	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
	if (IS_ERR(mem_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
				     "failed to get mem clock\n");

	pvr_dev->core_clk = core_clk;
	pvr_dev->sys_clk = sys_clk;
	pvr_dev->mem_clk = mem_clk;

	return 0;
}

/**
 * pvr_device_process_active_queues() - Process all queue-related events.
 * @pvr_dev: PowerVR device to check
 *
 * This is called any time we receive a FW event. It iterates over all
 * active queues and calls pvr_queue_process() on them.
 */
static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
{
	struct pvr_queue *queue, *tmp_queue;
	LIST_HEAD(active_queues);

	mutex_lock(&pvr_dev->queues.lock);

	/* Move all active queues to a temporary list. Queues that remain
	 * active after we're done processing them are re-inserted to
	 * the queues.active list by pvr_queue_process().
	 */
	list_splice_init(&pvr_dev->queues.active, &active_queues);

	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
		pvr_queue_process(queue);

	mutex_unlock(&pvr_dev->queues.lock);
}

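/*
 * Threaded half of the GPU interrupt: drains all pending FW events, then
 * unmasks the FW interrupt that was masked by pvr_device_irq_handler().
 */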
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	irqreturn_t ret = IRQ_NONE;

	/* We are in the threaded handler, so we can keep dequeuing events
	 * until none are pending. This should reduce the number of interrupts
	 * taken when the GPU is receiving a large number of short jobs.
	 */
	while (pvr_fw_irq_pending(pvr_dev)) {
		pvr_fw_irq_clear(pvr_dev);

		if (pvr_dev->fw_dev.booted) {
			pvr_fwccb_process(pvr_dev);
			pvr_kccb_wake_up_waiters(pvr_dev);
			pvr_device_process_active_queues(pvr_dev);
		}

		pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);

		ret = IRQ_HANDLED;
	}

	/* Unmask FW IRQs before returning, so new interrupts can be received. */
	pvr_fw_irq_enable(pvr_dev);
	return ret;
}

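/*
 * Hard half of the GPU interrupt: checks whether a FW event is pending, masks
 * FW interrupts and defers the actual processing to the threaded handler.
 */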
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;

	if (!pvr_fw_irq_pending(pvr_dev))
		return IRQ_NONE; /* Spurious IRQ - ignore. */

	/* Mask the FW interrupts before waking up the thread. Will be unmasked
	 * when the thread handler is done processing events.
	 */
	pvr_fw_irq_disable(pvr_dev);
	return IRQ_WAKE_THREAD;
}

/**
 * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by platform_get_irq(), or
 *  * Any error returned by request_threaded_irq().
 */
static int
pvr_device_irq_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);

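	/* The KCCB return waitqueue is woken from the threaded IRQ handler
	 * (via pvr_kccb_wake_up_waiters()), so initialise it before the IRQ
	 * line is requested below.
	 */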
	init_waitqueue_head(&pvr_dev->kccb.rtn_q);

	pvr_dev->irq = platform_get_irq(plat_dev, 0);
	if (pvr_dev->irq < 0)
		return pvr_dev->irq;

	/* Clear any pending events before requesting the IRQ line. */
	pvr_fw_irq_clear(pvr_dev);
	pvr_fw_irq_enable(pvr_dev);

	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
				    pvr_device_irq_thread_handler,
				    IRQF_SHARED, "gpu", pvr_dev);
}

/**
 * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_irq_fini(struct pvr_device *pvr_dev)
{
	free_irq(pvr_dev->irq, pvr_dev);
}

/**
 * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
 * @pvr_dev: Target PowerVR device.
 * @base: First part of the filename.
 * @major: Major version number.
 *
 * A PowerVR firmware filename consists of three parts separated by underscores
 * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
 * of @base, the second part is the hardware version string derived from
 * @pvr_dev and the final part is the firmware version number constructed from
 * @major with a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
 *
 * The returned string will have been slab allocated and must be freed with
 * kfree().
 *
 * Return:
 *  * The constructed filename on success, or
 *  * %NULL if memory allocation fails.
 */
static char *
pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
			    u8 major)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;

	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
			 gpu_id->v, gpu_id->n, gpu_id->c, major);
}

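/* devm action callback; releases the firmware image obtained in pvr_request_firmware(). */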
static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}

/**
 * pvr_request_firmware() - Load firmware for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * See pvr_build_firmware_filename() for details on firmware file naming.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the firmware filename cannot be allocated,
 *  * Any error returned by request_firmware(), or
 *  * Any error returned by devm_add_action_or_reset().
 */
static int
pvr_request_firmware(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = &pvr_dev->base;
	char *filename;
	const struct firmware *fw;
	int err;

	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
					       PVR_FW_VERSION_MAJOR);
	if (!filename)
		return -ENOMEM;

	/*
	 * request_firmware() takes a copy of the filename string, so we can
	 * free our instance as soon as the call returns.
	 */
	err = request_firmware(&fw, filename, pvr_dev->base.dev);
	if (err) {
		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
			filename, err);
		goto err_free_filename;
	}

	drm_info(drm_dev, "loaded firmware %s\n", filename);
	kfree(filename);

	pvr_dev->fw_dev.firmware = fw;

	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);

err_free_filename:
	kfree(filename);

	return err;
}

/**
 * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->gpu_id.
 */
static void
pvr_load_gpu_id(struct pvr_device *pvr_dev)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
	u64 bvnc;

	/*
	 * Try reading the BVNC using the newer (cleaner) method first. If the
	 * B value is zero, fall back to the older method.
	 */
	bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);

	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
	if (gpu_id->b != 0) {
		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
	} else {
		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);

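		/*
		 * On older cores, N and C are packed into the upper and lower
		 * bytes of the CORE_ID config field respectively.
		 */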
		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
	}
}

/**
 * pvr_set_dma_info() - Set PowerVR device DMA information
 * @pvr_dev: Target PowerVR device.
 *
 * Sets the DMA mask and max segment size for the PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by PVR_FEATURE_VALUE(), or
 *  * Any error returned by dma_set_mask().
 */
static int
pvr_set_dma_info(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u16 phys_bus_width;
	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
	if (err) {
		drm_err(drm_dev, "Failed to get device physical bus width\n");
		return err;
	}

	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
	if (err) {
		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
		return err;
	}

	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);

	return 0;
}

/**
 * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * The following steps are taken to ensure the device is ready:
 *
 *  1. Read the hardware version information from control registers,
 *  2. Request the firmware image and initialise the hardware feature
 *     information,
 *  3. Set up the device DMA information,
 *  4. Set up the device-scoped memory context (non-MIPS firmware processors
 *     only), and
 *  5. Initialise the firmware and load it into the device.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the firmware processor type is not supported,
 *  * Any error returned by pvr_request_firmware(),
 *  * Any error returned by pvr_fw_validate_init_device_info(),
 *  * Any error returned by pvr_set_dma_info(),
 *  * Any error returned by pvr_vm_create_context(), or
 *  * Any error returned by pvr_fw_init().
 */
static int
pvr_device_gpu_init(struct pvr_device *pvr_dev)
{
	int err;

	pvr_load_gpu_id(pvr_dev);

	err = pvr_request_firmware(pvr_dev);
	if (err)
		return err;

	err = pvr_fw_validate_init_device_info(pvr_dev);
	if (err)
		return err;

	if (PVR_HAS_FEATURE(pvr_dev, meta))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
	else if (PVR_HAS_FEATURE(pvr_dev, mips))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
	else
		return -EINVAL;

	pvr_stream_create_musthave_masks(pvr_dev);

	err = pvr_set_dma_info(pvr_dev);
	if (err)
		return err;

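	/* A kernel VM context is only created for META and RISC-V firmware
	 * processors; MIPS firmware does not use one.
	 */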
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
		if (IS_ERR(pvr_dev->kernel_vm_ctx))
			return PTR_ERR(pvr_dev->kernel_vm_ctx);
	}

	err = pvr_fw_init(pvr_dev);
	if (err)
		goto err_vm_ctx_put;

	return 0;

err_vm_ctx_put:
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
		pvr_dev->kernel_vm_ctx = NULL;
	}

	return err;
}

/**
 * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_gpu_fini(struct pvr_device *pvr_dev)
{
	pvr_fw_fini(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
		pvr_dev->kernel_vm_ctx = NULL;
	}
}

/**
 * pvr_device_init() - Initialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * If this function returns successfully, the device will have been fully
 * initialized. Otherwise, any parts of the device initialized before an error
 * occurs will be de-initialized before returning.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by pvr_device_params_init(),
 *  * Any error returned by pvr_device_clk_init(),
 *  * Any error returned by pm_runtime_resume_and_get(),
 *  * Any error returned by pvr_device_reg_init(),
 *  * Any error returned by pvr_device_gpu_init(), or
 *  * Any error returned by pvr_device_irq_init().
 */
int
pvr_device_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct device *dev = drm_dev->dev;
	int err;

	/*
	 * Set up device parameters. We do this first in case other steps
	 * depend on them.
	 */
	err = pvr_device_params_init(&pvr_dev->params);
	if (err)
		return err;

	/* Get the clocks required for the device to operate. */
	err = pvr_device_clk_init(pvr_dev);
	if (err)
		return err;

	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;

	/* Map the control registers into memory. */
	err = pvr_device_reg_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	/* Perform GPU-specific initialization steps. */
	err = pvr_device_gpu_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	err = pvr_device_irq_init(pvr_dev);
	if (err)
		goto err_device_gpu_fini;

	pm_runtime_put(dev);

	return 0;

err_device_gpu_fini:
	pvr_device_gpu_fini(pvr_dev);

err_pm_runtime_put:
	pm_runtime_put_sync_suspend(dev);

	return err;
}

/**
 * pvr_device_fini() - Deinitialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_device_fini(struct pvr_device *pvr_dev)
{
	/*
	 * Deinitialization stages are performed in reverse order compared to
	 * the initialization stages in pvr_device_init().
	 */
	pvr_device_irq_fini(pvr_dev);
	pvr_device_gpu_fini(pvr_dev);
}

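/**
 * pvr_device_has_uapi_quirk() - Check whether the device has a quirk exposed
 * through the UAPI
 * @pvr_dev: Target PowerVR device.
 * @quirk: Quirk number to check for.
 *
 * Only quirk numbers that are relevant to userspace are handled here; any
 * other value returns %false.
 *
 * Return:
 *  * %true if the quirk is present on the device, or
 *  * %false otherwise.
 */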
bool
pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
{
	switch (quirk) {
	case 47217:
		return PVR_HAS_QUIRK(pvr_dev, 47217);
	case 48545:
		return PVR_HAS_QUIRK(pvr_dev, 48545);
	case 49927:
		return PVR_HAS_QUIRK(pvr_dev, 49927);
	case 51764:
		return PVR_HAS_QUIRK(pvr_dev, 51764);
	case 62269:
		return PVR_HAS_QUIRK(pvr_dev, 62269);
	default:
		return false;
	}
}

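/**
 * pvr_device_has_uapi_enhancement() - Check whether the device has an
 * enhancement exposed through the UAPI
 * @pvr_dev: Target PowerVR device.
 * @enhancement: Enhancement number to check for.
 *
 * Only enhancement numbers that are relevant to userspace are handled here;
 * any other value returns %false.
 *
 * Return:
 *  * %true if the enhancement is present on the device, or
 *  * %false otherwise.
 */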
bool
pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
{
	switch (enhancement) {
	case 35421:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
	case 42064:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
	default:
		return false;
	}
}

/**
 * pvr_device_has_feature() - Look up device feature based on feature definition
 * @pvr_dev: Device pointer.
 * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
 *
 * Return:
 *  * %true if feature is present on device, or
 *  * %false if feature is not present on device.
 */
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
{
	switch (feature) {
	case PVR_FEATURE_CLUSTER_GROUPING:
		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);

	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);

	case PVR_FEATURE_FB_CDC_V4:
		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);

	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);

	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);

	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);

	case PVR_FEATURE_TESSELLATION:
		return PVR_HAS_FEATURE(pvr_dev, tessellation);

	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);

	case PVR_FEATURE_VDM_DRAWINDIRECT:
		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);

	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);

	case PVR_FEATURE_ZLS_SUBTILE:
		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);

	/* Derived features. */
	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
		u8 cdm_control_stream_format = 0;

		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
	}

	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
			u8 fbcdc_algorithm = 0;

			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
		}
		return false;

	default:
		WARN(true, "Looking up undefined feature %u\n", feature);
		return false;
	}
}