/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_CONTEXT_H
#define PVR_CONTEXT_H

#include <drm/gpu_scheduler.h>

#include <linux/compiler_attributes.h>
#include <linux/dma-fence.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <uapi/drm/pvr_drm.h>

#include "pvr_cccb.h"
#include "pvr_device.h"
#include "pvr_queue.h"

/* Forward declaration from pvr_gem.h. */
struct pvr_fw_object;

enum pvr_context_priority {
	PVR_CTX_PRIORITY_LOW = 0,
	PVR_CTX_PRIORITY_MEDIUM,
	PVR_CTX_PRIORITY_HIGH,
};

/**
 * struct pvr_context - Context data
 */
struct pvr_context {
	/** @ref_count: Refcount for context. */
	struct kref ref_count;

	/** @pvr_dev: Pointer to owning device. */
	struct pvr_device *pvr_dev;

	/** @vm_ctx: Pointer to associated VM context. */
	struct pvr_vm_context *vm_ctx;

	/** @type: Type of context. */
	enum drm_pvr_ctx_type type;

	/** @flags: Context flags. */
	u32 flags;

	/** @priority: Context priority. */
	enum pvr_context_priority priority;

	/** @fw_obj: FW object representing FW-side context data. */
	struct pvr_fw_object *fw_obj;

	/** @data: Pointer to local copy of FW context data. */
	void *data;

	/** @data_size: Size of FW context data, in bytes. */
	u32 data_size;

	/** @ctx_id: FW context ID. */
	u32 ctx_id;

	/**
	 * @faulty: Set to 1 when the context's queues still had unfinished
	 * jobs when a GPU reset happened.
	 *
	 * In that case, the context is in an inconsistent state and can't be
	 * used anymore.
	 */
	atomic_t faulty;

	/** @queues: Union containing all kinds of queues. */
	union {
		struct {
			/** @geometry: Geometry queue. */
			struct pvr_queue *geometry;

			/** @fragment: Fragment queue. */
			struct pvr_queue *fragment;
		};

		/** @compute: Compute queue. */
		struct pvr_queue *compute;

		/** @transfer: Transfer queue. */
		struct pvr_queue *transfer;
	} queues;
};

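/**
 * pvr_context_get_queue_for_job() - Return the queue backing a given job type.
 * @ctx: Context pointer.
 * @type: Job type.
 *
 * Return:
 *  * The matching queue if @type is valid for @ctx's context type, or
 *  * %NULL otherwise.
 */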
static __always_inline struct pvr_queue *
pvr_context_get_queue_for_job(struct pvr_context *ctx, enum drm_pvr_job_type type)
{
	switch (type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;
	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;
	case DRM_PVR_JOB_TYPE_COMPUTE:
		return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;
	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
		return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;
	}

	return NULL;
}
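
/*
 * A minimal sketch of how a submission path might pick its queue with the
 * helper above; the surrounding ioctl-style handler and the -EINVAL policy
 * for unsupported job types are assumptions, not part of this header:
 *
 *	struct pvr_queue *queue;
 *
 *	queue = pvr_context_get_queue_for_job(ctx, job_type);
 *	if (!queue)
 *		return -EINVAL;
 */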

/**
 * pvr_context_get() - Take additional reference on context.
 * @ctx: Context pointer.
 *
 * Call pvr_context_put() to release.
 *
 * Return:
 *  * The requested context on success, or
 *  * %NULL if no context pointer passed.
 */
static __always_inline struct pvr_context *
pvr_context_get(struct pvr_context *ctx)
{
	if (ctx)
		kref_get(&ctx->ref_count);

	return ctx;
}

/**
 * pvr_context_lookup() - Lookup context pointer from handle and file.
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Context handle.
 *
 * Takes reference on context. Call pvr_context_put() to release.
 *
 * Return:
 *  * The requested context on success, or
 *  * %NULL on failure (context does not exist, or does not belong to @pvr_file).
 */
static __always_inline struct pvr_context *
pvr_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx;

	/* Take the array lock to protect against context removal. */
	xa_lock(&pvr_file->ctx_handles);
	ctx = pvr_context_get(xa_load(&pvr_file->ctx_handles, handle));
	xa_unlock(&pvr_file->ctx_handles);

	return ctx;
}
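
/*
 * A minimal sketch of the lookup/put reference pattern, assuming an
 * ioctl-style caller that already holds @pvr_file; the handle source and the
 * -EINVAL error code are illustrative only:
 *
 *	struct pvr_context *ctx = pvr_context_lookup(pvr_file, args->handle);
 *
 *	if (!ctx)
 *		return -EINVAL;
 *
 *	... use ctx ...
 *
 *	pvr_context_put(ctx);
 */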

/**
 * pvr_context_lookup_id() - Lookup context pointer from ID.
 * @pvr_dev: Device pointer.
 * @id: FW context ID.
 *
 * Takes reference on context. Call pvr_context_put() to release.
 *
 * Return:
 *  * The requested context on success, or
 *  * %NULL on failure (context does not exist).
 */
static __always_inline struct pvr_context *
pvr_context_lookup_id(struct pvr_device *pvr_dev, u32 id)
{
	struct pvr_context *ctx;

	/* Take the array lock to protect against context removal. */
	xa_lock(&pvr_dev->ctx_ids);

	/* Contexts are removed from the ctx_ids set in the context release path,
	 * meaning the ref_count reached zero before they get removed. We need
	 * to make sure we're not trying to acquire a context that's being
	 * destroyed.
	 */
	ctx = xa_load(&pvr_dev->ctx_ids, id);
	if (!ctx || !kref_get_unless_zero(&ctx->ref_count))
		ctx = NULL;

	xa_unlock(&pvr_dev->ctx_ids);

	return ctx;
}

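/**
 * pvr_context_get_fw_addr() - Return the FW address of the FW-side context data.
 * @ctx: Context pointer.
 *
 * Return: FW address of @ctx->fw_obj.
 */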
static __always_inline u32
pvr_context_get_fw_addr(struct pvr_context *ctx)
{
	u32 ctx_fw_addr = 0;

	pvr_fw_object_get_fw_addr(ctx->fw_obj, &ctx_fw_addr);

	return ctx_fw_addr;
}

void pvr_context_put(struct pvr_context *ctx);

int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args);

int pvr_context_destroy(struct pvr_file *pvr_file, u32 handle);

void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file);

void pvr_context_device_init(struct pvr_device *pvr_dev);

void pvr_context_device_fini(struct pvr_device *pvr_dev);

#endif /* PVR_CONTEXT_H */