/*-
 * SPDX-License-Identifier: MIT OR GPL-2.0-only
 *
 * Copyright © 2015 Julien Grall
 * Copyright © 2013 Spectra Logic Corporation
 * Copyright © 2018 John Baldwin/The FreeBSD Foundation
 * Copyright © 2019 Roger Pau Monné/Citrix Systems R&D
 * Copyright © 2021 Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/stddef.h>

#include <xen/xen-os.h>
#include <xen/xen_intr.h>
#include <machine/xen/arch-intr.h>

#include <x86/apicvar.h>

/************************ Xen x86 interrupt interface ************************/

/*
 * Pointers to the per-CPU interrupt counters.
 */
DPCPU_DEFINE_STATIC(u_long *, pintrcnt);

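/*
 * Register a per-CPU "cpuN:xen" statistics counter for Xen upcalls so that
 * upcall counts appear alongside the other interrupt counters.  Runs from
 * SYSINIT and does nothing when not running as a Xen guest.
 */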
static void
xen_intrcnt_init(void *dummy __unused)
{
	unsigned int i;

	if (!xen_domain())
		return;

	CPU_FOREACH(i) {
		char buf[MAXCOMLEN + 1];

		snprintf(buf, sizeof(buf), "cpu%u:xen", i);
		intrcnt_add(buf, DPCPU_ID_PTR(i, pintrcnt));
	}
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);

/*
 * Transition from assembly language, called from
 * sys/{amd64/amd64|i386/i386}/apic_vector.S
 */
extern void xen_arch_intr_handle_upcall(struct trapframe *);
void
xen_arch_intr_handle_upcall(struct trapframe *trap_frame)
{
	struct trapframe *old;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	++*DPCPU_GET(pintrcnt);

	++curthread->td_intr_nesting_level;
	old = curthread->td_intr_frame;
	curthread->td_intr_frame = trap_frame;

	xen_intr_handle_upcall(NULL);

	curthread->td_intr_frame = old;
	--curthread->td_intr_nesting_level;

	if (xen_evtchn_needs_ack)
		lapic_eoi();

	critical_exit();
}

/******************************** EVTCHN PIC *********************************/

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/*
 * Lock for the x86-related structures.  Notably, modifying
 * xen_intr_auto_vector_count and allocating interrupts require this lock to
 * be held.
 */
static struct mtx	xen_intr_x86_lock;

/* First IRQ number reserved for Xen event channels. */
static u_int		first_evtchn_irq;

/* Number of event channel vectors handed out so far. */
static u_int		xen_intr_auto_vector_count;

/*
 * List of released isrcs.  This is meant to overlay struct xenisrc, with only
 * the xen_arch_isrc_t portion being preserved; everything else can be wiped.
 */
struct avail_list {
	xen_arch_isrc_t preserve;
	SLIST_ENTRY(avail_list) free;
};
static SLIST_HEAD(free, avail_list) avail_list =
    SLIST_HEAD_INITIALIZER(avail_list);

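/*
 * Reserve a block of NR_EVENT_CHANNELS interrupt vectors for event channels,
 * placed immediately after the last I/O interrupt.  first_evtchn_irq records
 * the base of the range; vectors are handed out from it on demand by
 * xen_arch_intr_alloc().
 */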
void
xen_intr_alloc_irqs(void)
{

	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
		panic("IRQ allocation overflow (num_msi_irqs too high?)");
	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}

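/*
 * The xen_intr_pic_* functions below are thin wrappers that translate the
 * struct intsrc callbacks used by the x86 interrupt framework into the
 * generic Xen event channel operations.  The casts from struct intsrc to
 * struct xenisrc rely on xi_arch.intsrc being the first member of struct
 * xenisrc, which the accompanying _Static_asserts enforce.
 */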
static void
xen_intr_pic_enable_source(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_source((struct xenisrc *)isrc);
}

static void
xen_intr_pic_disable_source(struct intsrc *isrc, int eoi)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_source((struct xenisrc *)isrc);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pic_eoi_source(struct intsrc *isrc)
{

	/* Nothing to do on end-of-interrupt */
}

static void
xen_intr_pic_enable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_intr((struct xenisrc *)isrc);
}

static void
xen_intr_pic_disable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_intr((struct xenisrc *)isrc);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_pic_vector(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");

	return (((struct xenisrc *)isrc)->xi_arch.vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_pic_source_pending(struct intsrc *isrc)
{
	/*
	 * Event channels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_pic_suspend(struct pic *pic)
{

	/* Nothing to do on suspend */
}

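/**
 * Restore this PIC after a resume from suspension.  Unless the suspend was
 * cancelled, xen_intr_resume() is called to re-establish event channel state.
 */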
static void
xen_intr_pic_resume(struct pic *pic, bool suspend_cancelled)
{

	if (!suspend_cancelled)
		xen_intr_resume();
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno value.
 */
static int
xen_intr_pic_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}

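/**
 * Bind an interrupt source to a particular CPU.
 *
 * \param isrc     The interrupt source to bind.
 * \param apic_id  The local APIC ID of the target CPU; it is translated to a
 *                 FreeBSD CPU id with apic_cpuid() before being handed to
 *                 xen_intr_assign_cpu().
 *
 * \returns  0 on success, otherwise an errno value.
 */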
static int
xen_intr_pic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	return (xen_intr_assign_cpu((struct xenisrc *)isrc,
	    apic_cpuid(apic_id)));
}

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
static struct pic xen_intr_pic = {
	.pic_enable_source  = xen_intr_pic_enable_source,
	.pic_disable_source = xen_intr_pic_disable_source,
	.pic_eoi_source     = xen_intr_pic_eoi_source,
	.pic_enable_intr    = xen_intr_pic_enable_intr,
	.pic_disable_intr   = xen_intr_pic_disable_intr,
	.pic_vector         = xen_intr_pic_vector,
	.pic_source_pending = xen_intr_pic_source_pending,
	.pic_suspend        = xen_intr_pic_suspend,
	.pic_resume         = xen_intr_pic_resume,
	.pic_config_intr    = xen_intr_pic_config_intr,
	.pic_assign_cpu     = xen_intr_pic_assign_cpu,
};

/******************************* ARCH wrappers *******************************/

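/*
 * Initialize the x86 side of the Xen interrupt code: set up the allocation
 * lock and register the event channel PIC with the interrupt framework.
 * Failure to register the PIC is fatal.
 */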
void
xen_arch_intr_init(void)
{
	int error;

	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);

	error = intr_register_pic(&xen_intr_pic);
	if (error != 0)
		panic("%s(): failed registering Xen/x86 PIC, error=%d\n",
		    __func__, error);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
struct xenisrc *
xen_arch_intr_alloc(void)
{
	static int warned;
	struct xenisrc *isrc;
	unsigned int vector;
	int error;

	mtx_lock(&xen_intr_x86_lock);
	/* Prefer to recycle a previously released interrupt source. */
	isrc = (struct xenisrc *)SLIST_FIRST(&avail_list);
	if (isrc != NULL) {
		SLIST_REMOVE_HEAD(&avail_list, free);
		mtx_unlock(&xen_intr_x86_lock);

		KASSERT(isrc->xi_arch.intsrc.is_pic == &xen_intr_pic,
		    ("interrupt not owned by Xen code?"));

		KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
		    ("Free evtchn still has handlers"));

		return (isrc);
	}

	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("%s: Xen interrupts exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}

	/* Otherwise carve a new vector out of the reserved range. */
	vector = first_evtchn_irq + xen_intr_auto_vector_count;
	xen_intr_auto_vector_count++;

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_x86_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_arch.intsrc.is_pic = &xen_intr_pic;
	isrc->xi_arch.vector = vector;
	error = intr_register_source(&isrc->xi_arch.intsrc);
	if (error != 0)
		panic("%s(): failed registering interrupt %u, error=%d\n",
		    __func__, vector, error);

	return (isrc);
}

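/*
 * Return an interrupt source to the free pool.  The struct intsrc remains
 * registered with the system; only the Xen-specific portion is recycled by
 * overlaying it with a struct avail_list entry (see the _Static_asserts
 * below).
 */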
void
xen_arch_intr_release(struct xenisrc *isrc)
{

	KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));

	_Static_assert(sizeof(struct xenisrc) >= sizeof(struct avail_list),
	    "unused structure MUST be no larger than in-use structure");
	_Static_assert(offsetof(struct xenisrc, xi_arch) ==
	    offsetof(struct avail_list, preserve),
	    "unused structure does not properly overlay in-use structure");

	mtx_lock(&xen_intr_x86_lock);
	SLIST_INSERT_HEAD(&avail_list, (struct avail_list *)isrc, free);
	mtx_unlock(&xen_intr_x86_lock);
}