/*	$OpenBSD: intr.c,v 1.51 2021/03/11 11:16:56 jsg Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/kernel.h>
32#include <sys/device.h>
33#include <sys/evcount.h>
34#include <sys/malloc.h>
35#include <sys/atomic.h>
36
37#include <uvm/uvm_extern.h>
38
39#include <machine/autoconf.h>
40#include <machine/frame.h>
41#include <machine/reg.h>
42
/*
 * Interrupt vector descriptor.
 *
 * NOTE(review): the layout looks ABI-fixed (__packed plus explicit pad
 * fields) — presumably referenced from assembly or shared with firmware
 * tables; confirm before reordering or resizing any field.
 */
struct hppa_iv {
	char pri;		/* IPL_* priority of this vector */
	char irq;		/* interrupt line number */
	char flags;
#define	HPPA_IV_CALL	0x01	/* nested (IPL_NESTED) container vector */
#define	HPPA_IV_SOFT	0x02	/* soft interrupt vector */
	char pad;		/* explicit padding, keep layout stable */
	int pad2;
	int (*handler)(void *);	/* handler; soft handlers are cast in (XXX) */
	void *arg;		/* handler argument; NULL = pass trapframe */
	u_int bit;		/* cached mask bit, 1 << irq */
	struct hppa_iv *share;	/* spare ivs available for shared lines */
	struct hppa_iv *next;	/* chain of handlers on the same bit */
	struct evcount *cnt;	/* event counter, may be NULL */
} __packed;
58
/*
 * Static pool backing the nested-interrupt line tables and spare
 * shared vectors carved out by cpu_intr_establish(); intr_more is the
 * bump-allocation cursor, and intr_list queues entries registered via
 * cpu_intr_map() until cpu_intr_init() wires them to real bits.
 */
struct hppa_iv intr_store[8*2*CPU_NINTS] __attribute__ ((aligned(32))),
    *intr_more = intr_store, *intr_list;
/*
 * One vector per CPU interrupt bit.  The soft interrupt levels are
 * pre-wired here at slot (IPL_* - 1) and marked HPPA_IV_SOFT.
 */
struct hppa_iv intr_table[CPU_NINTS] __attribute__ ((aligned(32))) = {
	{ IPL_SOFTCLOCK, 0, HPPA_IV_SOFT, 0, 0, NULL },
	{ IPL_SOFTNET  , 0, HPPA_IV_SOFT, 0, 0, NULL },
	{ 0 },
	{ 0 },
	{ IPL_SOFTTTY  , 0, HPPA_IV_SOFT, 0, 0, NULL }
};
/*
 * Per-IPL interrupt masks, seeded with the soft interrupt bits and
 * completed (hardware bits added, levels made cumulative) in
 * cpu_intr_establish()/cpu_intr_init().
 */
volatile u_long imask[NIPL] = {
	0,
	1 << (IPL_SOFTCLOCK - 1),
	1 << (IPL_SOFTNET - 1),
	0,
	0,
	1 << (IPL_SOFTTTY - 1)
};
76
#ifdef DIAGNOSTIC
/*
 * Diagnostic check that the current IPL is at least `wantipl';
 * complain through splassert_fail() when it is not.
 */
void
splassert_check(int wantipl, const char *func)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_cpl >= wantipl)
		return;
	splassert_fail(wantipl, ci->ci_cpl, func);
}
#endif
87
/*
 * Finish interrupt setup once autoconf has registered all handlers:
 * wire the entries queued on intr_list by cpu_intr_map() to interrupt
 * bits, make the imask[] levels cumulative, and enable interrupt
 * delivery on this CPU.
 */
void
cpu_intr_init(void)
{
	struct cpu_info *ci = curcpu();
	struct hppa_iv *iv;
	int level, bit;
	u_long mask;

	mask = ci->ci_mask | SOFTINT_MASK;

	/* map the shared ints */
	while (intr_list) {
		iv = intr_list;
		intr_list = iv->next;
		bit = ffs(imask[(int)iv->pri]);
		if (!bit--) {
			/* no bit carries this priority yet: claim a free one */
			bit = ffs(~mask);
			if (!bit--)
				panic("cpu_intr_init: out of bits");

			iv->next = NULL;
			iv->bit = 1 << bit;
			intr_table[bit] = *iv;
			mask |= (1 << bit);
			imask[(int)iv->pri] |= (1 << bit);
		} else {
			/* priority already has a bit: share it */
			iv->bit = 1 << bit;
			iv->next = intr_table[bit].next;
			intr_table[bit].next = iv;
		}
	}

	/* make the masks cumulative: level N blocks everything below it */
	for (level = 0; level < NIPL - 1; level++)
		imask[level + 1] |= imask[level];

	/* XXX the whacky trick is to prevent hardclock from happening */
	mfctl(CR_ITMR, mask);
	mtctl(mask - 1, CR_ITMR);

	mtctl(ci->ci_mask, CR_EIEM);
	/* ack the unwanted interrupts */
	mfctl(CR_EIRR, mask);
	mtctl(mask & (1U << 31), CR_EIRR);

	/* in spl*() we trust, clock is started in initclocks() */
	ci->ci_psw |= PSL_I;
	ssm(PSL_I, mask);
}
136
137/*
138 * Find an available, non-shared interrupt bit.
139 * Returns -1 if all interrupt bits are in use.
140 */
141int
142cpu_intr_findirq(void)
143{
144	int irq;
145
146	for (irq = 0; irq < CPU_NINTS; irq++)
147		if (intr_table[irq].handler == NULL &&
148		    intr_table[irq].pri == 0)
149			return irq;
150
151	return -1;
152}
153
/*
 * Attach `handler' to downstream interrupt line `irq' of the nested
 * vector `v' (the cookie returned by cpu_intr_establish() for
 * IPL_NESTED).  The entry is only queued on intr_list here;
 * cpu_intr_init() assigns it a real interrupt bit later.  Returns an
 * opaque cookie, or NULL when the line is out of range, allocation
 * fails, or the line is taken and no spare shared vectors remain.
 */
void *
cpu_intr_map(void *v, int pri, int irq, int (*handler)(void *), void *arg,
    const char *name)
{
	struct hppa_iv *iv, *pv = v, *ivb = pv->next;
	struct evcount *cnt;

	if (irq < 0 || irq >= CPU_NINTS)
		return (NULL);

	cnt = (struct evcount *)malloc(sizeof *cnt, M_DEVBUF, M_NOWAIT);
	if (!cnt)
		return (NULL);

	iv = &ivb[irq];
	if (iv->handler) {
		/*
		 * Line already in use: take a spare iv off the parent's
		 * share list and splice it onto this line's share list.
		 */
		if (!pv->share) {
			free(cnt, M_DEVBUF, sizeof *cnt);
			return (NULL);
		} else {
			iv = pv->share;
			pv->share = iv->share;
			iv->share = ivb[irq].share;
			ivb[irq].share = iv;
		}
	}

	evcount_attach(cnt, name, NULL);
	iv->pri = pri;
	iv->irq = irq;
	iv->flags = 0;
	iv->handler = handler;
	iv->arg = arg;
	iv->cnt = cnt;
	/* defer wiring to an interrupt bit until cpu_intr_init() */
	iv->next = intr_list;
	intr_list = iv;

	return (iv);
}
193
/*
 * Establish `handler' on external interrupt bit `irq' at priority
 * `pri'.  For pri == IPL_NESTED the vector is marked HPPA_IV_CALL and
 * a block of 2*CPU_NINTS ivs is carved from intr_store: the first
 * CPU_NINTS entries form the per-line table used by cpu_intr_map(),
 * the second half is queued on ->share as spares for shared lines.
 * Returns an opaque cookie, or NULL when the bit is out of range,
 * already taken, reserved for soft interrupts, or memory is exhausted.
 */
void *
cpu_intr_establish(int pri, int irq, int (*handler)(void *), void *arg,
    const char *name)
{
	struct cpu_info *ci = curcpu();
	struct hppa_iv *iv, *ev;
	struct evcount *cnt;

	if (irq < 0 || irq >= CPU_NINTS || intr_table[irq].handler)
		return (NULL);

	if ((intr_table[irq].flags & HPPA_IV_SOFT) != 0)
		return (NULL);

	cnt = (struct evcount *)malloc(sizeof *cnt, M_DEVBUF, M_NOWAIT);
	if (!cnt)
		return (NULL);

	ci->ci_mask |= (1 << irq);
	imask[pri] |= (1 << irq);

	iv = &intr_table[irq];
	iv->pri = pri;
	iv->irq = irq;
	iv->bit = 1 << irq;
	iv->flags = 0;
	iv->handler = handler;
	iv->arg = arg;
	iv->cnt = cnt;
	iv->next = NULL;
	iv->share = NULL;

	if (pri == IPL_NESTED) {
		/* carve line table + spares; nested vectors keep no counter */
		iv->flags = HPPA_IV_CALL;
		iv->next = intr_more;
		intr_more += 2 * CPU_NINTS;
		for (ev = iv->next + CPU_NINTS; ev < intr_more; ev++)
			ev->share = iv->share, iv->share = ev;
		free(cnt, M_DEVBUF, sizeof *cnt);
		iv->cnt = NULL;
	} else if (name == NULL) {
		/* anonymous handler: no event counting */
		free(cnt, M_DEVBUF, sizeof *cnt);
		iv->cnt = NULL;
	} else
		evcount_attach(cnt, name, NULL);

	return (iv);
}
242
/*
 * Hardware interrupt dispatcher, entered from the trap path with the
 * trapframe in `v'.  Walks ci_ipending from the highest IPL down to
 * the IPL at entry, running every handler chained on each pending
 * bit.  CR_EIEM is closed while the dispatch state is manipulated and
 * reopened (to the frame's saved mask) around the handler calls.
 */
void
cpu_intr(void *v)
{
	struct cpu_info *ci = curcpu();
	struct trapframe *frame = v;
	struct hppa_iv *iv;
	int pri, r, s, bit;
	u_long mask;
	void *arg;

	/* block delivery while we inspect and update the pending bits */
	mtctl(0, CR_EIEM);

	s = ci->ci_cpl;
	if (ci->ci_in_intr++)
		frame->tf_flags |= TFF_INTR;	/* nested interrupt entry */

	/* Process higher priority interrupts first. */
	for (pri = NIPL - 1; pri > s; pri--) {

		/* bits belonging to exactly this priority level */
		mask = imask[pri] ^ imask[pri - 1];

		while (ci->ci_ipending & mask) {
			bit = fls(ci->ci_ipending & mask) - 1;
			iv = &intr_table[bit];

			ci->ci_ipending &= ~(1L << bit);

			/*
			 * Skip HPPA_IV_CALL (nested container) vectors:
			 * their downstream lines were wired to bits of
			 * their own in cpu_intr_init().
			 */
			if (iv->flags & HPPA_IV_CALL)
				continue;

			uvmexp.intrs++;
			if (iv->flags & HPPA_IV_SOFT)
				uvmexp.softs++;

			/* raise to the vector's level and reopen EIEM */
			ci->ci_cpl = iv->pri;
			mtctl(frame->tf_eiem, CR_EIEM);

#ifdef MULTIPROCESSOR
			if (pri < IPL_CLOCK)
				__mp_lock(&kernel_lock);
#endif

			/*
			 * Run all handlers sharing this bit; r records
			 * whether any claimed the interrupt (soft ints
			 * start out claimed).
			 */
			for (r = iv->flags & HPPA_IV_SOFT;
			     iv && iv->handler; iv = iv->next) {
				/* no arg means pass the frame */
				arg = iv->arg ? iv->arg : v;
				if ((iv->handler)(arg) == 1) {
					if (iv->cnt)
						iv->cnt->ec_count++;
					r |= 1;
				}
			}
#if 0	/* XXX this does not work, lasi gives us double ints */
			if (!r) {
				ci->ci_cpl = 0;
				printf("stray interrupt %d\n", bit);
			}
#endif

#ifdef MULTIPROCESSOR
			if (pri < IPL_CLOCK)
				__mp_unlock(&kernel_lock);
#endif
			mtctl(0, CR_EIEM);
		}
	}
	ci->ci_in_intr--;
	ci->ci_cpl = s;

	/* restore the interrupt mask saved at trap entry */
	mtctl(frame->tf_eiem, CR_EIEM);
}
314
315void
316intr_barrier(void *cookie)
317{
318	sched_barrier(NULL);
319}
320
/*
 * Register a soft interrupt handler at priority `pri'.  Soft
 * interrupt vectors occupy the pre-initialized HPPA_IV_SOFT slots of
 * intr_table at index (pri - 1); additional handlers on the same
 * level are chained via malloc'd ivs on ->next.  Returns a cookie for
 * softintr_schedule()/softintr_disestablish(), or NULL when the level
 * has no soft vector or allocation fails.
 */
void *
softintr_establish(int pri, void (*handler)(void *), void *arg)
{
	struct hppa_iv *iv;
	int irq;

	/* callers may pass the hardware level; map it to its soft twin */
	if (pri == IPL_TTY)
		pri = IPL_SOFTTTY;

	irq = pri - 1;
	iv = &intr_table[irq];
	if ((iv->flags & HPPA_IV_SOFT) == 0 || iv->pri != pri)
		return (NULL);

	if (iv->handler) {
		/* slot taken: append a fresh iv at the end of the chain */
		struct hppa_iv *nv;

		nv = malloc(sizeof *iv, M_DEVBUF, M_NOWAIT);
		if (!nv)
			return (NULL);
		while (iv->next)
			iv = iv->next;
		iv->next = nv;
		iv = nv;
	} else
		imask[pri] |= (1 << irq);

	iv->pri = pri;
	iv->irq = 0;
	iv->bit = 1 << irq;
	iv->flags = HPPA_IV_SOFT;
	iv->handler = (int (*)(void *))handler;	/* XXX */
	iv->arg = arg;
	iv->cnt = NULL;
	iv->next = NULL;
	iv->share = NULL;

	return (iv);
}
360
361void
362softintr_disestablish(void *cookie)
363{
364	struct hppa_iv *iv = cookie;
365	int irq = iv->pri - 1;
366
367	if (&intr_table[irq] == cookie) {
368		if (iv->next) {
369			struct hppa_iv *nv = iv->next;
370
371			iv->handler = nv->handler;
372			iv->arg = nv->arg;
373			iv->next = nv->next;
374			free(nv, M_DEVBUF, sizeof *nv);
375			return;
376		} else {
377			iv->handler = NULL;
378			iv->arg = NULL;
379			return;
380		}
381	}
382
383	for (iv = &intr_table[irq]; iv; iv = iv->next) {
384		if (iv->next == cookie) {
385			iv->next = iv->next->next;
386			free(cookie, M_DEVBUF, 0);
387			return;
388		}
389	}
390}
391
392void
393softintr_schedule(void *cookie)
394{
395	struct hppa_iv *iv = cookie;
396
397	atomic_setbits_long(&curcpu()->ci_ipending, 1 << (iv->pri - 1));
398}
399