/* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */

/*-
 * Copyright (c) 2004 Olivier Houchard.
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Soft interrupt and other generic interrupt functions.
37 */ 38 39#include <sys/cdefs.h> 40__FBSDID("$FreeBSD$"); 41#include <sys/param.h> 42#include <sys/systm.h> 43#include <sys/syslog.h> 44#include <sys/kernel.h> 45#include <sys/malloc.h> 46#include <sys/proc.h> 47#include <sys/bus.h> 48#include <sys/interrupt.h> 49#include <sys/conf.h> 50#include <machine/atomic.h> 51#include <machine/intr.h> 52#include <machine/cpu.h> 53 54#define INTRNAME_LEN (MAXCOMLEN + 1) 55 56typedef void (*mask_fn)(void *); 57 58static struct intr_event *intr_events[NIRQ]; 59 60void arm_irq_handler(struct trapframe *); 61 62void (*arm_post_filter)(void *) = NULL; 63int (*arm_config_irq)(int irq, enum intr_trigger trig, 64 enum intr_polarity pol) = NULL; 65 66/* Data for statistics reporting. */ 67u_long intrcnt[NIRQ]; 68char intrnames[NIRQ * INTRNAME_LEN]; 69size_t sintrcnt = sizeof(intrcnt); 70size_t sintrnames = sizeof(intrnames); 71 72/* 73 * Pre-format intrnames into an array of fixed-size strings containing spaces. 74 * This allows us to avoid the need for an intermediate table of indices into 75 * the names and counts arrays, while still meeting the requirements and 76 * assumptions of vmstat(8) and the kdb "show intrcnt" command, the two 77 * consumers of this data. 
78 */ 79static void 80intr_init(void *unused) 81{ 82 int i; 83 84 for (i = 0; i < NIRQ; ++i) { 85 snprintf(&intrnames[i * INTRNAME_LEN], INTRNAME_LEN, "%-*s", 86 INTRNAME_LEN - 1, ""); 87 } 88} 89 90SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); 91 92void 93arm_setup_irqhandler(const char *name, driver_filter_t *filt, 94 void (*hand)(void*), void *arg, int irq, int flags, void **cookiep) 95{ 96 struct intr_event *event; 97 int error; 98 99 if (irq < 0 || irq >= NIRQ) 100 return; 101 event = intr_events[irq]; 102 if (event == NULL) { 103 error = intr_event_create(&event, (void *)irq, 0, irq, 104 (mask_fn)arm_mask_irq, (mask_fn)arm_unmask_irq, 105 arm_post_filter, NULL, "intr%d:", irq); 106 if (error) 107 return; 108 intr_events[irq] = event; 109 snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN, 110 "irq%d: %-*s", irq, INTRNAME_LEN - 1, name); 111 } 112 intr_event_add_handler(event, name, filt, hand, arg, 113 intr_priority(flags), flags, cookiep); 114} 115 116int 117arm_remove_irqhandler(int irq, void *cookie) 118{ 119 struct intr_event *event; 120 int error; 121 122 event = intr_events[irq]; 123 arm_mask_irq(irq); 124 125 error = intr_event_remove_handler(cookie); 126 127 if (!TAILQ_EMPTY(&event->ie_handlers)) 128 arm_unmask_irq(irq); 129 return (error); 130} 131 132void dosoftints(void); 133void 134dosoftints(void) 135{ 136} 137 138void 139arm_irq_handler(struct trapframe *frame) 140{ 141 struct intr_event *event; 142 int i; 143 144 PCPU_INC(cnt.v_intr); 145 i = -1; 146 while ((i = arm_get_next_irq(i)) != -1) { 147 intrcnt[i]++; 148 event = intr_events[i]; 149 if (intr_event_handle(event, frame) != 0) { 150 /* XXX: Log stray IRQs */ 151 arm_mask_irq(i); 152 } 153 } 154} 155 156/* 157 * arm_irq_memory_barrier() 158 * 159 * Ensure all writes to device memory have reached devices before proceeding. 160 * 161 * This is intended to be called from the post-filter and post-thread routines 162 * of an interrupt controller implementation. 
A peripheral device driver should 163 * use bus_space_barrier() if it needs to ensure a write has reached the 164 * hardware for some reason other than clearing interrupt conditions. 165 * 166 * The need for this function arises from the ARM weak memory ordering model. 167 * Writes to locations mapped with the Device attribute bypass any caches, but 168 * are buffered. Multiple writes to the same device will be observed by that 169 * device in the order issued by the cpu. Writes to different devices may 170 * appear at those devices in a different order than issued by the cpu. That 171 * is, if the cpu writes to device A then device B, the write to device B could 172 * complete before the write to device A. 173 * 174 * Consider a typical device interrupt handler which services the interrupt and 175 * writes to a device status-acknowledge register to clear the interrupt before 176 * returning. That write is posted to the L2 controller which "immediately" 177 * places it in a store buffer and automatically drains that buffer. This can 178 * be less immediate than you'd think... There may be no free slots in the store 179 * buffers, so an existing buffer has to be drained first to make room. The 180 * target bus may be busy with other traffic (such as DMA for various devices), 181 * delaying the drain of the store buffer for some indeterminate time. While 182 * all this delay is happening, execution proceeds on the CPU, unwinding its way 183 * out of the interrupt call stack to the point where the interrupt driver code 184 * is ready to EOI and unmask the interrupt. The interrupt controller may be 185 * accessed via a faster bus than the hardware whose handler just ran; the write 186 * to unmask and EOI the interrupt may complete quickly while the device write 187 * to ack and clear the interrupt source is still lingering in a store buffer 188 * waiting for access to a slower bus. 
With the interrupt unmasked at the 189 * interrupt controller but still active at the device, as soon as interrupts 190 * are enabled on the core the device re-interrupts immediately: now you've got 191 * a spurious interrupt on your hands. 192 * 193 * The right way to fix this problem is for every device driver to use the 194 * proper bus_space_barrier() calls in its interrupt handler. For ARM a single 195 * barrier call at the end of the handler would work. This would have to be 196 * done to every driver in the system, not just arm-specific drivers. 197 * 198 * Another potential fix is to map all device memory as Strongly-Ordered rather 199 * than Device memory, which takes the store buffers out of the picture. This 200 * has a pretty big impact on overall system performance, because each strongly 201 * ordered memory access causes all L2 store buffers to be drained. 202 * 203 * A compromise solution is to have the interrupt controller implementation call 204 * this function to establish a barrier between writes to the interrupt-source 205 * device and writes to the interrupt controller device. 206 * 207 * This takes the interrupt number as an argument, and currently doesn't use it. 208 * The plan is that maybe some day there is a way to flag certain interrupts as 209 * "memory barrier safe" and we can avoid this overhead with them. 210 */ 211void 212arm_irq_memory_barrier(uintptr_t irq) 213{ 214 215 dsb(); 216 cpu_l2cache_drain_writebuf(); 217} 218 219