1/*
2 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 *  This program is free software; you can redistribute it and/or modify
10 *  it under the terms of the GNU General Public License as published by
11 *  the Free Software Foundation; either version 2 of the License, or
12 *  (at your option) any later version.
13 *
14 *  This program is distributed in the hope that it will be useful,
15 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 *  GNU General Public License for more details.
18 *
19 *  You should have received a copy of the GNU General Public License
20 *  along with this program; if not, write to the Free Software
21 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/init.h>
27#include <linux/acpi.h>
28#include <linux/acpi_pmtmr.h>
29#include <linux/efi.h>
30#include <linux/cpumask.h>
31#include <linux/module.h>
32#include <linux/dmi.h>
33#include <linux/irq.h>
34#include <linux/bootmem.h>
35#include <linux/ioport.h>
36
37#include <asm/pgtable.h>
38#include <asm/io_apic.h>
39#include <asm/apic.h>
40#include <asm/io.h>
41#include <asm/mpspec.h>
42
/* Set by "acpi=force" on the command line; overrides the ACPI blacklist. */
static int __initdata acpi_force = 0;

/* Non-zero when ACPI is disabled, either at compile time or via "acpi=off". */
#ifdef	CONFIG_ACPI
int acpi_disabled = 0;
#else
int acpi_disabled = 1;
#endif
EXPORT_SYMBOL(acpi_disabled);
51
52#ifdef	CONFIG_X86_64
53
54#include <asm/proto.h>
55
/* OEM MADT quirk hook: no quirks applied on x86-64, always reports "no match".
 * (On 32-bit the mach headers included below presumably supply a real one.) */
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
57
58
59#else				/* X86 */
60
61#ifdef	CONFIG_X86_LOCAL_APIC
62#include <mach_apic.h>
63#include <mach_mpparse.h>
64#endif				/* CONFIG_X86_LOCAL_APIC */
65
66#endif				/* X86 */
67
/*
 * Sanity-check a MADT subtable entry: reject NULL, entries that would run
 * past the end of the table, and entries whose recorded length is shorter
 * than the structure we are about to read from them.
 */
#define BAD_MADT_ENTRY(entry, end) (					    \
		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
		((struct acpi_subtable_header *)entry)->length < sizeof(*entry))

#define PREFIX			"ACPI: "

int acpi_noirq;				/* skip ACPI IRQ initialization */
int acpi_pci_disabled __initdata;	/* skip ACPI PCI scan and IRQ initialization */
int acpi_ht __initdata = 1;	/* enable HT */

int acpi_lapic;		/* set once MADT LAPIC entries were parsed OK */
int acpi_ioapic;	/* set once MADT IO-APIC entries were parsed OK */
int acpi_strict;	/* "acpi=strict": disable out-of-spec workarounds */
EXPORT_SYMBOL(acpi_strict);

u8 acpi_sci_flags __initdata;		/* SCI trigger/polarity from "acpi_sci=" */
int acpi_sci_override_gsi __initdata;	/* GSI stashed by acpi_sci_ioapic_setup() */
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;

#ifdef CONFIG_X86_LOCAL_APIC
/* LAPIC base; may be replaced by the MADT value or a LAPIC_ADDR_OVR entry. */
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif

#ifndef __HAVE_ARCH_CMPXCHG
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
95
96/* --------------------------------------------------------------------------
97                              Boot-time Configuration
98   -------------------------------------------------------------------------- */
99
100/*
101 * The default interrupt routing model is PIC (8259).  This gets
102 * overriden if IOAPICs are enumerated (below).
103 */
104enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
105
106#ifdef	CONFIG_X86_64
107
108/* rely on all ACPI tables being in the direct mapping */
109char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
110{
111	if (!phys_addr || !size)
112		return NULL;
113
114	if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
115		return __va(phys_addr);
116
117	return NULL;
118}
119
120#else
121
122/*
123 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
124 * to map the target physical address. The problem is that set_fixmap()
125 * provides a single page, and it is possible that the page is not
126 * sufficient.
127 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
128 * i.e. until the next __va_range() call.
129 *
130 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
131 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
132 * count idx down while incrementing the phys address.
133 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	/* Low physical memory (< 8MB) is covered by the permanent mapping. */
	if (phys + size < 8 * 1024 * 1024)
		return __va(phys);

	/* Map the first page; remember where the region starts inside it. */
	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		/* Fixmap indices count *down* from FIX_ACPI_END (see above). */
		if (--idx < FIX_ACPI_BEGIN)
			return NULL;	/* cannot handle this */
		phys += PAGE_SIZE;
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((unsigned char *)base + offset);
}
161#endif
162
163#ifdef CONFIG_PCI_MMCONFIG
164/* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;	/* number of entries in pci_mmcfg_config[] */
167
168int __init acpi_parse_mcfg(struct acpi_table_header *header)
169{
170	struct acpi_table_mcfg *mcfg;
171	unsigned long i;
172	int config_size;
173
174	if (!header)
175		return -EINVAL;
176
177	mcfg = (struct acpi_table_mcfg *)header;
178
179	/* how many config structures do we have */
180	pci_mmcfg_config_num = 0;
181	i = header->length - sizeof(struct acpi_table_mcfg);
182	while (i >= sizeof(struct acpi_mcfg_allocation)) {
183		++pci_mmcfg_config_num;
184		i -= sizeof(struct acpi_mcfg_allocation);
185	};
186	if (pci_mmcfg_config_num == 0) {
187		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
188		return -ENODEV;
189	}
190
191	config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
192	pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
193	if (!pci_mmcfg_config) {
194		printk(KERN_WARNING PREFIX
195		       "No memory for MCFG config tables\n");
196		return -ENOMEM;
197	}
198
199	memcpy(pci_mmcfg_config, &mcfg[1], config_size);
200	for (i = 0; i < pci_mmcfg_config_num; ++i) {
201		if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
202			printk(KERN_ERR PREFIX
203			       "MMCONFIG not in low 4GB of memory\n");
204			kfree(pci_mmcfg_config);
205			pci_mmcfg_config_num = 0;
206			return -ENODEV;
207		}
208	}
209
210	return 0;
211}
212#endif				/* CONFIG_PCI_MMCONFIG */
213
214#ifdef CONFIG_X86_LOCAL_APIC
215static int __init acpi_parse_madt(struct acpi_table_header *table)
216{
217	struct acpi_table_madt *madt = NULL;
218
219	if (!cpu_has_apic)
220		return -EINVAL;
221
222	madt = (struct acpi_table_madt *)table;
223	if (!madt) {
224		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
225		return -ENODEV;
226	}
227
228	if (madt->address) {
229		acpi_lapic_addr = (u64) madt->address;
230
231		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
232		       madt->address);
233	}
234
235	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
236
237	return 0;
238}
239
240static int __init
241acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
242{
243	struct acpi_madt_local_apic *processor = NULL;
244
245	processor = (struct acpi_madt_local_apic *)header;
246
247	if (BAD_MADT_ENTRY(processor, end))
248		return -EINVAL;
249
250	acpi_table_print_madt_entry(header);
251
252	/*
253	 * We need to register disabled CPU as well to permit
254	 * counting disabled CPUs. This allows us to size
255	 * cpus_possible_map more accurately, to permit
256	 * to not preallocating memory for all NR_CPUS
257	 * when we use CPU hotplug.
258	 */
259	mp_register_lapic(processor->id,	/* APIC ID */
260			  processor->lapic_flags & ACPI_MADT_ENABLED);	/* Enabled? */
261
262	return 0;
263}
264
265static int __init
266acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
267			  const unsigned long end)
268{
269	struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
270
271	lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
272
273	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
274		return -EINVAL;
275
276	acpi_lapic_addr = lapic_addr_ovr->address;
277
278	return 0;
279}
280
281static int __init
282acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
283{
284	struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
285
286	lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
287
288	if (BAD_MADT_ENTRY(lapic_nmi, end))
289		return -EINVAL;
290
291	acpi_table_print_madt_entry(header);
292
293	if (lapic_nmi->lint != 1)
294		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
295
296	return 0;
297}
298
299#endif				/*CONFIG_X86_LOCAL_APIC */
300
301#ifdef CONFIG_X86_IO_APIC
302
303static int __init
304acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
305{
306	struct acpi_madt_io_apic *ioapic = NULL;
307
308	ioapic = (struct acpi_madt_io_apic *)header;
309
310	if (BAD_MADT_ENTRY(ioapic, end))
311		return -EINVAL;
312
313	acpi_table_print_madt_entry(header);
314
315	mp_register_ioapic(ioapic->id,
316			   ioapic->address, ioapic->global_irq_base);
317
318	return 0;
319}
320
321/*
322 * Parse Interrupt Source Override for the ACPI SCI
323 */
static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
{
	/* MPS INTI encoding: 3 = level trigger / active-low polarity. */
	if (trigger == 0)	/* compatible SCI trigger is level */
		trigger = 3;

	if (polarity == 0)	/* compatible SCI polarity is low */
		polarity = 3;

	/* Command-line over-ride via acpi_sci= */
	if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
		/* trigger bits live at bits 2-3 of the flags word */
		trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;

	if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;

	/*
	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
	 * If GSI is < 16, this will update its flags,
	 * else it will create a new mp_irqs[] entry.
	 */
	mp_override_legacy_irq(gsi, polarity, trigger, gsi);

	/*
	 * stash over-ride to indicate we've been here
	 * and for later update of acpi_gbl_FADT
	 */
	acpi_sci_override_gsi = gsi;
	return;
}
353
/* Handle a MADT Interrupt Source Override entry (legacy IRQ -> GSI). */
static int __init
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
		       const unsigned long end)
{
	struct acpi_madt_interrupt_override *intsrc = NULL;

	intsrc = (struct acpi_madt_interrupt_override *)header;

	if (BAD_MADT_ENTRY(intsrc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Overrides of the SCI take the dedicated path (honors acpi_sci=). */
	if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
		acpi_sci_ioapic_setup(intsrc->global_irq,
				      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
				      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
		return 0;
	}

	/* "acpi_skip_timer_override": drop the BIOS IRQ0 -> pin 2 override. */
	if (acpi_skip_timer_override &&
	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(intsrc->source_irq,
				intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
				(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
				intsrc->global_irq);

	return 0;
}
387
/* Handle a MADT NMI Source entry: currently only validated and printed. */
static int __init
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_nmi_source *nmi_src = NULL;

	nmi_src = (struct acpi_madt_nmi_source *)header;

	if (BAD_MADT_ENTRY(nmi_src, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* TBD: Support NMI source entries? */

	return 0;
}
404
405#endif				/* CONFIG_X86_IO_APIC */
406
407/*
408 * acpi_pic_sci_set_trigger()
409 *
410 * use ELCR to set PIC-mode trigger type for SCI
411 *
412 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
413 * it may require Edge Trigger -- use "acpi_sci=edge"
414 *
415 * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
416 * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
417 * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
418 * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
419 */
420
void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
{
	unsigned int mask = 1 << irq;
	unsigned int old, new;

	/* Real old ELCR mask */
	old = inb(0x4d0) | (inb(0x4d1) << 8);

	/*
	 * If we use ACPI to set PCI irq's, then we should clear ELCR
	 * since we will set it correctly as we enable the PCI irq
	 * routing.
	 */
	new = acpi_noirq ? old : 0;

	/*
	 * Update SCI information in the ELCR, it isn't in the PCI
	 * routing tables..
	 */
	switch (trigger) {
	case 1:		/* Edge - clear */
		new &= ~mask;
		break;
	case 3:		/* Level - set */
		new |= mask;
		break;
	}

	/* Avoid touching the hardware when nothing changed. */
	if (old == new)
		return;

	printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
	/* Write back low byte (IRQ 0-7) and high byte (IRQ 8-15). */
	outb(new, 0x4d0);
	outb(new >> 8, 0x4d1);
}
456
457int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
458{
459	*irq = gsi;
460	return 0;
461}
462
463/*
464 * success: return IRQ number (>=0)
465 * failure: return < 0
466 */
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
	unsigned int irq;
	unsigned int plat_gsi = gsi;

#ifdef CONFIG_PCI
	/*
	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
		extern void eisa_set_level_irq(unsigned int irq);

		if (triggering == ACPI_LEVEL_SENSITIVE)
			eisa_set_level_irq(gsi);
	}
#endif

#ifdef CONFIG_X86_IO_APIC
	/* In IO-APIC mode the platform GSI may differ from the raw GSI. */
	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
	}
#endif
	acpi_gsi_to_irq(plat_gsi, &irq);
	return irq;
}
492
493EXPORT_SYMBOL(acpi_register_gsi);
494
495/*
496 *  ACPI based hotplug support for CPU
497 */
498#ifdef CONFIG_ACPI_HOTPLUG_CPU
499int acpi_map_lsapic(acpi_handle handle, int *pcpu)
500{
501	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
502	union acpi_object *obj;
503	struct acpi_madt_local_apic *lapic;
504	cpumask_t tmp_map, new_map;
505	u8 physid;
506	int cpu;
507
508	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
509		return -EINVAL;
510
511	if (!buffer.length || !buffer.pointer)
512		return -EINVAL;
513
514	obj = buffer.pointer;
515	if (obj->type != ACPI_TYPE_BUFFER ||
516	    obj->buffer.length < sizeof(*lapic)) {
517		kfree(buffer.pointer);
518		return -EINVAL;
519	}
520
521	lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
522
523	if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
524	    !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
525		kfree(buffer.pointer);
526		return -EINVAL;
527	}
528
529	physid = lapic->id;
530
531	kfree(buffer.pointer);
532	buffer.length = ACPI_ALLOCATE_BUFFER;
533	buffer.pointer = NULL;
534
535	tmp_map = cpu_present_map;
536	mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
537
538	/*
539	 * If mp_register_lapic successfully generates a new logical cpu
540	 * number, then the following will get us exactly what was mapped
541	 */
542	cpus_andnot(new_map, cpu_present_map, tmp_map);
543	if (cpus_empty(new_map)) {
544		printk ("Unable to map lapic to logical cpu number\n");
545		return -EINVAL;
546	}
547
548	cpu = first_cpu(new_map);
549
550	*pcpu = cpu;
551	return 0;
552}
553
554EXPORT_SYMBOL(acpi_map_lsapic);
555
556int acpi_unmap_lsapic(int cpu)
557{
558	x86_cpu_to_apicid[cpu] = -1;
559	cpu_clear(cpu, cpu_present_map);
560	num_processors--;
561
562	return (0);
563}
564
565EXPORT_SYMBOL(acpi_unmap_lsapic);
566#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
567
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
	/* TBD: IO-APIC hot-add is not implemented; always fails. */
	return -EINVAL;
}
573
574EXPORT_SYMBOL(acpi_register_ioapic);
575
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
	/* TBD: IO-APIC hot-remove is not implemented; always fails. */
	return -EINVAL;
}
581
582EXPORT_SYMBOL(acpi_unregister_ioapic);
583
584static unsigned long __init
585acpi_scan_rsdp(unsigned long start, unsigned long length)
586{
587	unsigned long offset = 0;
588	unsigned long sig_len = sizeof("RSD PTR ") - 1;
589
590	/*
591	 * Scan all 16-byte boundaries of the physical memory region for the
592	 * RSDP signature.
593	 */
594	for (offset = 0; offset < length; offset += 16) {
595		if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
596			continue;
597		return (start + offset);
598	}
599
600	return 0;
601}
602
603static int __init acpi_parse_sbf(struct acpi_table_header *table)
604{
605	struct acpi_table_boot *sb;
606
607	sb = (struct acpi_table_boot *)table;
608	if (!sb) {
609		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
610		return -ENODEV;
611	}
612
613	sbf_port = sb->cmos_index;	/* Save CMOS port */
614
615	return 0;
616}
617
618#ifdef CONFIG_HPET_TIMER
619#include <asm/hpet.h>
620
621static int __init acpi_parse_hpet(struct acpi_table_header *table)
622{
623	struct acpi_table_hpet *hpet_tbl;
624
625	hpet_tbl = (struct acpi_table_hpet *)table;
626	if (!hpet_tbl) {
627		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
628		return -ENODEV;
629	}
630
631	if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
632		printk(KERN_WARNING PREFIX "HPET timers must be located in "
633		       "memory.\n");
634		return -1;
635	}
636
637	hpet_address = hpet_tbl->address.address;
638	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
639	       hpet_tbl->id, hpet_address);
640
641	return 0;
642}
643#else
644#define	acpi_parse_hpet	NULL
645#endif
646
/* Parse the FADT: currently only locates the ACPI PM timer I/O port. */
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{

#ifdef CONFIG_X86_PM_TIMER
	/* detect the location of the ACPI PM Timer */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
		/* FADT rev. 2 */
		if (acpi_gbl_FADT.xpm_timer_block.space_id !=
		    ACPI_ADR_SPACE_SYSTEM_IO)
			return 0;

		pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
		/*
		 * "X" fields are optional extensions to the original V1.0
		 * fields, so we must selectively expand V1.0 fields if the
		 * corresponding X field is zero.
		 */
		if (!pmtmr_ioport)
			pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	} else {
		/* FADT rev. 1 */
		pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	}
	if (pmtmr_ioport)
		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
		       pmtmr_ioport);
#endif
	return 0;
}
676
/* Locate the RSDP: via EFI tables if available, else by memory scan. */
unsigned long __init acpi_find_rsdp(void)
{
	unsigned long rsdp_phys = 0;

	/* On EFI systems the firmware hands us the RSDP directly. */
	if (efi_enabled) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
	}
	/*
	 * Scan memory looking for the RSDP signature. First search EBDA (low
	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
	 * NOTE(review): the first scan covers 0-0x400, i.e. the first KB,
	 * rather than dereferencing the EBDA pointer — confirm intended.
	 */
	rsdp_phys = acpi_scan_rsdp(0, 0x400);
	if (!rsdp_phys)
		rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);

	return rsdp_phys;
}
697
698#ifdef	CONFIG_X86_LOCAL_APIC
699/*
700 * Parse LAPIC entries in MADT
701 * returns 0 on success, < 0 on error
702 */
703static int __init acpi_parse_madt_lapic_entries(void)
704{
705	int count;
706
707	if (!cpu_has_apic)
708		return -ENODEV;
709
710	/*
711	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
712	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
713	 */
714
715	count =
716	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
717				  acpi_parse_lapic_addr_ovr, 0);
718	if (count < 0) {
719		printk(KERN_ERR PREFIX
720		       "Error parsing LAPIC address override entry\n");
721		return count;
722	}
723
724	mp_register_lapic_address(acpi_lapic_addr);
725
726	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
727				      MAX_APICS);
728	if (!count) {
729		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
730		/* TBD: Cleanup to allow fallback to MPS */
731		return -ENODEV;
732	} else if (count < 0) {
733		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
734		/* TBD: Cleanup to allow fallback to MPS */
735		return count;
736	}
737
738	count =
739	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
740	if (count < 0) {
741		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
742		/* TBD: Cleanup to allow fallback to MPS */
743		return count;
744	}
745	return 0;
746}
747#endif				/* CONFIG_X86_LOCAL_APIC */
748
749#ifdef	CONFIG_X86_IO_APIC
750/*
751 * Parse IOAPIC related entries in MADT
752 * returns 0 on success, < 0 on error
753 */
754static int __init acpi_parse_madt_ioapic_entries(void)
755{
756	int count;
757
758	/*
759	 * ACPI interpreter is required to complete interrupt setup,
760	 * so if it is off, don't enumerate the io-apics with ACPI.
761	 * If MPS is present, it will handle them,
762	 * otherwise the system will stay in PIC mode
763	 */
764	if (acpi_disabled || acpi_noirq) {
765		return -ENODEV;
766	}
767
768	if (!cpu_has_apic)
769		return -ENODEV;
770
771	/*
772	 * if "noapic" boot option, don't look for IO-APICs
773	 */
774	if (skip_ioapic_setup) {
775		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
776		       "due to 'noapic' option.\n");
777		return -ENODEV;
778	}
779
780	count =
781	    acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
782				  MAX_IO_APICS);
783	if (!count) {
784		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
785		return -ENODEV;
786	} else if (count < 0) {
787		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
788		return count;
789	}
790
791	count =
792	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
793				  NR_IRQ_VECTORS);
794	if (count < 0) {
795		printk(KERN_ERR PREFIX
796		       "Error parsing interrupt source overrides entry\n");
797		/* TBD: Cleanup to allow fallback to MPS */
798		return count;
799	}
800
801	/*
802	 * If BIOS did not supply an INT_SRC_OVR for the SCI
803	 * pretend we got one so we can set the SCI flags.
804	 */
805	if (!acpi_sci_override_gsi)
806		acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
807
808	/* Fill in identity legacy mapings where no override */
809	mp_config_acpi_legacy_irqs();
810
811	count =
812	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
813				  NR_IRQ_VECTORS);
814	if (count < 0) {
815		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
816		/* TBD: Cleanup to allow fallback to MPS */
817		return count;
818	}
819
820	return 0;
821}
822#else
/* No IO-APIC support compiled in: report failure so callers skip IO-APIC setup. */
static inline int acpi_parse_madt_ioapic_entries(void)
{
	return -1;
}
827#endif	/* !CONFIG_X86_IO_APIC */
828
/*
 * Drive MADT processing: parse the table header, then the LAPIC and
 * IO-APIC entries, setting acpi_lapic/acpi_ioapic/acpi_irq_model on
 * success.  An invalid MADT disables ACPI entirely.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = acpi_parse_madt_lapic_entries();
		if (!error) {
			acpi_lapic = 1;

#ifdef CONFIG_X86_GENERICARCH
			generic_bigsmp_probe();
#endif
			/*
			 * Parse MADT IO-APIC entries
			 */
			error = acpi_parse_madt_ioapic_entries();
			if (!error) {
				/* Switch from PIC to IO-APIC interrupt routing. */
				acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
				acpi_irq_balance_set(NULL);
				acpi_ioapic = 1;

				smp_found_config = 1;
				setup_apic_routing();
			}
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			printk(KERN_ERR PREFIX
			       "Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	}
#endif
	return;
}
871
872#ifdef __i386__
873
874static int __init disable_acpi_irq(struct dmi_system_id *d)
875{
876	if (!acpi_force) {
877		printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
878		       d->ident);
879		acpi_noirq_set();
880	}
881	return 0;
882}
883
884static int __init disable_acpi_pci(struct dmi_system_id *d)
885{
886	if (!acpi_force) {
887		printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
888		       d->ident);
889		acpi_disable_pci();
890	}
891	return 0;
892}
893
894static int __init dmi_disable_acpi(struct dmi_system_id *d)
895{
896	if (!acpi_force) {
897		printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
898		disable_acpi();
899	} else {
900		printk(KERN_NOTICE
901		       "Warning: DMI blacklist says broken, but acpi forced\n");
902	}
903	return 0;
904}
905
906/*
907 * Limit ACPI to CPU enumeration for HT
908 */
909static int __init force_acpi_ht(struct dmi_system_id *d)
910{
911	if (!acpi_force) {
912		printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
913		       d->ident);
914		disable_acpi();
915		acpi_ht = 1;
916	} else {
917		printk(KERN_NOTICE
918		       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
919	}
920	return 0;
921}
922
923/*
924 * If your system is blacklisted here, but you find that acpi=force
925 * works for you, please contact acpi-devel@sourceforge.net
926 */
/* DMI match table scanned from acpi_boot_table_init(); each entry names a
 * known-broken machine and the workaround callback to run for it. */
static struct dmi_system_id __initdata acpi_dmi_table[] = {
	/*
	 * Boxes that need ACPI disabled
	 */
	{
	 .callback = dmi_disable_acpi,
	 .ident = "IBM Thinkpad",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
		     },
	 },

	/*
	 * Boxes that need acpi=ht
	 */
	{
	 .callback = force_acpi_ht,
	 .ident = "FSC Primergy T850",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "DELL GX240",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
		     DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "HP VISUALIZE NT Workstation",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "Compaq Workstation W8000",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "ASUS P4B266",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "ASUS P2B-DS",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "ASUS CUR-DLS",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "ABIT i440BX-W83977",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
		     DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "IBM Bladecenter",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "IBM eServer xSeries 360",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "IBM eserver xSeries 330",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
		     },
	 },
	{
	 .callback = force_acpi_ht,
	 .ident = "IBM eserver xSeries 440",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
		     },
	 },

	/*
	 * Boxes that need ACPI PCI IRQ routing disabled
	 */
	{
	 .callback = disable_acpi_irq,
	 .ident = "ASUS A7V",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
		     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
		     /* newer BIOS, Revision 1011, does work */
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS A7V ACPI BIOS Revision 1007"),
		     },
	 },
	{
		/*
		 * Latest BIOS for IBM 600E (1.16) has bad pcinum
		 * for LPC bridge, which is needed for the PCI
		 * interrupt links to work. DSDT fix is in bug 5966.
		 * 2645, 2646 model numbers are shared with 600/600E/600X
		 */
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2645",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2645"),
		     },
	 },
	{
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2646",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2646"),
		     },
	 },
	/*
	 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
	 */
	{			/* _BBN 0 bug */
	 .callback = disable_acpi_pci,
	 .ident = "ASUS PR-DLS",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS PR-DLS ACPI BIOS Revision 1010"),
		     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
		     },
	 },
	{
	 .callback = disable_acpi_pci,
	 .ident = "Acer TravelMate 36x Laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
		     },
	 },
	{}			/* terminating entry */
};
1100
1101#endif				/* __i386__ */
1102
1103/*
1104 * acpi_boot_table_init() and acpi_boot_init()
1105 *  called from setup_arch(), always.
1106 *	1. checksums all tables
1107 *	2. enumerates lapics
1108 *	3. enumerates io-apics
1109 *
1110 * acpi_table_init() is separate to allow reading SRAT without
1111 * other side effects.
1112 *
1113 * side effects of acpi_boot_init:
1114 *	acpi_lapic = 1 if LAPIC found
1115 *	acpi_ioapic = 1 if IOAPIC found
1116 *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1117 *	if acpi_blacklisted() acpi_disabled = 1;
1118 *	acpi_irq_model=...
1119 *	...
1120 *
1121 * return value: (currently ignored)
1122 *	0: success
1123 *	!0: failure
1124 */
1125
int __init acpi_boot_table_init(void)
{
	int error;

#ifdef __i386__
	/* Apply per-machine workarounds before touching any tables. */
	dmi_check_system(acpi_dmi_table);
#endif

	/*
	 * If acpi_disabled, bail out
	 * One exception: acpi=ht continues far enough to enumerate LAPICs
	 */
	if (acpi_disabled && !acpi_ht)
		return 1;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	error = acpi_table_init();
	if (error) {
		/* Tables are unusable; turn ACPI off entirely. */
		disable_acpi();
		return error;
	}

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * blacklist may disable ACPI entirely
	 */
	error = acpi_blacklisted();
	if (error) {
		if (acpi_force) {
			printk(KERN_WARNING PREFIX "acpi=force override\n");
		} else {
			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
			disable_acpi();
			return error;
		}
	}

	return 0;
}
1168
int __init acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 * One exception: acpi=ht continues far enough to enumerate LAPICs
	 */
	if (acpi_disabled && !acpi_ht)
		return 1;

	/* NOTE(review): SBF is also parsed in acpi_boot_table_init();
	 * this second pass looks redundant — confirm before removing. */
	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

	return 0;
}
1194
1195static int __init parse_acpi(char *arg)
1196{
1197	if (!arg)
1198		return -EINVAL;
1199
1200	/* "acpi=off" disables both ACPI table parsing and interpreter */
1201	if (strcmp(arg, "off") == 0) {
1202		disable_acpi();
1203	}
1204	/* acpi=force to over-ride black-list */
1205	else if (strcmp(arg, "force") == 0) {
1206		acpi_force = 1;
1207		acpi_ht = 1;
1208		acpi_disabled = 0;
1209	}
1210	/* acpi=strict disables out-of-spec workarounds */
1211	else if (strcmp(arg, "strict") == 0) {
1212		acpi_strict = 1;
1213	}
1214	/* Limit ACPI just to boot-time to enable HT */
1215	else if (strcmp(arg, "ht") == 0) {
1216		if (!acpi_force)
1217			disable_acpi();
1218		acpi_ht = 1;
1219	}
1220	/* "acpi=noirq" disables ACPI interrupt routing */
1221	else if (strcmp(arg, "noirq") == 0) {
1222		acpi_noirq_set();
1223	} else {
1224		/* Core will printk when we return error. */
1225		return -EINVAL;
1226	}
1227	return 0;
1228}
1229early_param("acpi", parse_acpi);
1230
1231static int __init parse_pci(char *arg)
1232{
1233	if (arg && strcmp(arg, "noacpi") == 0)
1234		acpi_disable_pci();
1235	return 0;
1236}
1237early_param("pci", parse_pci);
1238
1239#ifdef CONFIG_X86_IO_APIC
/* "acpi_skip_timer_override": ignore the BIOS IRQ0 -> pin 2 override. */
static int __init parse_acpi_skip_timer_override(char *arg)
{
	acpi_skip_timer_override = 1;
	return 0;
}
1245early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1246
/* "acpi_use_timer_override": honor the BIOS timer override (flag is
 * consumed outside this file). */
static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;
	return 0;
}
1252early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
1253#endif /* CONFIG_X86_IO_APIC */
1254
1255static int __init setup_acpi_sci(char *s)
1256{
1257	if (!s)
1258		return -EINVAL;
1259	if (!strcmp(s, "edge"))
1260		acpi_sci_flags =  ACPI_MADT_TRIGGER_EDGE |
1261			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1262	else if (!strcmp(s, "level"))
1263		acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1264			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1265	else if (!strcmp(s, "high"))
1266		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1267			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1268	else if (!strcmp(s, "low"))
1269		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1270			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1271	else
1272		return -EINVAL;
1273	return 0;
1274}
1275early_param("acpi_sci", setup_acpi_sci);
1276
/*
 * Acquire the ACPI Global Lock (the FACS lock word shared with firmware).
 * Lock encoding per the ACPI spec: bit 0 = pending, bit 1 = owned.
 *
 * The update sets the owned bit and, if the lock was already owned,
 * also sets the pending bit; cmpxchg retries until the update lands.
 *
 * Returns -1 (truthy: acquired outright) when the lock was free, 0 when
 * it is held and we must wait for a GBL_RELEASE event.
 * NOTE(review): the "new < 3" test assumes bits above bit 1 of the lock
 * word are zero, as the ACPI spec lays out — confirm for this firmware.
 */
int __acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}
1287
/*
 * Release the ACPI Global Lock: atomically clear both the owned and
 * pending bits.  Returns the old pending bit; non-zero tells the caller
 * that the firmware is waiting and a GBL_RELEASE must be signalled.
 */
int __acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}
1298