mca.c revision 314350
/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/x86/mca.c 314350 2017-02-27 17:20:49Z avg $");

#ifdef __amd64__
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
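/*
 * POLLED is used by the periodic scan (and the force_scan sysctl),
 * MCE by the machine check exception handler and CMCI by the
 * corrected machine check interrupt handler.
 */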
enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	time_t	last_intr;
};
#endif

struct mca_internal {
	struct mca_record rec;
	int		logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
TUNABLE_INT("hw.mca.enabled", &mca_enabled);
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
TUNABLE_INT("hw.mca.amd10h_L1TP", &amd10h_L1TP);
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
TUNABLE_INT("hw.mca.intel6h_HSD131", &intel6h_HSD131);
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
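/*
 * mca_lock protects mca_records, mca_freelist, mca_freecount and
 * mca_count.  It is a spin mutex since it is acquired from the
 * machine check exception and CMCI handlers.
 */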
static struct mtx mca_lock;

#ifdef DEV_APIC
static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
#endif

static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

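	/*
	 * The count may have changed since the unlocked check above,
	 * so validate the index again under the lock.
	 */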
	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

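/*
 * The helpers below decode the subfields of MCA compound error
 * codes: the transaction type (TT, bits 3:2), the memory hierarchy
 * level (LL, bits 1:0), the request type (RRRR, bits 7:4) and the
 * memory controller transaction type (MMM, bits 6:4).
 */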
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see HSD131, HSM142, HSW131 and BDM48
	 * erratum respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, Celeron
	 * 2955U with a CPU ID of 0x45 apparently are also concerned with the
	 * same problem, with HSM142 only referring to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

static int
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
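		/*
		 * CPUID acts as a serializing instruction here,
		 * ensuring the write clearing the status register
		 * has taken effect.
		 */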
		do_cpuid(0, p);
	}
	return (1);
}

static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

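	/*
	 * A POLLED scan runs in a context that may sleep, so a fresh
	 * record can be allocated.  MCE and CMCI scans run in
	 * exception or interrupt context and must instead take a
	 * preallocated record from the freelist.
	 */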
	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI)
		taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
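/*
 * For example, with the default cmc_throttle of 60 seconds, a bank
 * interrupting more than once per minute has its threshold doubled
 * (1, 2, 4, ...) until the interrupts slow down or max_threshold is
 * reached.
 */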
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(time_uptime - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = time_uptime;
		return;
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling a MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;
#endif

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

#ifdef DEV_APIC
		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i)
			cmci_update(mode, i, valid, &rec);
#endif
	}
	if (mode == POLLED)
		mca_fill_freelist();
	return (mode == MCE ? recoverable : count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	return (0);
}

static void
mca_createtq(void *dummy)
{
	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);

#ifdef DEV_APIC
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
#endif

static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, CALLOUT_MPSAFE);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
#ifdef DEV_APIC
	if (mcg_cap & MCG_CAP_CMCI_P)
		cmci_setup();
#endif
}

#ifdef DEV_APIC
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI_EN.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
#endif

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

#ifdef DEV_APIC
			if (mcg_cap & MCG_CAP_CMCI_P) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}
#endif

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

#ifdef DEV_APIC
		if (PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
#endif
	}

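	/* Finally, enable the machine check exception via CR4.MCE. */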
	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int old_count, recoverable;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	old_count = mca_count;
	recoverable = mca_scan(MCE);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Wait for at least one error to be logged before
		 * panic'ing.  Some errors will assert a machine check
		 * on all CPUs, but only certain CPUs will find a valid
		 * bank to log.
		 */
		while (mca_count == old_count)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}

#ifdef DEV_APIC
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI);

	/* If we found anything, log them to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
#endif