/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/x86/mca.c 283927 2015-06-02 19:20:39Z jhb $");

#ifdef __amd64__
#define DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	int	last_intr;
};
#endif

struct mca_internal {
	struct mca_record rec;
	int	logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
TUNABLE_INT("hw.mca.enabled", &mca_enabled);
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
TUNABLE_INT("hw.mca.amd10h_L1TP", &amd10h_L1TP);
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
TUNABLE_INT("hw.mca.intel6h_hsd131", &intel6h_HSD131);
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

#ifdef DEV_APIC
static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
#endif

static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
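	/*
	 * Bits 6:4 of the compound MCA error code select the memory
	 * transaction type decoded here.
	 */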
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int __nonnull(1)
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by desktop Haswell
	 * (see HSD131 erratum) unless reporting is enabled.
	 * Note that these errors also have been observed with D0-stepping,
	 * while the revision 014 desktop Haswell specification update only
	 * talks about C0-stepping.
	 */
	if (rec->mr_cpu_vendor_id == CPU_VENDOR_INTEL &&
	    rec->mr_cpu_id == 0x306c3 && rec->mr_bank == 0 &&
	    rec->mr_status == 0x90000040000f0005 && !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void __nonnull(1)
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
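		/*
		 * Compound bus/interconnect codes have the form
		 * 000F 1PPT RRRR IILL: participation (PP), timeout (T),
		 * request type (RRRR), memory vs. I/O (II), and level (LL),
		 * which is what the decoding below walks through.
		 */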
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

static int __nonnull(2)
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);
	}
	return (1);
}

static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

static void __nonnull(2)
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI)
		taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrive, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(ticks - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = ticks;
		return;
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling a MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
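		 * cmci_mask is the per-CPU bitmask of banks this CPU
		 * claimed in cmci_monitor().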
612 */ 613 if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i)) 614 continue; 615#endif 616 617 valid = mca_check_status(i, &rec); 618 if (valid) { 619 count++; 620 if (rec.mr_status & ucmask) { 621 recoverable = 0; 622 mtx_lock_spin(&mca_lock); 623 mca_log(&rec); 624 mtx_unlock_spin(&mca_lock); 625 } 626 mca_record_entry(mode, &rec); 627 } 628 629#ifdef DEV_APIC 630 /* 631 * If this is a bank this CPU monitors via CMCI, 632 * update the threshold. 633 */ 634 if (PCPU_GET(cmci_mask) & 1 << i) 635 cmci_update(mode, i, valid, &rec); 636#endif 637 } 638 if (mode == POLLED) 639 mca_fill_freelist(); 640 return (mode == MCE ? recoverable : count); 641} 642 643/* 644 * Scan the machine check banks on all CPUs by binding to each CPU in 645 * turn. If any of the CPUs contained new machine check records, log 646 * them to the console. 647 */ 648static void 649mca_scan_cpus(void *context, int pending) 650{ 651 struct mca_internal *mca; 652 struct thread *td; 653 int count, cpu; 654 655 mca_fill_freelist(); 656 td = curthread; 657 count = 0; 658 thread_lock(td); 659 CPU_FOREACH(cpu) { 660 sched_bind(td, cpu); 661 thread_unlock(td); 662 count += mca_scan(POLLED); 663 thread_lock(td); 664 sched_unbind(td); 665 } 666 thread_unlock(td); 667 if (count != 0) { 668 mtx_lock_spin(&mca_lock); 669 STAILQ_FOREACH(mca, &mca_records, link) { 670 if (!mca->logged) { 671 mca->logged = 1; 672 mca_log(&mca->rec); 673 } 674 } 675 mtx_unlock_spin(&mca_lock); 676 } 677} 678 679static void 680mca_periodic_scan(void *arg) 681{ 682 683 taskqueue_enqueue_fast(mca_tq, &mca_scan_task); 684 callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL); 685} 686 687static int 688sysctl_mca_scan(SYSCTL_HANDLER_ARGS) 689{ 690 int error, i; 691 692 i = 0; 693 error = sysctl_handle_int(oidp, &i, 0, req); 694 if (error) 695 return (error); 696 if (i) 697 taskqueue_enqueue_fast(mca_tq, &mca_scan_task); 698 return (0); 699} 700 701static void 702mca_createtq(void *dummy) 703{ 704 if (mca_banks <= 0) 705 return; 706 707 mca_tq = taskqueue_create_fast("mca", M_WAITOK, 708 taskqueue_thread_enqueue, &mca_tq); 709 taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq"); 710} 711SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL); 712 713static void 714mca_startup(void *dummy) 715{ 716 717 if (mca_banks <= 0) 718 return; 719 720 callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL); 721} 722SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL); 723 724#ifdef DEV_APIC 725static void 726cmci_setup(void) 727{ 728 int i; 729 730 cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA, 731 M_WAITOK); 732 for (i = 0; i <= mp_maxid; i++) 733 cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks, 734 M_MCA, M_WAITOK | M_ZERO); 735 SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, 736 "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 737 &cmc_throttle, 0, sysctl_positive_int, "I", 738 "Interval in seconds to throttle corrected MC interrupts"); 739} 740#endif 741 742static void 743mca_setup(uint64_t mcg_cap) 744{ 745 746 /* 747 * On AMD Family 10h processors, unless logging of level one TLB 748 * parity (L1TP) errors is disabled, enable the recommended workaround 749 * for Erratum 383. 
750 */ 751 if (cpu_vendor_id == CPU_VENDOR_AMD && 752 CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP) 753 workaround_erratum383 = 1; 754 755 mca_banks = mcg_cap & MCG_CAP_COUNT; 756 mtx_init(&mca_lock, "mca", NULL, MTX_SPIN); 757 STAILQ_INIT(&mca_records); 758 TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL); 759 callout_init(&mca_timer, CALLOUT_MPSAFE); 760 STAILQ_INIT(&mca_freelist); 761 TASK_INIT(&mca_refill_task, 0, mca_refill, NULL); 762 mca_fill_freelist(); 763 SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, 764 "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0, 765 "Record count"); 766 SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, 767 "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks, 768 0, sysctl_positive_int, "I", 769 "Periodic interval in seconds to scan for machine checks"); 770 SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, 771 "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records"); 772 SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, 773 "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, 774 sysctl_mca_scan, "I", "Force an immediate scan for machine checks"); 775#ifdef DEV_APIC 776 if (mcg_cap & MCG_CAP_CMCI_P) 777 cmci_setup(); 778#endif 779} 780 781#ifdef DEV_APIC 782/* 783 * See if we should monitor CMCI for this bank. If CMCI_EN is already 784 * set in MC_CTL2, then another CPU is responsible for this bank, so 785 * ignore it. If CMCI_EN returns zero after being set, then this bank 786 * does not support CMCI_EN. If this CPU sets CMCI_EN, then it should 787 * now monitor this bank. 788 */ 789static void 790cmci_monitor(int i) 791{ 792 struct cmc_state *cc; 793 uint64_t ctl; 794 795 KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid))); 796 797 ctl = rdmsr(MSR_MC_CTL2(i)); 798 if (ctl & MC_CTL2_CMCI_EN) 799 /* Already monitored by another CPU. */ 800 return; 801 802 /* Set the threshold to one event for now. */ 803 ctl &= ~MC_CTL2_THRESHOLD; 804 ctl |= MC_CTL2_CMCI_EN | 1; 805 wrmsr(MSR_MC_CTL2(i), ctl); 806 ctl = rdmsr(MSR_MC_CTL2(i)); 807 if (!(ctl & MC_CTL2_CMCI_EN)) 808 /* This bank does not support CMCI. */ 809 return; 810 811 cc = &cmc_state[PCPU_GET(cpuid)][i]; 812 813 /* Determine maximum threshold. */ 814 ctl &= ~MC_CTL2_THRESHOLD; 815 ctl |= 0x7fff; 816 wrmsr(MSR_MC_CTL2(i), ctl); 817 ctl = rdmsr(MSR_MC_CTL2(i)); 818 cc->max_threshold = ctl & MC_CTL2_THRESHOLD; 819 820 /* Start off with a threshold of 1. */ 821 ctl &= ~MC_CTL2_THRESHOLD; 822 ctl |= 1; 823 wrmsr(MSR_MC_CTL2(i), ctl); 824 825 /* Mark this bank as monitored. */ 826 PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i); 827} 828 829/* 830 * For resume, reset the threshold for any banks we monitor back to 831 * one and throw away the timestamp of the last interrupt. 832 */ 833static void 834cmci_resume(int i) 835{ 836 struct cmc_state *cc; 837 uint64_t ctl; 838 839 KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid))); 840 841 /* Ignore banks not monitored by this CPU. */ 842 if (!(PCPU_GET(cmci_mask) & 1 << i)) 843 return; 844 845 cc = &cmc_state[PCPU_GET(cpuid)][i]; 846 cc->last_intr = -ticks; 847 ctl = rdmsr(MSR_MC_CTL2(i)); 848 ctl &= ~MC_CTL2_THRESHOLD; 849 ctl |= MC_CTL2_CMCI_EN | 1; 850 wrmsr(MSR_MC_CTL2(i), ctl); 851} 852#endif 853 854/* 855 * Initializes per-CPU machine check registers and enables corrected 856 * machine check interrupts. 
857 */ 858static void 859_mca_init(int boot) 860{ 861 uint64_t mcg_cap; 862 uint64_t ctl, mask; 863 int i, skip; 864 865 /* MCE is required. */ 866 if (!mca_enabled || !(cpu_feature & CPUID_MCE)) 867 return; 868 869 if (cpu_feature & CPUID_MCA) { 870 if (boot) 871 PCPU_SET(cmci_mask, 0); 872 873 mcg_cap = rdmsr(MSR_MCG_CAP); 874 if (mcg_cap & MCG_CAP_CTL_P) 875 /* Enable MCA features. */ 876 wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE); 877 if (PCPU_GET(cpuid) == 0 && boot) 878 mca_setup(mcg_cap); 879 880 /* 881 * Disable logging of level one TLB parity (L1TP) errors by 882 * the data cache as an alternative workaround for AMD Family 883 * 10h Erratum 383. Unlike the recommended workaround, there 884 * is no performance penalty to this workaround. However, 885 * L1TP errors will go unreported. 886 */ 887 if (cpu_vendor_id == CPU_VENDOR_AMD && 888 CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) { 889 mask = rdmsr(MSR_MC0_CTL_MASK); 890 if ((mask & (1UL << 5)) == 0) 891 wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5)); 892 } 893 for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) { 894 /* By default enable logging of all errors. */ 895 ctl = 0xffffffffffffffffUL; 896 skip = 0; 897 898 if (cpu_vendor_id == CPU_VENDOR_INTEL) { 899 /* 900 * For P6 models before Nehalem MC0_CTL is 901 * always enabled and reserved. 902 */ 903 if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6 904 && CPUID_TO_MODEL(cpu_id) < 0x1a) 905 skip = 1; 906 } else if (cpu_vendor_id == CPU_VENDOR_AMD) { 907 /* BKDG for Family 10h: unset GartTblWkEn. */ 908 if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf) 909 ctl &= ~(1UL << 10); 910 } 911 912 if (!skip) 913 wrmsr(MSR_MC_CTL(i), ctl); 914 915#ifdef DEV_APIC 916 if (mcg_cap & MCG_CAP_CMCI_P) { 917 if (boot) 918 cmci_monitor(i); 919 else 920 cmci_resume(i); 921 } 922#endif 923 924 /* Clear all errors. */ 925 wrmsr(MSR_MC_STATUS(i), 0); 926 } 927 928#ifdef DEV_APIC 929 if (PCPU_GET(cmci_mask) != 0 && boot) 930 lapic_enable_cmc(); 931#endif 932 } 933 934 load_cr4(rcr4() | CR4_MCE); 935} 936 937/* Must be executed on each CPU during boot. */ 938void 939mca_init(void) 940{ 941 942 _mca_init(1); 943} 944 945/* Must be executed on each CPU during resume. */ 946void 947mca_resume(void) 948{ 949 950 _mca_init(0); 951} 952 953/* 954 * The machine check registers for the BSP cannot be initialized until 955 * the local APIC is initialized. This happens at SI_SUB_CPU, 956 * SI_ORDER_SECOND. 957 */ 958static void 959mca_init_bsp(void *arg __unused) 960{ 961 962 mca_init(); 963} 964SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL); 965 966/* Called when a machine check exception fires. */ 967void 968mca_intr(void) 969{ 970 uint64_t mcg_status; 971 int old_count, recoverable; 972 973 if (!(cpu_feature & CPUID_MCA)) { 974 /* 975 * Just print the values of the old Pentium registers 976 * and panic. 977 */ 978 printf("MC Type: 0x%jx Address: 0x%jx\n", 979 (uintmax_t)rdmsr(MSR_P5_MC_TYPE), 980 (uintmax_t)rdmsr(MSR_P5_MC_ADDR)); 981 panic("Machine check"); 982 } 983 984 /* Scan the banks and check for any non-recoverable errors. */ 985 old_count = mca_count; 986 recoverable = mca_scan(MCE); 987 mcg_status = rdmsr(MSR_MCG_STATUS); 988 if (!(mcg_status & MCG_STATUS_RIPV)) 989 recoverable = 0; 990 991 if (!recoverable) { 992 /* 993 * Wait for at least one error to be logged before 994 * panic'ing. Some errors will assert a machine check 995 * on all CPUs, but only certain CPUs will find a valid 996 * bank to log. 
997 */ 998 while (mca_count == old_count) 999 cpu_spinwait(); 1000 1001 panic("Unrecoverable machine check exception"); 1002 } 1003 1004 /* Clear MCIP. */ 1005 wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP); 1006} 1007 1008#ifdef DEV_APIC 1009/* Called for a CMCI (correctable machine check interrupt). */ 1010void 1011cmc_intr(void) 1012{ 1013 struct mca_internal *mca; 1014 int count; 1015 1016 /* 1017 * Serialize MCA bank scanning to prevent collisions from 1018 * sibling threads. 1019 */ 1020 count = mca_scan(CMCI); 1021 1022 /* If we found anything, log them to the console. */ 1023 if (count != 0) { 1024 mtx_lock_spin(&mca_lock); 1025 STAILQ_FOREACH(mca, &mca_records, link) { 1026 if (!mca->logged) { 1027 mca->logged = 1; 1028 mca_log(&mca->rec); 1029 } 1030 } 1031 mtx_unlock_spin(&mca_lock); 1032 } 1033} 1034#endif 1035