machdep.c revision 265952
1/*- 2 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3 * Copyright (C) 1995, 1996 TooLs GmbH. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by TooLs GmbH. 17 * 4. The name of TooLs GmbH may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31/*- 32 * Copyright (C) 2001 Benno Rice 33 * All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. 
Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ 55 */ 56 57#include <sys/cdefs.h> 58__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/machdep.c 265952 2014-05-13 16:50:10Z ian $"); 59 60#include "opt_compat.h" 61#include "opt_ddb.h" 62#include "opt_kstack_pages.h" 63#include "opt_platform.h" 64 65#include <sys/param.h> 66#include <sys/proc.h> 67#include <sys/systm.h> 68#include <sys/bio.h> 69#include <sys/buf.h> 70#include <sys/bus.h> 71#include <sys/cons.h> 72#include <sys/cpu.h> 73#include <sys/eventhandler.h> 74#include <sys/exec.h> 75#include <sys/imgact.h> 76#include <sys/kdb.h> 77#include <sys/kernel.h> 78#include <sys/ktr.h> 79#include <sys/linker.h> 80#include <sys/lock.h> 81#include <sys/malloc.h> 82#include <sys/mbuf.h> 83#include <sys/msgbuf.h> 84#include <sys/mutex.h> 85#include <sys/ptrace.h> 86#include <sys/reboot.h> 87#include <sys/rwlock.h> 88#include <sys/signalvar.h> 89#include <sys/syscallsubr.h> 90#include <sys/sysctl.h> 91#include <sys/sysent.h> 92#include <sys/sysproto.h> 93#include <sys/ucontext.h> 94#include <sys/uio.h> 95#include <sys/vmmeter.h> 96#include <sys/vnode.h> 97 98#include <net/netisr.h> 99 100#include <vm/vm.h> 101#include <vm/vm_extern.h> 102#include <vm/vm_kern.h> 103#include <vm/vm_page.h> 104#include <vm/vm_map.h> 105#include <vm/vm_object.h> 106#include <vm/vm_pager.h> 107 108#include <machine/altivec.h> 109#ifndef __powerpc64__ 110#include <machine/bat.h> 111#endif 112#include <machine/cpu.h> 113#include <machine/elf.h> 114#include <machine/fpu.h> 115#include <machine/hid.h> 116#include <machine/kdb.h> 117#include <machine/md_var.h> 118#include <machine/metadata.h> 119#include <machine/mmuvar.h> 120#include <machine/pcb.h> 121#include <machine/reg.h> 122#include <machine/sigframe.h> 123#include <machine/spr.h> 124#include <machine/trap.h> 125#include <machine/vmparam.h> 126#include <machine/ofw_machdep.h> 127 128#include <ddb/ddb.h> 129 130#include <dev/ofw/openfirm.h> 131 132#ifdef DDB 133extern vm_offset_t 
    ksym_start, ksym_end;
#endif

/* Cleared once the machine-dependent boot path is far enough along. */
int cold = 1;
#ifdef __powerpc64__
extern int n_slbs;
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;

/* Startup PCPU pointer handed to secondary processors (see cpu_sleep()). */
extern void *ap_pcpu;

struct pcpu __pcpu[MAXCPU];

/* Static trapframe for thread0 before any real frames exist. */
static struct trapframe frame0;

char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);

int setfault(faultbuf);		/* defined in locore.S */

long Maxmem = 0;
long realmem = 0;

#ifndef __powerpc64__
struct bat battable[16];
#endif

struct kva_md_info kmi;

/*
 * SYSINIT hook (SI_SUB_CPU): start the decrementer clock, run per-CPU
 * setup, report physical/available memory, and initialize the buffer
 * cache so disk labels can be read.
 */
static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zd (%zd MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_offset_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

#ifdef __powerpc64__
			printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n",
#else
			printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
#endif
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern char kernel_text[], _end[];

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;
#endif

/* Trap handler code/size symbol pairs defined in trap_subr.S. */
extern void *rstcode, *rstsize;
extern void *trapcode, *trapsize;
extern void *slbtrap, *slbtrapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;
/* Buffer used to stash the firmware's trap vectors (see ofw_save_trap_vec). */
char save_trap_init[0x2f00];		/* EXC_LAST */

/*
 * Machine-dependent boot-time initialization, called from locore.
 *
 * Parses loader metadata, sets up per-CPU data and thread0, brings up the
 * Open Firmware client interface and console, measures the cache line
 * size, installs the trap vectors, and bootstraps the pmap.
 *
 * Returns the initial stack pointer for thread0: a 16-byte-aligned
 * address just below thread0's PCB with room for a callframe.
 */
uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
    vm_offset_t basekernel, void *mdp)
{
	struct pcpu *pc;
	void *generictrap;
	size_t trap_offset;
	void *kmdp;
	char *env;
	register_t msr, scratch;
#ifdef WII
	register_t vers;
#endif
	uint8_t *cache_check;
	int cacheline_warn;
#ifndef __powerpc64__
	int ppc64;
#endif

	kmdp = NULL;
	trap_offset = 0;
	cacheline_warn = 0;

	/* Save trap vectors. */
	ofw_save_trap_vec(save_trap_init);

#ifdef WII
	/*
	 * The Wii loader doesn't pass us any environment so, mdp
	 * points to garbage at this point. The Wii CPU is a 750CL.
	 */
	vers = mfpvr();
	if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
		mdp = NULL;
#endif

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	/* Load curthread into the dedicated register (r13 on 64-bit, r2 on 32-bit). */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
#endif
	pc->pc_cpuid = 0;

	/* SPRG0 holds the PCPU pointer for the trap handlers. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */

	mutex_init();

	/*
	 * Install the OF client interface
	 */

	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */

	kdb_init();

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	#ifdef __powerpc64__
		case IBMPOWER7:
			/* XXX: get from ibm,slb-size in device tree */
			n_slbs = 32;
			break;
	#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
		: "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif

		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

		/*
		 * Set the common trap entry point to the one that
		 * knows to restore 32-bit operation on execution.
		 */

		generictrap = &trapcode64;
	} else {
		generictrap = &trapcode;
	}

	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	generictrap = &trapcode;
	#endif

	/* Install the trap handlers at their architected vector addresses. */
	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
#else
	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
	bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
	#ifdef __powerpc64__
	bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapsize);
	bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapsize);
	#endif
	bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
	#ifndef __powerpc64__
	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
	#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

	pmap_bootstrap(startkernel, endkernel);
	mtmsr(PSL_KERNSET & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/* 16-byte-aligned initial stack pointer just below thread0's PCB. */
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

/*
 * Zero a buffer: byte-at-a-time until word-aligned, then an unrolled
 * 8-word loop, then word-at-a-time, then a byte tail.
 */
void
bzero(void *buf, size_t len)
{
	caddr_t p;

	p = buf;

	/* Align to a u_long boundary. */
	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

	/* Unrolled main loop: eight words per iteration. */
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}

	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	while (len) {
		*p++ = 0;
		len--;
	}
}

/* No machine-dependent work to do at reboot time on this platform. */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD */
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

/* Set the program counter (SRR0) in a traced thread's trapframe. */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

/* Arm single-step for a traced thread by setting PSL_SE in its saved MSR. */
int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

/* Disarm single-step for a traced thread. */
int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

/* KDB variant: clear single-step in the debugger's saved trapframe. */
void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

/* KDB variant: set single-step in the debugger's saved trapframe. */
void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
/* Copy the SLB contents from the current CPU */
memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

/*
 * Enter a spinlock section: disable interrupts on the first (outermost)
 * acquisition, saving the previous MSR so spinlock_exit() can restore it.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

/*
 * Leave a spinlock section; re-enable interrupts (restore the saved MSR)
 * when the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	critical_exit();
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(msr);
}

int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */

/*
 * Decide whether a kernel-mode trap should be handed to KDB.
 * Only traps taken in supervisor mode (PSL_PR clear) qualify: trace,
 * run-mode trace, breakpoint, DSI, or a program exception whose SRR1
 * 0x20000 bit is set (presumably the trap-instruction indicator bit —
 * NOTE(review): confirm against the PEM SRR1 bit definitions).
 * Returns kdb_trap()'s result, or 0 to let normal trap handling proceed.
 */
int
db_trap_glue(struct trapframe *frame)
{
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;

		/* Program traps with the 0x20000 bit set are breakpoints. */
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}

#ifndef __powerpc64__

/* Extract the VSID for a virtual address from the pmap's segment registers. */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	/* Run with external interrupts and data translation off. */
	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	/* Disable L2 prefetching before touching the caches. */
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
	 */
	/* Walk 4 MB of RAM one 32-byte line at a time, loading then flushing. */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;	/* load to bring the line into the cache */
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	/* Repeat from ROM, progressively unlocking L1 ways via LDSTCR. */
	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	/* Hardware-flush, then disable and invalidate, the L2 cache. */
	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	/* Same sequence for the L3 cache, if enabled. */
	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	/* Finally disable the L1 data cache. */
	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

/*
 * Put the CPU into HID0 SLEEP state.
 *
 * Saves FPU/AltiVec state, SPRGs, SRRs and the timebase, flushes and
 * disables the caches, then loops setting PSL_POW in the MSR.  On wakeup
 * (reset vector), execution resumes via longjmp to resetjb — the pointer
 * stored in the PCPU "restore" field — after which the saved state is
 * reloaded.  Statics are used because the stack contents are not
 * preserved across the sleep.
 */
void
cpu_sleep()
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		/* Save state that will not survive the sleep. */
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		/* Setting PSL_POW enters sleep; loop in case of spurious wakeups. */
		while (1)
			mtmsr(msr);
	}
	/* Woken up: longjmp from the reset vector lands here. */
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}