1/* $NetBSD: pmap.c,v 1.228.2.1 2012/08/09 06:36:46 jdc Exp $ */ 2 3/* 4 * Copyright 2003 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38/* 39 * Copyright (c) 2002-2003 Wasabi Systems, Inc. 40 * Copyright (c) 2001 Richard Earnshaw 41 * Copyright (c) 2001-2002 Christopher Gilbert 42 * All rights reserved. 43 * 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. The name of the company nor the name of the author may be used to 50 * endorse or promote products derived from this software without specific 51 * prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 64 */ 65 66/*- 67 * Copyright (c) 1999 The NetBSD Foundation, Inc. 68 * All rights reserved. 
69 * 70 * This code is derived from software contributed to The NetBSD Foundation 71 * by Charles M. Hannum. 72 * 73 * Redistribution and use in source and binary forms, with or without 74 * modification, are permitted provided that the following conditions 75 * are met: 76 * 1. Redistributions of source code must retain the above copyright 77 * notice, this list of conditions and the following disclaimer. 78 * 2. Redistributions in binary form must reproduce the above copyright 79 * notice, this list of conditions and the following disclaimer in the 80 * documentation and/or other materials provided with the distribution. 81 * 82 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 83 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 84 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 85 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 86 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 87 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 88 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 89 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 90 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 91 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 92 * POSSIBILITY OF SUCH DAMAGE. 93 */ 94 95/* 96 * Copyright (c) 1994-1998 Mark Brinicombe. 97 * Copyright (c) 1994 Brini. 98 * All rights reserved. 99 * 100 * This code is derived from software written for Brini by Mark Brinicombe 101 * 102 * Redistribution and use in source and binary forms, with or without 103 * modification, are permitted provided that the following conditions 104 * are met: 105 * 1. Redistributions of source code must retain the above copyright 106 * notice, this list of conditions and the following disclaimer. 107 * 2. Redistributions in binary form must reproduce the above copyright 108 * notice, this list of conditions and the following disclaimer in the 109 * documentation and/or other materials provided with the distribution. 110 * 3. All advertising materials mentioning features or use of this software 111 * must display the following acknowledgement: 112 * This product includes software developed by Mark Brinicombe. 113 * 4. The name of the author may not be used to endorse or promote products 114 * derived from this software without specific prior written permission. 115 * 116 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 117 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 118 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * armv6 and VIPT cache support by 3am Software Foundry,
 * Copyright (c) 2007 Microsoft
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables
 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
 * Systems, Inc.
 *
 * There are still a few things outstanding at this time:
 *
 *   - There are some unresolved issues for MP systems:
 *
 *     o The L1 metadata needs a lock, or more specifically, some places
 *       need to acquire an exclusive lock when modifying L1 translation
 *       table entries.
 *
 *     o When one cpu modifies an L1 entry, and that L1 table is also
 *       being used by another cpu, then the latter will need to be told
 *       that a tlb invalidation may be necessary. (But only if the old
 *       domain number in the L1 entry being over-written is currently
 *       the active domain on that cpu). I guess there are lots more tlb
 *       shootdown issues too...
 *
 *     o If the vector_page is at 0x00000000 instead of 0xffff0000, then
 *       MP systems will lose big-time because of the MMU domain hack.
 *       The only way this can be solved (apart from moving the vector
 *       page to 0xffff0000) is to reserve the first 1MB of user address
 *       space for kernel use only. This would require re-linking all
 *       applications so that the text section starts above this 1MB
 *       boundary.
 *
 *     o Tracking which VM space is resident in the cache/tlb has not yet
 *       been implemented for MP systems.
 *
 *     o Finally, there is a pathological condition where two cpus running
 *       two separate processes (not lwps) which happen to share an L1
 *       can get into a fight over one or more L1 entries. This will result
 *       in a significant slow-down if both processes are in tight loops.
180 */ 181 182/* 183 * Special compilation symbols 184 * PMAP_DEBUG - Build in pmap_debug_level code 185 */ 186 187/* Include header files */ 188 189#include "opt_cpuoptions.h" 190#include "opt_pmap_debug.h" 191#include "opt_ddb.h" 192#include "opt_lockdebug.h" 193#include "opt_multiprocessor.h" 194 195#include <sys/param.h> 196#include <sys/types.h> 197#include <sys/kernel.h> 198#include <sys/systm.h> 199#include <sys/proc.h> 200#include <sys/pool.h> 201#include <sys/kmem.h> 202#include <sys/cdefs.h> 203#include <sys/cpu.h> 204#include <sys/sysctl.h> 205 206#include <uvm/uvm.h> 207 208#include <sys/bus.h> 209#include <machine/pmap.h> 210#include <machine/pcb.h> 211#include <machine/param.h> 212#include <arm/arm32/katelib.h> 213 214__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228.2.1 2012/08/09 06:36:46 jdc Exp $"); 215 216#ifdef PMAP_DEBUG 217 218/* XXX need to get rid of all refs to this */ 219int pmap_debug_level = 0; 220 221/* 222 * for switching to potentially finer grained debugging 223 */ 224#define PDB_FOLLOW 0x0001 225#define PDB_INIT 0x0002 226#define PDB_ENTER 0x0004 227#define PDB_REMOVE 0x0008 228#define PDB_CREATE 0x0010 229#define PDB_PTPAGE 0x0020 230#define PDB_GROWKERN 0x0040 231#define PDB_BITS 0x0080 232#define PDB_COLLECT 0x0100 233#define PDB_PROTECT 0x0200 234#define PDB_MAP_L1 0x0400 235#define PDB_BOOTSTRAP 0x1000 236#define PDB_PARANOIA 0x2000 237#define PDB_WIRING 0x4000 238#define PDB_PVDUMP 0x8000 239#define PDB_VAC 0x10000 240#define PDB_KENTER 0x20000 241#define PDB_KREMOVE 0x40000 242#define PDB_EXEC 0x80000 243 244int debugmap = 1; 245int pmapdebug = 0; 246#define NPDEBUG(_lev_,_stat_) \ 247 if (pmapdebug & (_lev_)) \ 248 ((_stat_)) 249 250#else /* PMAP_DEBUG */ 251#define NPDEBUG(_lev_,_stat_) /* Nothing */ 252#endif /* PMAP_DEBUG */ 253 254/* 255 * pmap_kernel() points here 256 */ 257static struct pmap kernel_pmap_store; 258struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 259 260/* 261 * Which pmap is currently 'live' in the cache 262 * 263 * XXXSCW: Fix for SMP ... 264 */ 265static pmap_t pmap_recent_user; 266 267/* 268 * Pointer to last active lwp, or NULL if it exited. 269 */ 270struct lwp *pmap_previous_active_lwp; 271 272/* 273 * Pool and cache that pmap structures are allocated from. 274 * We use a cache to avoid clearing the pm_l2[] array (1KB) 275 * in pmap_create(). 276 */ 277static struct pool_cache pmap_cache; 278static LIST_HEAD(, pmap) pmap_pmaps; 279 280/* 281 * Pool of PV structures 282 */ 283static struct pool pmap_pv_pool; 284static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); 285static void pmap_bootstrap_pv_page_free(struct pool *, void *); 286static struct pool_allocator pmap_bootstrap_pv_allocator = { 287 pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free 288}; 289 290/* 291 * Pool and cache of l2_dtable structures. 292 * We use a cache to avoid clearing the structures when they're 293 * allocated. (196 bytes) 294 */ 295static struct pool_cache pmap_l2dtable_cache; 296static vaddr_t pmap_kernel_l2dtable_kva; 297 298/* 299 * Pool and cache of L2 page descriptors. 300 * We use a cache to avoid clearing the descriptor table 301 * when they're allocated. 
(1KB) 302 */ 303static struct pool_cache pmap_l2ptp_cache; 304static vaddr_t pmap_kernel_l2ptp_kva; 305static paddr_t pmap_kernel_l2ptp_phys; 306 307#ifdef PMAPCOUNTERS 308#define PMAP_EVCNT_INITIALIZER(name) \ 309 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) 310 311#ifdef PMAP_CACHE_VIPT 312static struct evcnt pmap_ev_vac_clean_one = 313 PMAP_EVCNT_INITIALIZER("clean page (1 color)"); 314static struct evcnt pmap_ev_vac_flush_one = 315 PMAP_EVCNT_INITIALIZER("flush page (1 color)"); 316static struct evcnt pmap_ev_vac_flush_lots = 317 PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); 318static struct evcnt pmap_ev_vac_flush_lots2 = 319 PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); 320EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); 321EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); 322EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); 323EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); 324 325static struct evcnt pmap_ev_vac_color_new = 326 PMAP_EVCNT_INITIALIZER("new page color"); 327static struct evcnt pmap_ev_vac_color_reuse = 328 PMAP_EVCNT_INITIALIZER("ok first page color"); 329static struct evcnt pmap_ev_vac_color_ok = 330 PMAP_EVCNT_INITIALIZER("ok page color"); 331static struct evcnt pmap_ev_vac_color_blind = 332 PMAP_EVCNT_INITIALIZER("blind page color"); 333static struct evcnt pmap_ev_vac_color_change = 334 PMAP_EVCNT_INITIALIZER("change page color"); 335static struct evcnt pmap_ev_vac_color_erase = 336 PMAP_EVCNT_INITIALIZER("erase page color"); 337static struct evcnt pmap_ev_vac_color_none = 338 PMAP_EVCNT_INITIALIZER("no page color"); 339static struct evcnt pmap_ev_vac_color_restore = 340 PMAP_EVCNT_INITIALIZER("restore page color"); 341 342EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); 343EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); 344EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); 345EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); 346EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); 347EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); 348EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); 349EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); 350#endif 351 352static struct evcnt pmap_ev_mappings = 353 PMAP_EVCNT_INITIALIZER("pages mapped"); 354static struct evcnt pmap_ev_unmappings = 355 PMAP_EVCNT_INITIALIZER("pages unmapped"); 356static struct evcnt pmap_ev_remappings = 357 PMAP_EVCNT_INITIALIZER("pages remapped"); 358 359EVCNT_ATTACH_STATIC(pmap_ev_mappings); 360EVCNT_ATTACH_STATIC(pmap_ev_unmappings); 361EVCNT_ATTACH_STATIC(pmap_ev_remappings); 362 363static struct evcnt pmap_ev_kernel_mappings = 364 PMAP_EVCNT_INITIALIZER("kernel pages mapped"); 365static struct evcnt pmap_ev_kernel_unmappings = 366 PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); 367static struct evcnt pmap_ev_kernel_remappings = 368 PMAP_EVCNT_INITIALIZER("kernel pages remapped"); 369 370EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); 371EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); 372EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); 373 374static struct evcnt pmap_ev_kenter_mappings = 375 PMAP_EVCNT_INITIALIZER("kenter pages mapped"); 376static struct evcnt pmap_ev_kenter_unmappings = 377 PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); 378static struct evcnt pmap_ev_kenter_remappings = 379 PMAP_EVCNT_INITIALIZER("kenter pages remapped"); 380static struct evcnt pmap_ev_pt_mappings = 381 PMAP_EVCNT_INITIALIZER("page table pages mapped"); 382 383EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); 384EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); 385EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); 
386EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); 387 388#ifdef PMAP_CACHE_VIPT 389static struct evcnt pmap_ev_exec_mappings = 390 PMAP_EVCNT_INITIALIZER("exec pages mapped"); 391static struct evcnt pmap_ev_exec_cached = 392 PMAP_EVCNT_INITIALIZER("exec pages cached"); 393 394EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); 395EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); 396 397static struct evcnt pmap_ev_exec_synced = 398 PMAP_EVCNT_INITIALIZER("exec pages synced"); 399static struct evcnt pmap_ev_exec_synced_map = 400 PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); 401static struct evcnt pmap_ev_exec_synced_unmap = 402 PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); 403static struct evcnt pmap_ev_exec_synced_remap = 404 PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); 405static struct evcnt pmap_ev_exec_synced_clearbit = 406 PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); 407static struct evcnt pmap_ev_exec_synced_kremove = 408 PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); 409 410EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); 411EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); 412EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); 413EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); 414EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); 415EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); 416 417static struct evcnt pmap_ev_exec_discarded_unmap = 418 PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); 419static struct evcnt pmap_ev_exec_discarded_zero = 420 PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); 421static struct evcnt pmap_ev_exec_discarded_copy = 422 PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); 423static struct evcnt pmap_ev_exec_discarded_page_protect = 424 PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); 425static struct evcnt pmap_ev_exec_discarded_clearbit = 426 PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); 427static struct evcnt pmap_ev_exec_discarded_kremove = 428 PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); 429 430EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); 431EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); 432EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); 433EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); 434EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); 435EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); 436#endif /* PMAP_CACHE_VIPT */ 437 438static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); 439static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); 440static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); 441 442EVCNT_ATTACH_STATIC(pmap_ev_updates); 443EVCNT_ATTACH_STATIC(pmap_ev_collects); 444EVCNT_ATTACH_STATIC(pmap_ev_activations); 445 446#define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) 447#else 448#define PMAPCOUNT(x) ((void)0) 449#endif 450 451/* 452 * pmap copy/zero page, and mem(5) hook point 453 */ 454static pt_entry_t *csrc_pte, *cdst_pte; 455static vaddr_t csrcp, cdstp; 456vaddr_t memhook; /* used by mem.c */ 457kmutex_t memlock; /* used by mem.c */ 458void *zeropage; /* used by mem.c */ 459extern void *msgbufaddr; 460int pmap_kmpages; 461/* 462 * Flag to indicate if pmap_init() has done its thing 463 */ 464bool pmap_initialized; 465 466/* 467 * Misc. 
locking data structures
 */

#define	pmap_acquire_pmap_lock(pm)			\
	do {						\
		if ((pm) != pmap_kernel())		\
			mutex_enter((pm)->pm_lock);	\
	} while (/*CONSTCOND*/0)

#define	pmap_release_pmap_lock(pm)			\
	do {						\
		if ((pm) != pmap_kernel())		\
			mutex_exit((pm)->pm_lock);	\
	} while (/*CONSTCOND*/0)


/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vaddr_t)(va)) >> L1_S_SHIFT)

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
static kmutex_t l1_lru_lock __cacheline_aligned;

/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)

/*
 * L2 allocation.
 */
#define	pmap_alloc_l2_dtable()		\
	    pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
#define	pmap_free_l2_dtable(l2)		\
	    pool_cache_put(&pmap_l2dtable_cache, (l2))
#define	pmap_alloc_l2_ptp(pap)		\
	    ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
	    PR_NOWAIT, (pap)))

/*
 * We try to map the page tables write-through, if possible.
However, not 582 * all CPUs have a write-through cache mode, so on those we have to sync 583 * the cache when we frob page tables. 584 * 585 * We try to evaluate this at compile time, if possible. However, it's 586 * not always possible to do that, hence this run-time var. 587 */ 588int pmap_needs_pte_sync; 589 590/* 591 * Real definition of pv_entry. 592 */ 593struct pv_entry { 594 SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ 595 pmap_t pv_pmap; /* pmap where mapping lies */ 596 vaddr_t pv_va; /* virtual address for mapping */ 597 u_int pv_flags; /* flags */ 598}; 599 600/* 601 * Macro to determine if a mapping might be resident in the 602 * instruction cache and/or TLB 603 */ 604#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) 605#define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) 606 607/* 608 * Macro to determine if a mapping might be resident in the 609 * data cache and/or TLB 610 */ 611#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) 612 613/* 614 * Local prototypes 615 */ 616static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); 617static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, 618 pt_entry_t **); 619static bool pmap_is_current(pmap_t); 620static bool pmap_is_cached(pmap_t); 621static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, 622 pmap_t, vaddr_t, u_int); 623static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); 624static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 625static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, 626 u_int, u_int); 627 628static void pmap_pinit(pmap_t); 629static int pmap_pmap_ctor(void *, void *, int); 630 631static void pmap_alloc_l1(pmap_t); 632static void pmap_free_l1(pmap_t); 633static void pmap_use_l1(pmap_t); 634 635static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); 636static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); 637static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); 638static int pmap_l2ptp_ctor(void *, void *, int); 639static int pmap_l2dtable_ctor(void *, void *, int); 640 641static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 642#ifdef PMAP_CACHE_VIVT 643static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 644static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 645#endif 646 647static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); 648#ifdef PMAP_CACHE_VIVT 649static int pmap_clean_page(struct pv_entry *, bool); 650#endif 651#ifdef PMAP_CACHE_VIPT 652static void pmap_syncicache_page(struct vm_page_md *, paddr_t); 653enum pmap_flush_op { 654 PMAP_FLUSH_PRIMARY, 655 PMAP_FLUSH_SECONDARY, 656 PMAP_CLEAN_PRIMARY 657}; 658static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); 659#endif 660static void pmap_page_remove(struct vm_page_md *, paddr_t); 661 662static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); 663static vaddr_t kernel_pt_lookup(paddr_t); 664 665 666/* 667 * External function prototypes 668 */ 669extern void bzero_page(vaddr_t); 670extern void bcopy_page(vaddr_t, vaddr_t); 671 672/* 673 * Misc variables 674 */ 675vaddr_t virtual_avail; 676vaddr_t virtual_end; 677vaddr_t pmap_curmaxkvaddr; 678 679paddr_t avail_start; 680paddr_t avail_end; 681 682pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); 683pv_addr_t kernelpages; 684pv_addr_t kernel_l1pt; 685pv_addr_t systempage; 686 687/* Function to set the debug level of 
the pmap code */ 688 689#ifdef PMAP_DEBUG 690void 691pmap_debug(int level) 692{ 693 pmap_debug_level = level; 694 printf("pmap_debug: level=%d\n", pmap_debug_level); 695} 696#endif /* PMAP_DEBUG */ 697 698#ifdef PMAP_CACHE_VIPT 699#define PMAP_VALIDATE_MD_PAGE(md) \ 700 KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ 701 "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ 702 (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); 703#endif /* PMAP_CACHE_VIPT */ 704/* 705 * A bunch of routines to conditionally flush the caches/TLB depending 706 * on whether the specified pmap actually needs to be flushed at any 707 * given time. 708 */ 709static inline void 710pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) 711{ 712 713 if (pm->pm_cstate.cs_tlb_id) 714 cpu_tlb_flushID_SE(va); 715} 716 717static inline void 718pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) 719{ 720 721 if (pm->pm_cstate.cs_tlb_d) 722 cpu_tlb_flushD_SE(va); 723} 724 725static inline void 726pmap_tlb_flushID(pmap_t pm) 727{ 728 729 if (pm->pm_cstate.cs_tlb_id) { 730 cpu_tlb_flushID(); 731 pm->pm_cstate.cs_tlb = 0; 732 } 733} 734 735static inline void 736pmap_tlb_flushD(pmap_t pm) 737{ 738 739 if (pm->pm_cstate.cs_tlb_d) { 740 cpu_tlb_flushD(); 741 pm->pm_cstate.cs_tlb_d = 0; 742 } 743} 744 745#ifdef PMAP_CACHE_VIVT 746static inline void 747pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) 748{ 749 if (pm->pm_cstate.cs_cache_id) { 750 cpu_idcache_wbinv_range(va, len); 751 } 752} 753 754static inline void 755pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, 756 bool do_inv, bool rd_only) 757{ 758 759 if (pm->pm_cstate.cs_cache_d) { 760 if (do_inv) { 761 if (rd_only) 762 cpu_dcache_inv_range(va, len); 763 else 764 cpu_dcache_wbinv_range(va, len); 765 } else 766 if (!rd_only) 767 cpu_dcache_wb_range(va, len); 768 } 769} 770 771static inline void 772pmap_idcache_wbinv_all(pmap_t pm) 773{ 774 if (pm->pm_cstate.cs_cache_id) { 775 cpu_idcache_wbinv_all(); 776 pm->pm_cstate.cs_cache = 0; 777 } 778} 779 780static inline void 781pmap_dcache_wbinv_all(pmap_t pm) 782{ 783 if (pm->pm_cstate.cs_cache_d) { 784 cpu_dcache_wbinv_all(); 785 pm->pm_cstate.cs_cache_d = 0; 786 } 787} 788#endif /* PMAP_CACHE_VIVT */ 789 790static inline bool 791pmap_is_current(pmap_t pm) 792{ 793 794 if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) 795 return true; 796 797 return false; 798} 799 800static inline bool 801pmap_is_cached(pmap_t pm) 802{ 803 804 if (pm == pmap_kernel() || pmap_recent_user == NULL || 805 pmap_recent_user == pm) 806 return (true); 807 808 return false; 809} 810 811/* 812 * PTE_SYNC_CURRENT: 813 * 814 * Make sure the pte is written out to RAM. 815 * We need to do this for one of two cases: 816 * - We're dealing with the kernel pmap 817 * - There is no pmap active in the cache/tlb. 818 * - The specified pmap is 'active' in the cache/tlb. 
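 *
 *     In other words: pmap_is_cached() (defined above) returns true in
 *     exactly those cases, so when PTE syncs are needed at all, the
 *     macro below only skips the sync for a user pmap that is not the
 *     one most recently made live in the cache/TLB.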
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC && 	\
	    pmap_is_cached(pm))		\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
    vaddr_t va, u_int flags)
{
	struct pv_entry **pvp;

	NPDEBUG(PDB_PVDUMP,
	    printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));

	pv->pv_pmap = pm;
	pv->pv_va = va;
	pv->pv_flags = flags;

	pvp = &SLIST_FIRST(&md->pvh_list);
#ifdef PMAP_CACHE_VIPT
	/*
	 * Insert unmanaged entries, writeable first, at the head of
	 * the pv list.
	 */
	if (__predict_true((flags & PVF_KENTRY) == 0)) {
		while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
			pvp = &SLIST_NEXT(*pvp, pv_link);
	} else if ((flags & PVF_WRITE) == 0) {
		while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
			pvp = &SLIST_NEXT(*pvp, pv_link);
	}
#endif
	SLIST_NEXT(pv, pv_link) = *pvp;		/* add to ... */
	*pvp = pv;				/* ... locked list */
	md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
#ifdef PMAP_CACHE_VIPT
	if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
		md->pvh_attrs |= PVF_KMOD;
	if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
		md->pvh_attrs |= PVF_DIRTY;
	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
	if (pm == pmap_kernel()) {
		PMAPCOUNT(kernel_mappings);
		if (flags & PVF_WRITE)
			md->krw_mappings++;
		else
			md->kro_mappings++;
	} else {
		if (flags & PVF_WRITE)
			md->urw_mappings++;
		else
			md->uro_mappings++;
	}

#ifdef PMAP_CACHE_VIPT
	/*
	 * Even though pmap_vac_me_harder will set PVF_WRITE for us,
	 * do it here as well to keep the mappings & PVF_WRITE consistent.
	 */
	if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
		md->pvh_attrs |= PVF_WRITE;
	}
	/*
	 * If this is an exec mapping and it's the first exec mapping
	 * for this page, make sure to sync the I-cache.
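	 *
	 * (If PVF_EXEC is already set in md->pvh_attrs, an earlier mapping
	 * has left the I-cache in sync with this page, so the sync below
	 * can be skipped; only the first exec mapping pays for it.)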
910 */ 911 if (PV_IS_EXEC_P(flags)) { 912 if (!PV_IS_EXEC_P(md->pvh_attrs)) { 913 pmap_syncicache_page(md, pa); 914 PMAPCOUNT(exec_synced_map); 915 } 916 PMAPCOUNT(exec_mappings); 917 } 918#endif 919 920 PMAPCOUNT(mappings); 921 922 if (pv->pv_flags & PVF_WIRED) 923 ++pm->pm_stats.wired_count; 924} 925 926/* 927 * 928 * pmap_find_pv: Find a pv entry 929 * 930 * => caller should hold lock on vm_page 931 */ 932static inline struct pv_entry * 933pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) 934{ 935 struct pv_entry *pv; 936 937 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 938 if (pm == pv->pv_pmap && va == pv->pv_va) 939 break; 940 } 941 942 return (pv); 943} 944 945/* 946 * pmap_remove_pv: try to remove a mapping from a pv_list 947 * 948 * => caller should hold proper lock on pmap_main_lock 949 * => pmap should be locked 950 * => caller should hold lock on vm_page [so that attrs can be adjusted] 951 * => caller should adjust ptp's wire_count and free PTP if needed 952 * => caller should NOT adjust pmap's wire_count 953 * => we return the removed pv 954 */ 955static struct pv_entry * 956pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 957{ 958 struct pv_entry *pv, **prevptr; 959 960 NPDEBUG(PDB_PVDUMP, 961 printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); 962 963 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ 964 pv = *prevptr; 965 966 while (pv) { 967 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ 968 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " 969 "%p, flags 0x%x\n", pm, md, pv->pv_flags)); 970 if (pv->pv_flags & PVF_WIRED) { 971 --pm->pm_stats.wired_count; 972 } 973 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ 974 if (pm == pmap_kernel()) { 975 PMAPCOUNT(kernel_unmappings); 976 if (pv->pv_flags & PVF_WRITE) 977 md->krw_mappings--; 978 else 979 md->kro_mappings--; 980 } else { 981 if (pv->pv_flags & PVF_WRITE) 982 md->urw_mappings--; 983 else 984 md->uro_mappings--; 985 } 986 987 PMAPCOUNT(unmappings); 988#ifdef PMAP_CACHE_VIPT 989 if (!(pv->pv_flags & PVF_WRITE)) 990 break; 991 /* 992 * If this page has had an exec mapping, then if 993 * this was the last mapping, discard the contents, 994 * otherwise sync the i-cache for this page. 995 */ 996 if (PV_IS_EXEC_P(md->pvh_attrs)) { 997 if (SLIST_EMPTY(&md->pvh_list)) { 998 md->pvh_attrs &= ~PVF_EXEC; 999 PMAPCOUNT(exec_discarded_unmap); 1000 } else { 1001 pmap_syncicache_page(md, pa); 1002 PMAPCOUNT(exec_synced_unmap); 1003 } 1004 } 1005#endif /* PMAP_CACHE_VIPT */ 1006 break; 1007 } 1008 prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ 1009 pv = *prevptr; /* advance */ 1010 } 1011 1012#ifdef PMAP_CACHE_VIPT 1013 /* 1014 * If we no longer have a WRITEABLE KENTRY at the head of list, 1015 * clear the KMOD attribute from the page. 1016 */ 1017 if (SLIST_FIRST(&md->pvh_list) == NULL 1018 || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) 1019 md->pvh_attrs &= ~PVF_KMOD; 1020 1021 /* 1022 * If this was a writeable page and there are no more writeable 1023 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback 1024 * the contents to memory. 
1025 */ 1026 if (arm_cache_prefer_mask != 0) { 1027 if (md->krw_mappings + md->urw_mappings == 0) 1028 md->pvh_attrs &= ~PVF_WRITE; 1029 PMAP_VALIDATE_MD_PAGE(md); 1030 } 1031 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1032#endif /* PMAP_CACHE_VIPT */ 1033 1034 return(pv); /* return removed pv */ 1035} 1036 1037/* 1038 * 1039 * pmap_modify_pv: Update pv flags 1040 * 1041 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1042 * => caller should NOT adjust pmap's wire_count 1043 * => caller must call pmap_vac_me_harder() if writable status of a page 1044 * may have changed. 1045 * => we return the old flags 1046 * 1047 * Modify a physical-virtual mapping in the pv table 1048 */ 1049static u_int 1050pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, 1051 u_int clr_mask, u_int set_mask) 1052{ 1053 struct pv_entry *npv; 1054 u_int flags, oflags; 1055 1056 KASSERT((clr_mask & PVF_KENTRY) == 0); 1057 KASSERT((set_mask & PVF_KENTRY) == 0); 1058 1059 if ((npv = pmap_find_pv(md, pm, va)) == NULL) 1060 return (0); 1061 1062 NPDEBUG(PDB_PVDUMP, 1063 printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); 1064 1065 /* 1066 * There is at least one VA mapping this page. 1067 */ 1068 1069 if (clr_mask & (PVF_REF | PVF_MOD)) { 1070 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1071#ifdef PMAP_CACHE_VIPT 1072 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 1073 md->pvh_attrs |= PVF_DIRTY; 1074 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1075#endif 1076 } 1077 1078 oflags = npv->pv_flags; 1079 npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; 1080 1081 if ((flags ^ oflags) & PVF_WIRED) { 1082 if (flags & PVF_WIRED) 1083 ++pm->pm_stats.wired_count; 1084 else 1085 --pm->pm_stats.wired_count; 1086 } 1087 1088 if ((flags ^ oflags) & PVF_WRITE) { 1089 if (pm == pmap_kernel()) { 1090 if (flags & PVF_WRITE) { 1091 md->krw_mappings++; 1092 md->kro_mappings--; 1093 } else { 1094 md->kro_mappings++; 1095 md->krw_mappings--; 1096 } 1097 } else { 1098 if (flags & PVF_WRITE) { 1099 md->urw_mappings++; 1100 md->uro_mappings--; 1101 } else { 1102 md->uro_mappings++; 1103 md->urw_mappings--; 1104 } 1105 } 1106 } 1107#ifdef PMAP_CACHE_VIPT 1108 if (arm_cache_prefer_mask != 0) { 1109 if (md->urw_mappings + md->krw_mappings == 0) { 1110 md->pvh_attrs &= ~PVF_WRITE; 1111 } else { 1112 md->pvh_attrs |= PVF_WRITE; 1113 } 1114 } 1115 /* 1116 * We have two cases here: the first is from enter_pv (new exec 1117 * page), the second is a combined pmap_remove_pv/pmap_enter_pv. 1118 * Since in latter, pmap_enter_pv won't do anything, we just have 1119 * to do what pmap_remove_pv would do. 1120 */ 1121 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) 1122 || (PV_IS_EXEC_P(md->pvh_attrs) 1123 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { 1124 pmap_syncicache_page(md, pa); 1125 PMAPCOUNT(exec_synced_remap); 1126 } 1127 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1128#endif 1129 1130 PMAPCOUNT(remappings); 1131 1132 return (oflags); 1133} 1134 1135/* 1136 * Allocate an L1 translation table for the specified pmap. 1137 * This is called at pmap creation time. 
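 *
 * As a rough sketch of the domain free-list manipulated below (the
 * authoritative code follows this comment): l1_domain_first holds the
 * first free domain number for the L1 and l1_domain_free[n] holds the
 * number that follows n, so allocating a domain is just a "pop" of the
 * list head:
 *
 *	domain = l1->l1_domain_first;
 *	l1->l1_domain_first = l1->l1_domain_free[domain];
 *
 * and pmap_free_l1() "pushes" the freed number back in the same way.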
1138 */ 1139static void 1140pmap_alloc_l1(pmap_t pm) 1141{ 1142 struct l1_ttable *l1; 1143 u_int8_t domain; 1144 1145 /* 1146 * Remove the L1 at the head of the LRU list 1147 */ 1148 mutex_spin_enter(&l1_lru_lock); 1149 l1 = TAILQ_FIRST(&l1_lru_list); 1150 KDASSERT(l1 != NULL); 1151 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1152 1153 /* 1154 * Pick the first available domain number, and update 1155 * the link to the next number. 1156 */ 1157 domain = l1->l1_domain_first; 1158 l1->l1_domain_first = l1->l1_domain_free[domain]; 1159 1160 /* 1161 * If there are still free domain numbers in this L1, 1162 * put it back on the TAIL of the LRU list. 1163 */ 1164 if (++l1->l1_domain_use_count < PMAP_DOMAINS) 1165 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1166 1167 mutex_spin_exit(&l1_lru_lock); 1168 1169 /* 1170 * Fix up the relevant bits in the pmap structure 1171 */ 1172 pm->pm_l1 = l1; 1173 pm->pm_domain = domain; 1174} 1175 1176/* 1177 * Free an L1 translation table. 1178 * This is called at pmap destruction time. 1179 */ 1180static void 1181pmap_free_l1(pmap_t pm) 1182{ 1183 struct l1_ttable *l1 = pm->pm_l1; 1184 1185 mutex_spin_enter(&l1_lru_lock); 1186 1187 /* 1188 * If this L1 is currently on the LRU list, remove it. 1189 */ 1190 if (l1->l1_domain_use_count < PMAP_DOMAINS) 1191 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1192 1193 /* 1194 * Free up the domain number which was allocated to the pmap 1195 */ 1196 l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; 1197 l1->l1_domain_first = pm->pm_domain; 1198 l1->l1_domain_use_count--; 1199 1200 /* 1201 * The L1 now must have at least 1 free domain, so add 1202 * it back to the LRU list. If the use count is zero, 1203 * put it at the head of the list, otherwise it goes 1204 * to the tail. 1205 */ 1206 if (l1->l1_domain_use_count == 0) 1207 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); 1208 else 1209 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1210 1211 mutex_spin_exit(&l1_lru_lock); 1212} 1213 1214static inline void 1215pmap_use_l1(pmap_t pm) 1216{ 1217 struct l1_ttable *l1; 1218 1219 /* 1220 * Do nothing if we're in interrupt context. 1221 * Access to an L1 by the kernel pmap must not affect 1222 * the LRU list. 1223 */ 1224 if (cpu_intr_p() || pm == pmap_kernel()) 1225 return; 1226 1227 l1 = pm->pm_l1; 1228 1229 /* 1230 * If the L1 is not currently on the LRU list, just return 1231 */ 1232 if (l1->l1_domain_use_count == PMAP_DOMAINS) 1233 return; 1234 1235 mutex_spin_enter(&l1_lru_lock); 1236 1237 /* 1238 * Check the use count again, now that we've acquired the lock 1239 */ 1240 if (l1->l1_domain_use_count == PMAP_DOMAINS) { 1241 mutex_spin_exit(&l1_lru_lock); 1242 return; 1243 } 1244 1245 /* 1246 * Move the L1 to the back of the LRU list 1247 */ 1248 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1249 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1250 1251 mutex_spin_exit(&l1_lru_lock); 1252} 1253 1254/* 1255 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *) 1256 * 1257 * Free an L2 descriptor table. 1258 */ 1259static inline void 1260#ifndef PMAP_INCLUDE_PTE_SYNC 1261pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa) 1262#else 1263pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa) 1264#endif 1265{ 1266#ifdef PMAP_INCLUDE_PTE_SYNC 1267#ifdef PMAP_CACHE_VIVT 1268 /* 1269 * Note: With a write-back cache, we may need to sync this 1270 * L2 table before re-using it. 1271 * This is because it may have belonged to a non-current 1272 * pmap, in which case the cache syncs would have been 1273 * skipped for the pages that were being unmapped. 
If the 1274 * L2 table were then to be immediately re-allocated to 1275 * the *current* pmap, it may well contain stale mappings 1276 * which have not yet been cleared by a cache write-back 1277 * and so would still be visible to the mmu. 1278 */ 1279 if (need_sync) 1280 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1281#endif /* PMAP_CACHE_VIVT */ 1282#endif /* PMAP_INCLUDE_PTE_SYNC */ 1283 pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa); 1284} 1285 1286/* 1287 * Returns a pointer to the L2 bucket associated with the specified pmap 1288 * and VA, or NULL if no L2 bucket exists for the address. 1289 */ 1290static inline struct l2_bucket * 1291pmap_get_l2_bucket(pmap_t pm, vaddr_t va) 1292{ 1293 struct l2_dtable *l2; 1294 struct l2_bucket *l2b; 1295 u_short l1idx; 1296 1297 l1idx = L1_IDX(va); 1298 1299 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || 1300 (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) 1301 return (NULL); 1302 1303 return (l2b); 1304} 1305 1306/* 1307 * Returns a pointer to the L2 bucket associated with the specified pmap 1308 * and VA. 1309 * 1310 * If no L2 bucket exists, perform the necessary allocations to put an L2 1311 * bucket/page table in place. 1312 * 1313 * Note that if a new L2 bucket/page was allocated, the caller *must* 1314 * increment the bucket occupancy counter appropriately *before* 1315 * releasing the pmap's lock to ensure no other thread or cpu deallocates 1316 * the bucket/page in the meantime. 1317 */ 1318static struct l2_bucket * 1319pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va) 1320{ 1321 struct l2_dtable *l2; 1322 struct l2_bucket *l2b; 1323 u_short l1idx; 1324 1325 l1idx = L1_IDX(va); 1326 1327 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 1328 /* 1329 * No mapping at this address, as there is 1330 * no entry in the L1 table. 1331 * Need to allocate a new l2_dtable. 1332 */ 1333 if ((l2 = pmap_alloc_l2_dtable()) == NULL) 1334 return (NULL); 1335 1336 /* 1337 * Link it into the parent pmap 1338 */ 1339 pm->pm_l2[L2_IDX(l1idx)] = l2; 1340 } 1341 1342 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 1343 1344 /* 1345 * Fetch pointer to the L2 page table associated with the address. 1346 */ 1347 if (l2b->l2b_kva == NULL) { 1348 pt_entry_t *ptep; 1349 1350 /* 1351 * No L2 page table has been allocated. Chances are, this 1352 * is because we just allocated the l2_dtable, above. 1353 */ 1354 if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) { 1355 /* 1356 * Oops, no more L2 page tables available at this 1357 * time. We may need to deallocate the l2_dtable 1358 * if we allocated a new one above. 1359 */ 1360 if (l2->l2_occupancy == 0) { 1361 pm->pm_l2[L2_IDX(l1idx)] = NULL; 1362 pmap_free_l2_dtable(l2); 1363 } 1364 return (NULL); 1365 } 1366 1367 l2->l2_occupancy++; 1368 l2b->l2b_kva = ptep; 1369 l2b->l2b_l1idx = l1idx; 1370 } 1371 1372 return (l2b); 1373} 1374 1375/* 1376 * One or more mappings in the specified L2 descriptor table have just been 1377 * invalidated. 1378 * 1379 * Garbage collect the metadata and descriptor table itself if necessary. 1380 * 1381 * The pmap lock must be acquired when this is called (not necessary 1382 * for the kernel pmap). 1383 */ 1384static void 1385pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) 1386{ 1387 struct l2_dtable *l2; 1388 pd_entry_t *pl1pd, l1pd; 1389 pt_entry_t *ptep; 1390 u_short l1idx; 1391 1392 KDASSERT(count <= l2b->l2b_occupancy); 1393 1394 /* 1395 * Update the bucket's reference count according to how many 1396 * PTEs the caller has just invalidated. 
1397 */ 1398 l2b->l2b_occupancy -= count; 1399 1400 /* 1401 * Note: 1402 * 1403 * Level 2 page tables allocated to the kernel pmap are never freed 1404 * as that would require checking all Level 1 page tables and 1405 * removing any references to the Level 2 page table. See also the 1406 * comment elsewhere about never freeing bootstrap L2 descriptors. 1407 * 1408 * We make do with just invalidating the mapping in the L2 table. 1409 * 1410 * This isn't really a big deal in practice and, in fact, leads 1411 * to a performance win over time as we don't need to continually 1412 * alloc/free. 1413 */ 1414 if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) 1415 return; 1416 1417 /* 1418 * There are no more valid mappings in this level 2 page table. 1419 * Go ahead and NULL-out the pointer in the bucket, then 1420 * free the page table. 1421 */ 1422 l1idx = l2b->l2b_l1idx; 1423 ptep = l2b->l2b_kva; 1424 l2b->l2b_kva = NULL; 1425 1426 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 1427 1428 /* 1429 * If the L1 slot matches the pmap's domain 1430 * number, then invalidate it. 1431 */ 1432 l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); 1433 if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { 1434 *pl1pd = 0; 1435 PTE_SYNC(pl1pd); 1436 } 1437 1438 /* 1439 * Release the L2 descriptor table back to the pool cache. 1440 */ 1441#ifndef PMAP_INCLUDE_PTE_SYNC 1442 pmap_free_l2_ptp(ptep, l2b->l2b_phys); 1443#else 1444 pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys); 1445#endif 1446 1447 /* 1448 * Update the reference count in the associated l2_dtable 1449 */ 1450 l2 = pm->pm_l2[L2_IDX(l1idx)]; 1451 if (--l2->l2_occupancy > 0) 1452 return; 1453 1454 /* 1455 * There are no more valid mappings in any of the Level 1 1456 * slots managed by this l2_dtable. Go ahead and NULL-out 1457 * the pointer in the parent pmap and free the l2_dtable. 1458 */ 1459 pm->pm_l2[L2_IDX(l1idx)] = NULL; 1460 pmap_free_l2_dtable(l2); 1461} 1462 1463/* 1464 * Pool cache constructors for L2 descriptor tables, metadata and pmap 1465 * structures. 1466 */ 1467static int 1468pmap_l2ptp_ctor(void *arg, void *v, int flags) 1469{ 1470#ifndef PMAP_INCLUDE_PTE_SYNC 1471 struct l2_bucket *l2b; 1472 pt_entry_t *ptep, pte; 1473 vaddr_t va = (vaddr_t)v & ~PGOFSET; 1474 1475 /* 1476 * The mappings for these page tables were initially made using 1477 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- 1478 * mode will not be right for page table mappings. To avoid 1479 * polluting the pmap_kenter_pa() code with a special case for 1480 * page tables, we simply fix up the cache-mode here if it's not 1481 * correct. 1482 */ 1483 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 1484 KDASSERT(l2b != NULL); 1485 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1486 pte = *ptep; 1487 1488 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 1489 /* 1490 * Page tables must have the cache-mode set to Write-Thru. 
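		 * (That is, page-table pages are switched from the normal
		 * cache mode to pte_l2_s_cache_mode_pt, so that PTE stores
		 * become visible to the MMU's table walker without the
		 * extra cache syncs described in the pmap_needs_pte_sync
		 * comment earlier in this file.)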
1491 */ 1492 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 1493 PTE_SYNC(ptep); 1494 cpu_tlb_flushD_SE(va); 1495 cpu_cpwait(); 1496 } 1497#endif 1498 1499 memset(v, 0, L2_TABLE_SIZE_REAL); 1500 PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1501 return (0); 1502} 1503 1504static int 1505pmap_l2dtable_ctor(void *arg, void *v, int flags) 1506{ 1507 1508 memset(v, 0, sizeof(struct l2_dtable)); 1509 return (0); 1510} 1511 1512static int 1513pmap_pmap_ctor(void *arg, void *v, int flags) 1514{ 1515 1516 memset(v, 0, sizeof(struct pmap)); 1517 return (0); 1518} 1519 1520static void 1521pmap_pinit(pmap_t pm) 1522{ 1523 struct l2_bucket *l2b; 1524 1525 if (vector_page < KERNEL_BASE) { 1526 /* 1527 * Map the vector page. 1528 */ 1529 pmap_enter(pm, vector_page, systempage.pv_pa, 1530 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); 1531 pmap_update(pm); 1532 1533 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 1534 l2b = pmap_get_l2_bucket(pm, vector_page); 1535 KDASSERT(l2b != NULL); 1536 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 1537 L1_C_DOM(pm->pm_domain); 1538 } else 1539 pm->pm_pl1vec = NULL; 1540} 1541 1542#ifdef PMAP_CACHE_VIVT 1543/* 1544 * Since we have a virtually indexed cache, we may need to inhibit caching if 1545 * there is more than one mapping and at least one of them is writable. 1546 * Since we purge the cache on every context switch, we only need to check for 1547 * other mappings within the same pmap, or kernel_pmap. 1548 * This function is also called when a page is unmapped, to possibly reenable 1549 * caching on any remaining mappings. 1550 * 1551 * The code implements the following logic, where: 1552 * 1553 * KW = # of kernel read/write pages 1554 * KR = # of kernel read only pages 1555 * UW = # of user read/write pages 1556 * UR = # of user read only pages 1557 * 1558 * KC = kernel mapping is cacheable 1559 * UC = user mapping is cacheable 1560 * 1561 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 1562 * +--------------------------------------------- 1563 * UW=0,UR=0 | --- KC=1 KC=1 KC=0 1564 * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 1565 * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1566 * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1567 */ 1568 1569static const int pmap_vac_flags[4][4] = { 1570 {-1, 0, 0, PVF_KNC}, 1571 {0, 0, PVF_NC, PVF_NC}, 1572 {0, PVF_NC, PVF_NC, PVF_NC}, 1573 {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} 1574}; 1575 1576static inline int 1577pmap_get_vac_flags(const struct vm_page_md *md) 1578{ 1579 int kidx, uidx; 1580 1581 kidx = 0; 1582 if (md->kro_mappings || md->krw_mappings > 1) 1583 kidx |= 1; 1584 if (md->krw_mappings) 1585 kidx |= 2; 1586 1587 uidx = 0; 1588 if (md->uro_mappings || md->urw_mappings > 1) 1589 uidx |= 1; 1590 if (md->urw_mappings) 1591 uidx |= 2; 1592 1593 return (pmap_vac_flags[uidx][kidx]); 1594} 1595 1596static inline void 1597pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1598{ 1599 int nattr; 1600 1601 nattr = pmap_get_vac_flags(md); 1602 1603 if (nattr < 0) { 1604 md->pvh_attrs &= ~PVF_NC; 1605 return; 1606 } 1607 1608 if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) 1609 return; 1610 1611 if (pm == pmap_kernel()) 1612 pmap_vac_me_kpmap(md, pa, pm, va); 1613 else 1614 pmap_vac_me_user(md, pa, pm, va); 1615 1616 md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; 1617} 1618 1619static void 1620pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1621{ 1622 u_int u_cacheable, u_entries; 1623 struct pv_entry *pv; 1624 pmap_t last_pmap = pm; 
1625 1626 /* 1627 * Pass one, see if there are both kernel and user pmaps for 1628 * this page. Calculate whether there are user-writable or 1629 * kernel-writable pages. 1630 */ 1631 u_cacheable = 0; 1632 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1633 if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) 1634 u_cacheable++; 1635 } 1636 1637 u_entries = md->urw_mappings + md->uro_mappings; 1638 1639 /* 1640 * We know we have just been updating a kernel entry, so if 1641 * all user pages are already cacheable, then there is nothing 1642 * further to do. 1643 */ 1644 if (md->k_mappings == 0 && u_cacheable == u_entries) 1645 return; 1646 1647 if (u_entries) { 1648 /* 1649 * Scan over the list again, for each entry, if it 1650 * might not be set correctly, call pmap_vac_me_user 1651 * to recalculate the settings. 1652 */ 1653 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1654 /* 1655 * We know kernel mappings will get set 1656 * correctly in other calls. We also know 1657 * that if the pmap is the same as last_pmap 1658 * then we've just handled this entry. 1659 */ 1660 if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) 1661 continue; 1662 1663 /* 1664 * If there are kernel entries and this page 1665 * is writable but non-cacheable, then we can 1666 * skip this entry also. 1667 */ 1668 if (md->k_mappings && 1669 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 1670 (PVF_NC | PVF_WRITE)) 1671 continue; 1672 1673 /* 1674 * Similarly if there are no kernel-writable 1675 * entries and the page is already 1676 * read-only/cacheable. 1677 */ 1678 if (md->krw_mappings == 0 && 1679 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 1680 continue; 1681 1682 /* 1683 * For some of the remaining cases, we know 1684 * that we must recalculate, but for others we 1685 * can't tell if they are correct or not, so 1686 * we recalculate anyway. 1687 */ 1688 pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); 1689 } 1690 1691 if (md->k_mappings == 0) 1692 return; 1693 } 1694 1695 pmap_vac_me_user(md, pa, pm, va); 1696} 1697 1698static void 1699pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1700{ 1701 pmap_t kpmap = pmap_kernel(); 1702 struct pv_entry *pv, *npv = NULL; 1703 struct l2_bucket *l2b; 1704 pt_entry_t *ptep, pte; 1705 u_int entries = 0; 1706 u_int writable = 0; 1707 u_int cacheable_entries = 0; 1708 u_int kern_cacheable = 0; 1709 u_int other_writable = 0; 1710 1711 /* 1712 * Count mappings and writable mappings in this pmap. 1713 * Include kernel mappings as part of our own. 1714 * Keep a pointer to the first one. 1715 */ 1716 npv = NULL; 1717 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1718 /* Count mappings in the same pmap */ 1719 if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { 1720 if (entries++ == 0) 1721 npv = pv; 1722 1723 /* Cacheable mappings */ 1724 if ((pv->pv_flags & PVF_NC) == 0) { 1725 cacheable_entries++; 1726 if (kpmap == pv->pv_pmap) 1727 kern_cacheable++; 1728 } 1729 1730 /* Writable mappings */ 1731 if (pv->pv_flags & PVF_WRITE) 1732 ++writable; 1733 } else 1734 if (pv->pv_flags & PVF_WRITE) 1735 other_writable = 1; 1736 } 1737 1738 /* 1739 * Enable or disable caching as necessary. 1740 * Note: the first entry might be part of the kernel pmap, 1741 * so we can't assume this is indicative of the state of the 1742 * other (maybe non-kpmap) entries. 
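	 *
	 * The test below reads roughly as: caching must be turned off when
	 * this pmap (counting kernel mappings as our own) has more than one
	 * mapping with at least one of them writable, or when we are
	 * operating on the kernel pmap and some other pmap holds a writable
	 * mapping of the page.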
1743 */ 1744 if ((entries > 1 && writable) || 1745 (entries > 0 && pm == kpmap && other_writable)) { 1746 if (cacheable_entries == 0) 1747 return; 1748 1749 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1750 if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || 1751 (pv->pv_flags & PVF_NC)) 1752 continue; 1753 1754 pv->pv_flags |= PVF_NC; 1755 1756 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1757 KDASSERT(l2b != NULL); 1758 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1759 pte = *ptep & ~L2_S_CACHE_MASK; 1760 1761 if ((va != pv->pv_va || pm != pv->pv_pmap) && 1762 l2pte_valid(pte)) { 1763 if (PV_BEEN_EXECD(pv->pv_flags)) { 1764#ifdef PMAP_CACHE_VIVT 1765 pmap_idcache_wbinv_range(pv->pv_pmap, 1766 pv->pv_va, PAGE_SIZE); 1767#endif 1768 pmap_tlb_flushID_SE(pv->pv_pmap, 1769 pv->pv_va); 1770 } else 1771 if (PV_BEEN_REFD(pv->pv_flags)) { 1772#ifdef PMAP_CACHE_VIVT 1773 pmap_dcache_wb_range(pv->pv_pmap, 1774 pv->pv_va, PAGE_SIZE, true, 1775 (pv->pv_flags & PVF_WRITE) == 0); 1776#endif 1777 pmap_tlb_flushD_SE(pv->pv_pmap, 1778 pv->pv_va); 1779 } 1780 } 1781 1782 *ptep = pte; 1783 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1784 } 1785 cpu_cpwait(); 1786 } else 1787 if (entries > cacheable_entries) { 1788 /* 1789 * Turn cacheing back on for some pages. If it is a kernel 1790 * page, only do so if there are no other writable pages. 1791 */ 1792 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1793 if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && 1794 (kpmap != pv->pv_pmap || other_writable))) 1795 continue; 1796 1797 pv->pv_flags &= ~PVF_NC; 1798 1799 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1800 KDASSERT(l2b != NULL); 1801 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1802 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; 1803 1804 if (l2pte_valid(pte)) { 1805 if (PV_BEEN_EXECD(pv->pv_flags)) { 1806 pmap_tlb_flushID_SE(pv->pv_pmap, 1807 pv->pv_va); 1808 } else 1809 if (PV_BEEN_REFD(pv->pv_flags)) { 1810 pmap_tlb_flushD_SE(pv->pv_pmap, 1811 pv->pv_va); 1812 } 1813 } 1814 1815 *ptep = pte; 1816 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1817 } 1818 } 1819} 1820#endif 1821 1822#ifdef PMAP_CACHE_VIPT 1823static void 1824pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1825{ 1826 struct pv_entry *pv; 1827 vaddr_t tst_mask; 1828 bool bad_alias; 1829 struct l2_bucket *l2b; 1830 pt_entry_t *ptep, pte, opte; 1831 const u_int 1832 rw_mappings = md->urw_mappings + md->krw_mappings, 1833 ro_mappings = md->uro_mappings + md->kro_mappings; 1834 1835 /* do we need to do anything? */ 1836 if (arm_cache_prefer_mask == 0) 1837 return; 1838 1839 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n", 1840 md, pm, va)); 1841 1842 KASSERT(!va || pm); 1843 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1844 1845 /* Already a conflict? 
*/ 1846 if (__predict_false(md->pvh_attrs & PVF_NC)) { 1847 /* just an add, things are already non-cached */ 1848 KASSERT(!(md->pvh_attrs & PVF_DIRTY)); 1849 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 1850 bad_alias = false; 1851 if (va) { 1852 PMAPCOUNT(vac_color_none); 1853 bad_alias = true; 1854 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1855 goto fixup; 1856 } 1857 pv = SLIST_FIRST(&md->pvh_list); 1858 /* the list can't be empty because it would be cachable */ 1859 if (md->pvh_attrs & PVF_KMPAGE) { 1860 tst_mask = md->pvh_attrs; 1861 } else { 1862 KASSERT(pv); 1863 tst_mask = pv->pv_va; 1864 pv = SLIST_NEXT(pv, pv_link); 1865 } 1866 /* 1867 * Only check for a bad alias if we have writable mappings. 1868 */ 1869 tst_mask &= arm_cache_prefer_mask; 1870 if (rw_mappings > 0) { 1871 for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { 1872 /* if there's a bad alias, stop checking. */ 1873 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) 1874 bad_alias = true; 1875 } 1876 md->pvh_attrs |= PVF_WRITE; 1877 if (!bad_alias) 1878 md->pvh_attrs |= PVF_DIRTY; 1879 } else { 1880 /* 1881 * We have only read-only mappings. Let's see if there 1882 * are multiple colors in use or if we mapped a KMPAGE. 1883 * If the latter, we have a bad alias. If the former, 1884 * we need to remember that. 1885 */ 1886 for (; pv; pv = SLIST_NEXT(pv, pv_link)) { 1887 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { 1888 if (md->pvh_attrs & PVF_KMPAGE) 1889 bad_alias = true; 1890 break; 1891 } 1892 } 1893 md->pvh_attrs &= ~PVF_WRITE; 1894 /* 1895 * No KMPAGE and we exited early, so we must have 1896 * multiple color mappings. 1897 */ 1898 if (!bad_alias && pv != NULL) 1899 md->pvh_attrs |= PVF_MULTCLR; 1900 } 1901 1902 /* If no conflicting colors, set everything back to cached */ 1903 if (!bad_alias) { 1904#ifdef DEBUG 1905 if ((md->pvh_attrs & PVF_WRITE) 1906 || ro_mappings < 2) { 1907 SLIST_FOREACH(pv, &md->pvh_list, pv_link) 1908 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 1909 } 1910#endif 1911 md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; 1912 md->pvh_attrs |= tst_mask | PVF_COLORED; 1913 /* 1914 * Restore DIRTY bit if page is modified 1915 */ 1916 if (md->pvh_attrs & PVF_DMOD) 1917 md->pvh_attrs |= PVF_DIRTY; 1918 PMAPCOUNT(vac_color_restore); 1919 } else { 1920 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 1921 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 1922 } 1923 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1924 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1925 } else if (!va) { 1926 KASSERT(pmap_is_page_colored_p(md)); 1927 KASSERT(!(md->pvh_attrs & PVF_WRITE) 1928 || (md->pvh_attrs & PVF_DIRTY)); 1929 if (rw_mappings == 0) { 1930 md->pvh_attrs &= ~PVF_WRITE; 1931 if (ro_mappings == 1 1932 && (md->pvh_attrs & PVF_MULTCLR)) { 1933 /* 1934 * If this is the last readonly mapping 1935 * but it doesn't match the current color 1936 * for the page, change the current color 1937 * to match this last readonly mapping. 
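 *
 * Worked example (the mask value is purely illustrative): with
 * arm_cache_prefer_mask == 0x3000, a page whose recorded colour
 * bits are 0x1000 and whose sole remaining read-only mapping has
 * VA colour bits 0x3000 gives
 *
 *	tst_mask = (0x1000 ^ 0x3000) & 0x3000 = 0x2000
 *
 * and XOR-ing that into pvh_attrs re-colours the page to 0x3000,
 * matching the mapping.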
1938 */ 1939 pv = SLIST_FIRST(&md->pvh_list); 1940 tst_mask = (md->pvh_attrs ^ pv->pv_va) 1941 & arm_cache_prefer_mask; 1942 if (tst_mask) { 1943 md->pvh_attrs ^= tst_mask; 1944 PMAPCOUNT(vac_color_change); 1945 } 1946 } 1947 } 1948 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1949 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1950 return; 1951 } else if (!pmap_is_page_colored_p(md)) { 1952 /* not colored so we just use its color */ 1953 KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); 1954 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 1955 PMAPCOUNT(vac_color_new); 1956 md->pvh_attrs &= PAGE_SIZE - 1; 1957 md->pvh_attrs |= PVF_COLORED 1958 | (va & arm_cache_prefer_mask) 1959 | (rw_mappings > 0 ? PVF_WRITE : 0); 1960 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1961 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1962 return; 1963 } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { 1964 bad_alias = false; 1965 if (rw_mappings > 0) { 1966 /* 1967 * We now have writeable mappings and if we have 1968 * readonly mappings in more than once color, we have 1969 * an aliasing problem. Regardless mark the page as 1970 * writeable. 1971 */ 1972 if (md->pvh_attrs & PVF_MULTCLR) { 1973 if (ro_mappings < 2) { 1974 /* 1975 * If we only have less than two 1976 * read-only mappings, just flush the 1977 * non-primary colors from the cache. 1978 */ 1979 pmap_flush_page(md, pa, 1980 PMAP_FLUSH_SECONDARY); 1981 } else { 1982 bad_alias = true; 1983 } 1984 } 1985 md->pvh_attrs |= PVF_WRITE; 1986 } 1987 /* If no conflicting colors, set everything back to cached */ 1988 if (!bad_alias) { 1989#ifdef DEBUG 1990 if (rw_mappings > 0 1991 || (md->pvh_attrs & PMAP_KMPAGE)) { 1992 tst_mask = md->pvh_attrs & arm_cache_prefer_mask; 1993 SLIST_FOREACH(pv, &md->pvh_list, pv_link) 1994 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 1995 } 1996#endif 1997 if (SLIST_EMPTY(&md->pvh_list)) 1998 PMAPCOUNT(vac_color_reuse); 1999 else 2000 PMAPCOUNT(vac_color_ok); 2001 2002 /* matching color, just return */ 2003 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2004 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2005 return; 2006 } 2007 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 2008 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 2009 2010 /* color conflict. evict from cache. */ 2011 2012 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 2013 md->pvh_attrs &= ~PVF_COLORED; 2014 md->pvh_attrs |= PVF_NC; 2015 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2016 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2017 PMAPCOUNT(vac_color_erase); 2018 } else if (rw_mappings == 0 2019 && (md->pvh_attrs & PVF_KMPAGE) == 0) { 2020 KASSERT((md->pvh_attrs & PVF_WRITE) == 0); 2021 2022 /* 2023 * If the page has dirty cache lines, clean it. 2024 */ 2025 if (md->pvh_attrs & PVF_DIRTY) 2026 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); 2027 2028 /* 2029 * If this is the first remapping (we know that there are no 2030 * writeable mappings), then this is a simple color change. 2031 * Otherwise this is a seconary r/o mapping, which means 2032 * we don't have to do anything. 
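 *
 * Either way the page is flagged PVF_MULTCLR below.  This is safe
 * because there are no writable mappings in this branch and any
 * dirty cache lines were written back by the PMAP_CLEAN_PRIMARY
 * flush above, so read-only aliases in different colours cannot
 * expose stale data.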
2033 */ 2034 if (ro_mappings == 1) { 2035 KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); 2036 md->pvh_attrs &= PAGE_SIZE - 1; 2037 md->pvh_attrs |= (va & arm_cache_prefer_mask); 2038 PMAPCOUNT(vac_color_change); 2039 } else { 2040 PMAPCOUNT(vac_color_blind); 2041 } 2042 md->pvh_attrs |= PVF_MULTCLR; 2043 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2044 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2045 return; 2046 } else { 2047 if (rw_mappings > 0) 2048 md->pvh_attrs |= PVF_WRITE; 2049 2050 /* color conflict. evict from cache. */ 2051 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 2052 2053 /* the list can't be empty because this was a enter/modify */ 2054 pv = SLIST_FIRST(&md->pvh_list); 2055 if ((md->pvh_attrs & PVF_KMPAGE) == 0) { 2056 KASSERT(pv); 2057 /* 2058 * If there's only one mapped page, change color to the 2059 * page's new color and return. Restore the DIRTY bit 2060 * that was erased by pmap_flush_page. 2061 */ 2062 if (SLIST_NEXT(pv, pv_link) == NULL) { 2063 md->pvh_attrs &= PAGE_SIZE - 1; 2064 md->pvh_attrs |= (va & arm_cache_prefer_mask); 2065 if (md->pvh_attrs & PVF_DMOD) 2066 md->pvh_attrs |= PVF_DIRTY; 2067 PMAPCOUNT(vac_color_change); 2068 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2069 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2070 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2071 return; 2072 } 2073 } 2074 bad_alias = true; 2075 md->pvh_attrs &= ~PVF_COLORED; 2076 md->pvh_attrs |= PVF_NC; 2077 PMAPCOUNT(vac_color_erase); 2078 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2079 } 2080 2081 fixup: 2082 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2083 2084 /* 2085 * Turn cacheing on/off for all pages. 2086 */ 2087 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 2088 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 2089 KDASSERT(l2b != NULL); 2090 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2091 opte = *ptep; 2092 pte = opte & ~L2_S_CACHE_MASK; 2093 if (bad_alias) { 2094 pv->pv_flags |= PVF_NC; 2095 } else { 2096 pv->pv_flags &= ~PVF_NC; 2097 pte |= pte_l2_s_cache_mode; 2098 } 2099 2100 if (opte == pte) /* only update is there's a change */ 2101 continue; 2102 2103 if (l2pte_valid(pte)) { 2104 if (PV_BEEN_EXECD(pv->pv_flags)) { 2105 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); 2106 } else if (PV_BEEN_REFD(pv->pv_flags)) { 2107 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); 2108 } 2109 } 2110 2111 *ptep = pte; 2112 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 2113 } 2114} 2115#endif /* PMAP_CACHE_VIPT */ 2116 2117 2118/* 2119 * Modify pte bits for all ptes corresponding to the given physical address. 2120 * We use `maskbits' rather than `clearbits' because we're always passing 2121 * constants and the latter would require an extra inversion at run-time. 
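 *
 * For illustration, the callers later in this file use it as:
 *
 *	pmap_clear_modify()             -> pmap_clearbit(md, pa, PVF_MOD)
 *	pmap_clear_reference()          -> pmap_clearbit(md, pa, PVF_REF)
 *	pmap_page_protect(READ|EXECUTE) -> pmap_clearbit(md, pa, PVF_WRITE)
 *
 * i.e. `maskbits' names the PV flags to be cleared from every
 * mapping of the page.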
2122 */ 2123static void 2124pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) 2125{ 2126 struct l2_bucket *l2b; 2127 struct pv_entry *pv; 2128 pt_entry_t *ptep, npte, opte; 2129 pmap_t pm; 2130 vaddr_t va; 2131 u_int oflags; 2132#ifdef PMAP_CACHE_VIPT 2133 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); 2134 bool need_syncicache = false; 2135 bool did_syncicache = false; 2136 bool need_vac_me_harder = false; 2137#endif 2138 2139 NPDEBUG(PDB_BITS, 2140 printf("pmap_clearbit: md %p mask 0x%x\n", 2141 md, maskbits)); 2142 2143#ifdef PMAP_CACHE_VIPT 2144 /* 2145 * If we might want to sync the I-cache and we've modified it, 2146 * then we know we definitely need to sync or discard it. 2147 */ 2148 if (want_syncicache) 2149 need_syncicache = md->pvh_attrs & PVF_MOD; 2150#endif 2151 /* 2152 * Clear saved attributes (modify, reference) 2153 */ 2154 md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); 2155 2156 if (SLIST_EMPTY(&md->pvh_list)) { 2157#ifdef PMAP_CACHE_VIPT 2158 if (need_syncicache) { 2159 /* 2160 * No one has it mapped, so just discard it. The next 2161 * exec remapping will cause it to be synced. 2162 */ 2163 md->pvh_attrs &= ~PVF_EXEC; 2164 PMAPCOUNT(exec_discarded_clearbit); 2165 } 2166#endif 2167 return; 2168 } 2169 2170 /* 2171 * Loop over all current mappings setting/clearing as appropos 2172 */ 2173 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 2174 va = pv->pv_va; 2175 pm = pv->pv_pmap; 2176 oflags = pv->pv_flags; 2177 /* 2178 * Kernel entries are unmanaged and as such not to be changed. 2179 */ 2180 if (oflags & PVF_KENTRY) 2181 continue; 2182 pv->pv_flags &= ~maskbits; 2183 2184 pmap_acquire_pmap_lock(pm); 2185 2186 l2b = pmap_get_l2_bucket(pm, va); 2187 KDASSERT(l2b != NULL); 2188 2189 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2190 npte = opte = *ptep; 2191 2192 NPDEBUG(PDB_BITS, 2193 printf( 2194 "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n", 2195 pv, pv->pv_pmap, pv->pv_va, oflags)); 2196 2197 if (maskbits & (PVF_WRITE|PVF_MOD)) { 2198#ifdef PMAP_CACHE_VIVT 2199 if ((pv->pv_flags & PVF_NC)) { 2200 /* 2201 * Entry is not cacheable: 2202 * 2203 * Don't turn caching on again if this is a 2204 * modified emulation. This would be 2205 * inconsitent with the settings created by 2206 * pmap_vac_me_harder(). Otherwise, it's safe 2207 * to re-enable cacheing. 2208 * 2209 * There's no need to call pmap_vac_me_harder() 2210 * here: all pages are losing their write 2211 * permission. 
2212 */ 2213 if (maskbits & PVF_WRITE) { 2214 npte |= pte_l2_s_cache_mode; 2215 pv->pv_flags &= ~PVF_NC; 2216 } 2217 } else 2218 if (l2pte_writable_p(opte)) { 2219 /* 2220 * Entry is writable/cacheable: check if pmap 2221 * is current if it is flush it, otherwise it 2222 * won't be in the cache 2223 */ 2224 if (PV_BEEN_EXECD(oflags)) 2225 pmap_idcache_wbinv_range(pm, pv->pv_va, 2226 PAGE_SIZE); 2227 else 2228 if (PV_BEEN_REFD(oflags)) 2229 pmap_dcache_wb_range(pm, pv->pv_va, 2230 PAGE_SIZE, 2231 (maskbits & PVF_REF) != 0, false); 2232 } 2233#endif 2234 2235 /* make the pte read only */ 2236 npte = l2pte_set_readonly(npte); 2237 2238 if (maskbits & oflags & PVF_WRITE) { 2239 /* 2240 * Keep alias accounting up to date 2241 */ 2242 if (pv->pv_pmap == pmap_kernel()) { 2243 md->krw_mappings--; 2244 md->kro_mappings++; 2245 } else { 2246 md->urw_mappings--; 2247 md->uro_mappings++; 2248 } 2249#ifdef PMAP_CACHE_VIPT 2250 if (arm_cache_prefer_mask != 0) { 2251 if (md->urw_mappings + md->krw_mappings == 0) { 2252 md->pvh_attrs &= ~PVF_WRITE; 2253 } else { 2254 PMAP_VALIDATE_MD_PAGE(md); 2255 } 2256 } 2257 if (want_syncicache) 2258 need_syncicache = true; 2259 need_vac_me_harder = true; 2260#endif 2261 } 2262 } 2263 2264 if (maskbits & PVF_REF) { 2265 if ((pv->pv_flags & PVF_NC) == 0 && 2266 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 && 2267 l2pte_valid(npte)) { 2268#ifdef PMAP_CACHE_VIVT 2269 /* 2270 * Check npte here; we may have already 2271 * done the wbinv above, and the validity 2272 * of the PTE is the same for opte and 2273 * npte. 2274 */ 2275 /* XXXJRT need idcache_inv_range */ 2276 if (PV_BEEN_EXECD(oflags)) 2277 pmap_idcache_wbinv_range(pm, 2278 pv->pv_va, PAGE_SIZE); 2279 else 2280 if (PV_BEEN_REFD(oflags)) 2281 pmap_dcache_wb_range(pm, 2282 pv->pv_va, PAGE_SIZE, 2283 true, true); 2284#endif 2285 } 2286 2287 /* 2288 * Make the PTE invalid so that we will take a 2289 * page fault the next time the mapping is 2290 * referenced. 2291 */ 2292 npte &= ~L2_TYPE_MASK; 2293 npte |= L2_TYPE_INV; 2294 } 2295 2296 if (npte != opte) { 2297 *ptep = npte; 2298 PTE_SYNC(ptep); 2299 /* Flush the TLB entry if a current pmap. */ 2300 if (PV_BEEN_EXECD(oflags)) 2301 pmap_tlb_flushID_SE(pm, pv->pv_va); 2302 else 2303 if (PV_BEEN_REFD(oflags)) 2304 pmap_tlb_flushD_SE(pm, pv->pv_va); 2305 } 2306 2307 pmap_release_pmap_lock(pm); 2308 2309 NPDEBUG(PDB_BITS, 2310 printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n", 2311 pm, va, opte, npte)); 2312 } 2313 2314#ifdef PMAP_CACHE_VIPT 2315 /* 2316 * If we need to sync the I-cache and we haven't done it yet, do it. 2317 */ 2318 if (need_syncicache && !did_syncicache) { 2319 pmap_syncicache_page(md, pa); 2320 PMAPCOUNT(exec_synced_clearbit); 2321 } 2322 /* 2323 * If we are changing this to read-only, we need to call vac_me_harder 2324 * so we can change all the read-only pages to cacheable. We pretend 2325 * this as a page deletion. 2326 */ 2327 if (need_vac_me_harder) { 2328 if (md->pvh_attrs & PVF_NC) 2329 pmap_vac_me_harder(md, pa, NULL, 0); 2330 } 2331#endif 2332} 2333 2334/* 2335 * pmap_clean_page() 2336 * 2337 * This is a local function used to work out the best strategy to clean 2338 * a single page referenced by its entry in the PV table. It's used by 2339 * pmap_copy_page, pmap_zero page and maybe some others later on. 2340 * 2341 * Its policy is effectively: 2342 * o If there are no mappings, we don't bother doing anything with the cache. 2343 * o If there is one mapping, we clean just that page. 
2344 * o If there are multiple mappings, we clean the entire cache. 2345 * 2346 * So that some functions can be further optimised, it returns 0 if it didn't 2347 * clean the entire cache, or 1 if it did. 2348 * 2349 * XXX One bug in this routine is that if the pv_entry has a single page 2350 * mapped at 0x00000000 a whole cache clean will be performed rather than 2351 * just the 1 page. Since this should not occur in everyday use and if it does 2352 * it will just result in not the most efficient clean for the page. 2353 */ 2354#ifdef PMAP_CACHE_VIVT 2355static int 2356pmap_clean_page(struct pv_entry *pv, bool is_src) 2357{ 2358 pmap_t pm_to_clean = NULL; 2359 struct pv_entry *npv; 2360 u_int cache_needs_cleaning = 0; 2361 u_int flags = 0; 2362 vaddr_t page_to_clean = 0; 2363 2364 if (pv == NULL) { 2365 /* nothing mapped in so nothing to flush */ 2366 return (0); 2367 } 2368 2369 /* 2370 * Since we flush the cache each time we change to a different 2371 * user vmspace, we only need to flush the page if it is in the 2372 * current pmap. 2373 */ 2374 2375 for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) { 2376 if (pmap_is_current(npv->pv_pmap)) { 2377 flags |= npv->pv_flags; 2378 /* 2379 * The page is mapped non-cacheable in 2380 * this map. No need to flush the cache. 2381 */ 2382 if (npv->pv_flags & PVF_NC) { 2383#ifdef DIAGNOSTIC 2384 if (cache_needs_cleaning) 2385 panic("pmap_clean_page: " 2386 "cache inconsistency"); 2387#endif 2388 break; 2389 } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) 2390 continue; 2391 if (cache_needs_cleaning) { 2392 page_to_clean = 0; 2393 break; 2394 } else { 2395 page_to_clean = npv->pv_va; 2396 pm_to_clean = npv->pv_pmap; 2397 } 2398 cache_needs_cleaning = 1; 2399 } 2400 } 2401 2402 if (page_to_clean) { 2403 if (PV_BEEN_EXECD(flags)) 2404 pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, 2405 PAGE_SIZE); 2406 else 2407 pmap_dcache_wb_range(pm_to_clean, page_to_clean, 2408 PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); 2409 } else if (cache_needs_cleaning) { 2410 pmap_t const pm = curproc->p_vmspace->vm_map.pmap; 2411 2412 if (PV_BEEN_EXECD(flags)) 2413 pmap_idcache_wbinv_all(pm); 2414 else 2415 pmap_dcache_wbinv_all(pm); 2416 return (1); 2417 } 2418 return (0); 2419} 2420#endif 2421 2422#ifdef PMAP_CACHE_VIPT 2423/* 2424 * Sync a page with the I-cache. Since this is a VIPT, we must pick the 2425 * right cache alias to make sure we flush the right stuff. 2426 */ 2427void 2428pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) 2429{ 2430 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2431 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; 2432 2433 NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n", 2434 md, md->pvh_attrs)); 2435 /* 2436 * No need to clean the page if it's non-cached. 2437 */ 2438 if (md->pvh_attrs & PVF_NC) 2439 return; 2440 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); 2441 2442 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2443 /* 2444 * Set up a PTE with the right coloring to flush existing cache lines. 2445 */ 2446 *ptep = L2_S_PROTO | 2447 pa 2448 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) 2449 | pte_l2_s_cache_mode; 2450 PTE_SYNC(ptep); 2451 2452 /* 2453 * Flush it. 2454 */ 2455 cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE); 2456 /* 2457 * Unmap the page. 
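 * (clear the PTE and flush its TLB entry so that a later user of
 * cdst_pte, e.g. pmap_zero_page or pmap_copy_page, does not see a
 * stale translation)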
2458 */ 2459 *ptep = 0; 2460 PTE_SYNC(ptep); 2461 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2462 2463 md->pvh_attrs |= PVF_EXEC; 2464 PMAPCOUNT(exec_synced); 2465} 2466 2467void 2468pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) 2469{ 2470 vsize_t va_offset, end_va; 2471 void (*cf)(vaddr_t, vsize_t); 2472 2473 if (arm_cache_prefer_mask == 0) 2474 return; 2475 2476 switch (flush) { 2477 case PMAP_FLUSH_PRIMARY: 2478 if (md->pvh_attrs & PVF_MULTCLR) { 2479 va_offset = 0; 2480 end_va = arm_cache_prefer_mask; 2481 md->pvh_attrs &= ~PVF_MULTCLR; 2482 PMAPCOUNT(vac_flush_lots); 2483 } else { 2484 va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2485 end_va = va_offset; 2486 PMAPCOUNT(vac_flush_one); 2487 } 2488 /* 2489 * Mark that the page is no longer dirty. 2490 */ 2491 md->pvh_attrs &= ~PVF_DIRTY; 2492 cf = cpufuncs.cf_idcache_wbinv_range; 2493 break; 2494 case PMAP_FLUSH_SECONDARY: 2495 va_offset = 0; 2496 end_va = arm_cache_prefer_mask; 2497 cf = cpufuncs.cf_idcache_wbinv_range; 2498 md->pvh_attrs &= ~PVF_MULTCLR; 2499 PMAPCOUNT(vac_flush_lots); 2500 break; 2501 case PMAP_CLEAN_PRIMARY: 2502 va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2503 end_va = va_offset; 2504 cf = cpufuncs.cf_dcache_wb_range; 2505 /* 2506 * Mark that the page is no longer dirty. 2507 */ 2508 if ((md->pvh_attrs & PVF_DMOD) == 0) 2509 md->pvh_attrs &= ~PVF_DIRTY; 2510 PMAPCOUNT(vac_clean_one); 2511 break; 2512 default: 2513 return; 2514 } 2515 2516 KASSERT(!(md->pvh_attrs & PVF_NC)); 2517 2518 NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", 2519 md, md->pvh_attrs)); 2520 2521 for (; va_offset <= end_va; va_offset += PAGE_SIZE) { 2522 const size_t pte_offset = va_offset >> PGSHIFT; 2523 pt_entry_t * const ptep = &cdst_pte[pte_offset]; 2524 const pt_entry_t oldpte = *ptep; 2525 2526 if (flush == PMAP_FLUSH_SECONDARY 2527 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) 2528 continue; 2529 2530 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2531 /* 2532 * Set up a PTE with the right coloring to flush 2533 * existing cache entries. 2534 */ 2535 *ptep = L2_S_PROTO 2536 | pa 2537 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) 2538 | pte_l2_s_cache_mode; 2539 PTE_SYNC(ptep); 2540 2541 /* 2542 * Flush it. 2543 */ 2544 (*cf)(cdstp + va_offset, PAGE_SIZE); 2545 2546 /* 2547 * Restore the page table entry since we might have interrupted 2548 * pmap_zero_page or pmap_copy_page which was already using 2549 * this pte. 2550 */ 2551 *ptep = oldpte; 2552 PTE_SYNC(ptep); 2553 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2554 } 2555} 2556#endif /* PMAP_CACHE_VIPT */ 2557 2558/* 2559 * Routine: pmap_page_remove 2560 * Function: 2561 * Removes this physical page from 2562 * all physical maps in which it resides. 2563 * Reflects back modify bits to the pager. 2564 */ 2565static void 2566pmap_page_remove(struct vm_page_md *md, paddr_t pa) 2567{ 2568 struct l2_bucket *l2b; 2569 struct pv_entry *pv, *npv, **pvp; 2570 pmap_t pm; 2571 pt_entry_t *ptep; 2572 bool flush; 2573 u_int flags; 2574 2575 NPDEBUG(PDB_FOLLOW, 2576 printf("pmap_page_remove: md %p (0x%08lx)\n", md, 2577 pa)); 2578 2579 pv = SLIST_FIRST(&md->pvh_list); 2580 if (pv == NULL) { 2581#ifdef PMAP_CACHE_VIPT 2582 /* 2583 * We *know* the page contents are about to be replaced. 
2584 * Discard the exec contents 2585 */ 2586 if (PV_IS_EXEC_P(md->pvh_attrs)) 2587 PMAPCOUNT(exec_discarded_page_protect); 2588 md->pvh_attrs &= ~PVF_EXEC; 2589 PMAP_VALIDATE_MD_PAGE(md); 2590#endif 2591 return; 2592 } 2593#ifdef PMAP_CACHE_VIPT 2594 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); 2595#endif 2596 2597 /* 2598 * Clear alias counts 2599 */ 2600#ifdef PMAP_CACHE_VIVT 2601 md->k_mappings = 0; 2602#endif 2603 md->urw_mappings = md->uro_mappings = 0; 2604 2605 flush = false; 2606 flags = 0; 2607 2608#ifdef PMAP_CACHE_VIVT 2609 pmap_clean_page(pv, false); 2610#endif 2611 2612 pvp = &SLIST_FIRST(&md->pvh_list); 2613 while (pv) { 2614 pm = pv->pv_pmap; 2615 npv = SLIST_NEXT(pv, pv_link); 2616 if (flush == false && pmap_is_current(pm)) 2617 flush = true; 2618 2619 if (pm == pmap_kernel()) { 2620#ifdef PMAP_CACHE_VIPT 2621 /* 2622 * If this was unmanaged mapping, it must be preserved. 2623 * Move it back on the list and advance the end-of-list 2624 * pointer. 2625 */ 2626 if (pv->pv_flags & PVF_KENTRY) { 2627 *pvp = pv; 2628 pvp = &SLIST_NEXT(pv, pv_link); 2629 pv = npv; 2630 continue; 2631 } 2632 if (pv->pv_flags & PVF_WRITE) 2633 md->krw_mappings--; 2634 else 2635 md->kro_mappings--; 2636#endif 2637 PMAPCOUNT(kernel_unmappings); 2638 } 2639 PMAPCOUNT(unmappings); 2640 2641 pmap_acquire_pmap_lock(pm); 2642 2643 l2b = pmap_get_l2_bucket(pm, pv->pv_va); 2644 KDASSERT(l2b != NULL); 2645 2646 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2647 2648 /* 2649 * Update statistics 2650 */ 2651 --pm->pm_stats.resident_count; 2652 2653 /* Wired bit */ 2654 if (pv->pv_flags & PVF_WIRED) 2655 --pm->pm_stats.wired_count; 2656 2657 flags |= pv->pv_flags; 2658 2659 /* 2660 * Invalidate the PTEs. 2661 */ 2662 *ptep = 0; 2663 PTE_SYNC_CURRENT(pm, ptep); 2664 pmap_free_l2_bucket(pm, l2b, 1); 2665 2666 pool_put(&pmap_pv_pool, pv); 2667 pv = npv; 2668 /* 2669 * if we reach the end of the list and there are still 2670 * mappings, they might be able to be cached now. 2671 */ 2672 if (pv == NULL) { 2673 *pvp = NULL; 2674 if (!SLIST_EMPTY(&md->pvh_list)) 2675 pmap_vac_me_harder(md, pa, pm, 0); 2676 } 2677 pmap_release_pmap_lock(pm); 2678 } 2679#ifdef PMAP_CACHE_VIPT 2680 /* 2681 * Its EXEC cache is now gone. 2682 */ 2683 if (PV_IS_EXEC_P(md->pvh_attrs)) 2684 PMAPCOUNT(exec_discarded_page_protect); 2685 md->pvh_attrs &= ~PVF_EXEC; 2686 KASSERT(md->urw_mappings == 0); 2687 KASSERT(md->uro_mappings == 0); 2688 if (arm_cache_prefer_mask != 0) { 2689 if (md->krw_mappings == 0) 2690 md->pvh_attrs &= ~PVF_WRITE; 2691 PMAP_VALIDATE_MD_PAGE(md); 2692 } 2693#endif 2694 2695 if (flush) { 2696 /* 2697 * Note: We can't use pmap_tlb_flush{I,D}() here since that 2698 * would need a subsequent call to pmap_update() to ensure 2699 * curpm->pm_cstate.cs_all is reset. Our callers are not 2700 * required to do that (see pmap(9)), so we can't modify 2701 * the current pmap's state. 2702 */ 2703 if (PV_BEEN_EXECD(flags)) 2704 cpu_tlb_flushID(); 2705 else 2706 cpu_tlb_flushD(); 2707 } 2708 cpu_cpwait(); 2709} 2710 2711/* 2712 * pmap_t pmap_create(void) 2713 * 2714 * Create a new pmap structure from scratch. 
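 *
 * Illustrative lifecycle only (not a literal caller; pmap_destroy()
 * is part of the standard pmap(9) interface and is not shown here):
 *
 *	pmap_t pm = pmap_create();
 *	pmap_enter(pm, va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
 *	...
 *	pmap_remove(pm, va, va + PAGE_SIZE);
 *	pmap_update(pm);
 *	pmap_destroy(pm);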
2715 */ 2716pmap_t 2717pmap_create(void) 2718{ 2719 pmap_t pm; 2720 2721 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 2722 2723 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); 2724 uvm_obj_init(&pm->pm_obj, NULL, false, 1); 2725 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); 2726 2727 pm->pm_stats.wired_count = 0; 2728 pm->pm_stats.resident_count = 1; 2729 pm->pm_cstate.cs_all = 0; 2730 pmap_alloc_l1(pm); 2731 2732 /* 2733 * Note: The pool cache ensures that the pm_l2[] array is already 2734 * initialised to zero. 2735 */ 2736 2737 pmap_pinit(pm); 2738 2739 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); 2740 2741 return (pm); 2742} 2743 2744u_int 2745arm32_mmap_flags(paddr_t pa) 2746{ 2747 /* 2748 * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff 2749 * and we're using the upper bits in page numbers to pass flags around 2750 * so we might as well use the same bits 2751 */ 2752 return (u_int)pa & PMAP_MD_MASK; 2753} 2754/* 2755 * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, 2756 * u_int flags) 2757 * 2758 * Insert the given physical page (p) at 2759 * the specified virtual address (v) in the 2760 * target physical map with the protection requested. 2761 * 2762 * NB: This is the only routine which MAY NOT lazy-evaluate 2763 * or lose information. That is, this routine must actually 2764 * insert this page into the given map NOW. 2765 */ 2766int 2767pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2768{ 2769 struct l2_bucket *l2b; 2770 struct vm_page *pg, *opg; 2771 struct pv_entry *pv; 2772 pt_entry_t *ptep, npte, opte; 2773 u_int nflags; 2774 u_int oflags; 2775 2776 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); 2777 2778 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); 2779 KDASSERT(((va | pa) & PGOFSET) == 0); 2780 2781 /* 2782 * Get a pointer to the page. Later on in this function, we 2783 * test for a managed page by checking pg != NULL. 2784 */ 2785 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; 2786 2787 nflags = 0; 2788 if (prot & VM_PROT_WRITE) 2789 nflags |= PVF_WRITE; 2790 if (prot & VM_PROT_EXECUTE) 2791 nflags |= PVF_EXEC; 2792 if (flags & PMAP_WIRED) 2793 nflags |= PVF_WIRED; 2794 2795 pmap_acquire_pmap_lock(pm); 2796 2797 /* 2798 * Fetch the L2 bucket which maps this page, allocating one if 2799 * necessary for user pmaps. 2800 */ 2801 if (pm == pmap_kernel()) 2802 l2b = pmap_get_l2_bucket(pm, va); 2803 else 2804 l2b = pmap_alloc_l2_bucket(pm, va); 2805 if (l2b == NULL) { 2806 if (flags & PMAP_CANFAIL) { 2807 pmap_release_pmap_lock(pm); 2808 return (ENOMEM); 2809 } 2810 panic("pmap_enter: failed to allocate L2 bucket"); 2811 } 2812 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2813 opte = *ptep; 2814 npte = pa; 2815 oflags = 0; 2816 2817 if (opte) { 2818 /* 2819 * There is already a mapping at this address. 2820 * If the physical address is different, lookup the 2821 * vm_page. 2822 */ 2823 if (l2pte_pa(opte) != pa) 2824 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 2825 else 2826 opg = pg; 2827 } else 2828 opg = NULL; 2829 2830 if (pg) { 2831 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 2832 2833 /* 2834 * This is to be a managed mapping. 2835 */ 2836 if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { 2837 /* 2838 * - The access type indicates that we don't need 2839 * to do referenced emulation. 2840 * OR 2841 * - The physical page has already been referenced 2842 * so no need to re-do referenced emulation here. 
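 *
 * Summarising the outcomes below: a page that has never been
 * referenced keeps an invalid PTE (L2_TYPE_INV) so the first access
 * faults for referenced emulation; a referenced page gets a valid
 * read-only PTE so the first write faults for modified emulation;
 * and a writable mapping made with a write access type (or of a
 * page already marked modified) is made writable immediately, with
 * PVF_MOD recorded up front.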
2843 */ 2844 npte |= l2pte_set_readonly(L2_S_PROTO); 2845 2846 nflags |= PVF_REF; 2847 2848 if ((prot & VM_PROT_WRITE) != 0 && 2849 ((flags & VM_PROT_WRITE) != 0 || 2850 (md->pvh_attrs & PVF_MOD) != 0)) { 2851 /* 2852 * This is a writable mapping, and the 2853 * page's mod state indicates it has 2854 * already been modified. Make it 2855 * writable from the outset. 2856 */ 2857 npte = l2pte_set_writable(npte); 2858 nflags |= PVF_MOD; 2859 } 2860 } else { 2861 /* 2862 * Need to do page referenced emulation. 2863 */ 2864 npte |= L2_TYPE_INV; 2865 } 2866 2867 npte |= pte_l2_s_cache_mode; 2868 2869 if (pg == opg) { 2870 /* 2871 * We're changing the attrs of an existing mapping. 2872 */ 2873#ifdef MULTIPROCESSOR 2874 KASSERT(uvm_page_locked_p(pg)); 2875#endif 2876 oflags = pmap_modify_pv(md, pa, pm, va, 2877 PVF_WRITE | PVF_EXEC | PVF_WIRED | 2878 PVF_MOD | PVF_REF, nflags); 2879 2880#ifdef PMAP_CACHE_VIVT 2881 /* 2882 * We may need to flush the cache if we're 2883 * doing rw-ro... 2884 */ 2885 if (pm->pm_cstate.cs_cache_d && 2886 (oflags & PVF_NC) == 0 && 2887 l2pte_writable_p(opte) && 2888 (prot & VM_PROT_WRITE) == 0) 2889 cpu_dcache_wb_range(va, PAGE_SIZE); 2890#endif 2891 } else { 2892 /* 2893 * New mapping, or changing the backing page 2894 * of an existing mapping. 2895 */ 2896 if (opg) { 2897 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 2898 paddr_t opa = VM_PAGE_TO_PHYS(opg); 2899 2900 /* 2901 * Replacing an existing mapping with a new one. 2902 * It is part of our managed memory so we 2903 * must remove it from the PV list 2904 */ 2905#ifdef MULTIPROCESSOR 2906 KASSERT(uvm_page_locked_p(opg)); 2907#endif 2908 pv = pmap_remove_pv(omd, opa, pm, va); 2909 pmap_vac_me_harder(omd, opa, pm, 0); 2910 oflags = pv->pv_flags; 2911 2912#ifdef PMAP_CACHE_VIVT 2913 /* 2914 * If the old mapping was valid (ref/mod 2915 * emulation creates 'invalid' mappings 2916 * initially) then make sure to frob 2917 * the cache. 2918 */ 2919 if ((oflags & PVF_NC) == 0 && 2920 l2pte_valid(opte)) { 2921 if (PV_BEEN_EXECD(oflags)) { 2922 pmap_idcache_wbinv_range(pm, va, 2923 PAGE_SIZE); 2924 } else 2925 if (PV_BEEN_REFD(oflags)) { 2926 pmap_dcache_wb_range(pm, va, 2927 PAGE_SIZE, true, 2928 (oflags & PVF_WRITE) == 0); 2929 } 2930 } 2931#endif 2932 } else 2933 if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ 2934 if ((flags & PMAP_CANFAIL) == 0) 2935 panic("pmap_enter: no pv entries"); 2936 2937 if (pm != pmap_kernel()) 2938 pmap_free_l2_bucket(pm, l2b, 0); 2939 pmap_release_pmap_lock(pm); 2940 NPDEBUG(PDB_ENTER, 2941 printf("pmap_enter: ENOMEM\n")); 2942 return (ENOMEM); 2943 } 2944 2945#ifdef MULTIPROCESSOR 2946 KASSERT(uvm_page_locked_p(pg)); 2947#endif 2948 pmap_enter_pv(md, pa, pv, pm, va, nflags); 2949 } 2950 } else { 2951 /* 2952 * We're mapping an unmanaged page. 2953 * These are always readable, and possibly writable, from 2954 * the get go as we don't need to track ref/mod status. 2955 */ 2956 npte |= l2pte_set_readonly(L2_S_PROTO); 2957 if (prot & VM_PROT_WRITE) 2958 npte = l2pte_set_writable(npte); 2959 2960 /* 2961 * Make sure the vector table is mapped cacheable 2962 */ 2963 if ((pm != pmap_kernel() && va == vector_page) || 2964 (flags & ARM32_MMAP_CACHEABLE)) { 2965 npte |= pte_l2_s_cache_mode; 2966 } else if (flags & ARM32_MMAP_WRITECOMBINE) { 2967 npte |= pte_l2_s_wc_mode; 2968 } 2969 if (opg) { 2970 /* 2971 * Looks like there's an existing 'managed' mapping 2972 * at this address. 
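 * Remove it from the old page's PV list, re-evaluate that page's
 * cache state, and (on VIVT) flush the old mapping from the cache
 * if it was valid; the pv entry itself is handed back to the pool
 * below.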
2973 */ 2974 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 2975 paddr_t opa = VM_PAGE_TO_PHYS(opg); 2976 2977#ifdef MULTIPROCESSOR 2978 KASSERT(uvm_page_locked_p(opg)); 2979#endif 2980 pv = pmap_remove_pv(omd, opa, pm, va); 2981 pmap_vac_me_harder(omd, opa, pm, 0); 2982 oflags = pv->pv_flags; 2983 2984#ifdef PMAP_CACHE_VIVT 2985 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { 2986 if (PV_BEEN_EXECD(oflags)) 2987 pmap_idcache_wbinv_range(pm, va, 2988 PAGE_SIZE); 2989 else 2990 if (PV_BEEN_REFD(oflags)) 2991 pmap_dcache_wb_range(pm, va, PAGE_SIZE, 2992 true, (oflags & PVF_WRITE) == 0); 2993 } 2994#endif 2995 pool_put(&pmap_pv_pool, pv); 2996 } 2997 } 2998 2999 /* 3000 * Make sure userland mappings get the right permissions 3001 */ 3002 if (pm != pmap_kernel() && va != vector_page) 3003 npte |= L2_S_PROT_U; 3004 3005 /* 3006 * Keep the stats up to date 3007 */ 3008 if (opte == 0) { 3009 l2b->l2b_occupancy++; 3010 pm->pm_stats.resident_count++; 3011 } 3012 3013 NPDEBUG(PDB_ENTER, 3014 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); 3015 3016 /* 3017 * If this is just a wiring change, the two PTEs will be 3018 * identical, so there's no need to update the page table. 3019 */ 3020 if (npte != opte) { 3021 bool is_cached = pmap_is_cached(pm); 3022 3023 *ptep = npte; 3024 if (is_cached) { 3025 /* 3026 * We only need to frob the cache/tlb if this pmap 3027 * is current 3028 */ 3029 PTE_SYNC(ptep); 3030 if (va != vector_page && l2pte_valid(npte)) { 3031 /* 3032 * This mapping is likely to be accessed as 3033 * soon as we return to userland. Fix up the 3034 * L1 entry to avoid taking another 3035 * page/domain fault. 3036 */ 3037 pd_entry_t *pl1pd, l1pd; 3038 3039 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; 3040 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | 3041 L1_C_PROTO; 3042 if (*pl1pd != l1pd) { 3043 *pl1pd = l1pd; 3044 PTE_SYNC(pl1pd); 3045 } 3046 } 3047 } 3048 3049 if (PV_BEEN_EXECD(oflags)) 3050 pmap_tlb_flushID_SE(pm, va); 3051 else 3052 if (PV_BEEN_REFD(oflags)) 3053 pmap_tlb_flushD_SE(pm, va); 3054 3055 NPDEBUG(PDB_ENTER, 3056 printf("pmap_enter: is_cached %d cs 0x%08x\n", 3057 is_cached, pm->pm_cstate.cs_all)); 3058 3059 if (pg != NULL) { 3060 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3061 3062#ifdef MULTIPROCESSOR 3063 KASSERT(uvm_page_locked_p(pg)); 3064#endif 3065 pmap_vac_me_harder(md, pa, pm, va); 3066 } 3067 } 3068#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) 3069 if (pg) { 3070 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3071 3072#ifdef MULTIPROCESSOR 3073 KASSERT(uvm_page_locked_p(pg)); 3074#endif 3075 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3076 PMAP_VALIDATE_MD_PAGE(md); 3077 } 3078#endif 3079 3080 pmap_release_pmap_lock(pm); 3081 3082 return (0); 3083} 3084 3085/* 3086 * pmap_remove() 3087 * 3088 * pmap_remove is responsible for nuking a number of mappings for a range 3089 * of virtual address space in the current pmap. To do this efficiently 3090 * is interesting, because in a number of cases a wide virtual address 3091 * range may be supplied that contains few actual mappings. So, the 3092 * optimisations are: 3093 * 1. Skip over hunks of address space for which no L1 or L2 entry exists. 3094 * 2. Build up a list of pages we've hit, up to a maximum, so we can 3095 * maybe do just a partial cache clean. This path of execution is 3096 * complicated by the fact that the cache must be flushed _before_ 3097 * the PTE is nuked, being a VAC :-) 3098 * 3. 
If we're called after UVM calls pmap_remove_all(), we can defer 3099 * all invalidations until pmap_update(), since pmap_remove_all() has 3100 * already flushed the cache. 3101 * 4. Maybe later fast-case a single page, but I don't think this is 3102 * going to make _that_ much difference overall. 3103 */ 3104 3105#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3106 3107void 3108pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) 3109{ 3110 struct l2_bucket *l2b; 3111 vaddr_t next_bucket; 3112 pt_entry_t *ptep; 3113 u_int cleanlist_idx, total, cnt; 3114 struct { 3115 vaddr_t va; 3116 pt_entry_t *ptep; 3117 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3118 u_int mappings, is_exec, is_refd; 3119 3120 NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " 3121 "eva=%08lx\n", pm, sva, eva)); 3122 3123 /* 3124 * we lock in the pmap => pv_head direction 3125 */ 3126 pmap_acquire_pmap_lock(pm); 3127 3128 if (pm->pm_remove_all || !pmap_is_cached(pm)) { 3129 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3130 if (pm->pm_cstate.cs_tlb == 0) 3131 pm->pm_remove_all = true; 3132 } else 3133 cleanlist_idx = 0; 3134 3135 total = 0; 3136 3137 while (sva < eva) { 3138 /* 3139 * Do one L2 bucket's worth at a time. 3140 */ 3141 next_bucket = L2_NEXT_BUCKET(sva); 3142 if (next_bucket > eva) 3143 next_bucket = eva; 3144 3145 l2b = pmap_get_l2_bucket(pm, sva); 3146 if (l2b == NULL) { 3147 sva = next_bucket; 3148 continue; 3149 } 3150 3151 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3152 3153 for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ 3154 struct vm_page *pg; 3155 pt_entry_t pte; 3156 paddr_t pa; 3157 3158 pte = *ptep; 3159 3160 if (pte == 0) { 3161 /* Nothing here, move along */ 3162 continue; 3163 } 3164 3165 pa = l2pte_pa(pte); 3166 is_exec = 0; 3167 is_refd = 1; 3168 3169 /* 3170 * Update flags. In a number of circumstances, 3171 * we could cluster a lot of these and do a 3172 * number of sequential pages in one go. 3173 */ 3174 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 3175 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3176 struct pv_entry *pv; 3177 3178#ifdef MULTIPROCESSOR 3179 KASSERT(uvm_page_locked_p(pg)); 3180#endif 3181 pv = pmap_remove_pv(md, pa, pm, sva); 3182 pmap_vac_me_harder(md, pa, pm, 0); 3183 if (pv != NULL) { 3184 if (pm->pm_remove_all == false) { 3185 is_exec = 3186 PV_BEEN_EXECD(pv->pv_flags); 3187 is_refd = 3188 PV_BEEN_REFD(pv->pv_flags); 3189 } 3190 pool_put(&pmap_pv_pool, pv); 3191 } 3192 } 3193 mappings++; 3194 3195 if (!l2pte_valid(pte)) { 3196 /* 3197 * Ref/Mod emulation is still active for this 3198 * mapping, therefore it is has not yet been 3199 * accessed. No need to frob the cache/tlb. 3200 */ 3201 *ptep = 0; 3202 PTE_SYNC_CURRENT(pm, ptep); 3203 continue; 3204 } 3205 3206 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { 3207 /* Add to the clean list. */ 3208 cleanlist[cleanlist_idx].ptep = ptep; 3209 cleanlist[cleanlist_idx].va = 3210 sva | (is_exec & 1); 3211 cleanlist_idx++; 3212 } else 3213 if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { 3214 /* Nuke everything if needed. */ 3215#ifdef PMAP_CACHE_VIVT 3216 pmap_idcache_wbinv_all(pm); 3217#endif 3218 pmap_tlb_flushID(pm); 3219 3220 /* 3221 * Roll back the previous PTE list, 3222 * and zero out the current PTE. 
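 * The whole cache (on VIVT) and this pmap's TLB entries have just
 * been flushed above, so the per-page flushes the cleanlist[]
 * entries were queued for are no longer needed: the PTEs are simply
 * zeroed and all further flushing is deferred, via pm_remove_all,
 * until pmap_update().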
3223 */ 3224 for (cnt = 0; 3225 cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { 3226 *cleanlist[cnt].ptep = 0; 3227 PTE_SYNC(cleanlist[cnt].ptep); 3228 } 3229 *ptep = 0; 3230 PTE_SYNC(ptep); 3231 cleanlist_idx++; 3232 pm->pm_remove_all = true; 3233 } else { 3234 *ptep = 0; 3235 PTE_SYNC(ptep); 3236 if (pm->pm_remove_all == false) { 3237 if (is_exec) 3238 pmap_tlb_flushID_SE(pm, sva); 3239 else 3240 if (is_refd) 3241 pmap_tlb_flushD_SE(pm, sva); 3242 } 3243 } 3244 } 3245 3246 /* 3247 * Deal with any left overs 3248 */ 3249 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { 3250 total += cleanlist_idx; 3251 for (cnt = 0; cnt < cleanlist_idx; cnt++) { 3252 if (pm->pm_cstate.cs_all != 0) { 3253 vaddr_t clva = cleanlist[cnt].va & ~1; 3254 if (cleanlist[cnt].va & 1) { 3255#ifdef PMAP_CACHE_VIVT 3256 pmap_idcache_wbinv_range(pm, 3257 clva, PAGE_SIZE); 3258#endif 3259 pmap_tlb_flushID_SE(pm, clva); 3260 } else { 3261#ifdef PMAP_CACHE_VIVT 3262 pmap_dcache_wb_range(pm, 3263 clva, PAGE_SIZE, true, 3264 false); 3265#endif 3266 pmap_tlb_flushD_SE(pm, clva); 3267 } 3268 } 3269 *cleanlist[cnt].ptep = 0; 3270 PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); 3271 } 3272 3273 /* 3274 * If it looks like we're removing a whole bunch 3275 * of mappings, it's faster to just write-back 3276 * the whole cache now and defer TLB flushes until 3277 * pmap_update() is called. 3278 */ 3279 if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) 3280 cleanlist_idx = 0; 3281 else { 3282 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3283#ifdef PMAP_CACHE_VIVT 3284 pmap_idcache_wbinv_all(pm); 3285#endif 3286 pm->pm_remove_all = true; 3287 } 3288 } 3289 3290 pmap_free_l2_bucket(pm, l2b, mappings); 3291 pm->pm_stats.resident_count -= mappings; 3292 } 3293 3294 pmap_release_pmap_lock(pm); 3295} 3296 3297#ifdef PMAP_CACHE_VIPT 3298static struct pv_entry * 3299pmap_kremove_pg(struct vm_page *pg, vaddr_t va) 3300{ 3301 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3302 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3303 struct pv_entry *pv; 3304 3305 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); 3306 KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); 3307 3308 pv = pmap_remove_pv(md, pa, pmap_kernel(), va); 3309 KASSERT(pv); 3310 KASSERT(pv->pv_flags & PVF_KENTRY); 3311 3312 /* 3313 * If we are removing a writeable mapping to a cached exec page, 3314 * if it's the last mapping then clear it execness other sync 3315 * the page to the icache. 3316 */ 3317 if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC 3318 && (pv->pv_flags & PVF_WRITE) != 0) { 3319 if (SLIST_EMPTY(&md->pvh_list)) { 3320 md->pvh_attrs &= ~PVF_EXEC; 3321 PMAPCOUNT(exec_discarded_kremove); 3322 } else { 3323 pmap_syncicache_page(md, pa); 3324 PMAPCOUNT(exec_synced_kremove); 3325 } 3326 } 3327 pmap_vac_me_harder(md, pa, pmap_kernel(), 0); 3328 3329 return pv; 3330} 3331#endif /* PMAP_CACHE_VIPT */ 3332 3333/* 3334 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping 3335 * 3336 * We assume there is already sufficient KVM space available 3337 * to do this, as we can't allocate L2 descriptor tables/metadata 3338 * from here. 3339 */ 3340void 3341pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 3342{ 3343 struct l2_bucket *l2b; 3344 pt_entry_t *ptep, opte; 3345#ifdef PMAP_CACHE_VIVT 3346 struct vm_page *pg = (flags & PMAP_KMPAGE) ? 
PHYS_TO_VM_PAGE(pa) : NULL; 3347#endif 3348#ifdef PMAP_CACHE_VIPT 3349 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3350 struct vm_page *opg; 3351 struct pv_entry *pv = NULL; 3352#endif 3353 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3354 3355 NPDEBUG(PDB_KENTER, 3356 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", 3357 va, pa, prot)); 3358 3359 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3360 KDASSERT(l2b != NULL); 3361 3362 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3363 opte = *ptep; 3364 3365 if (opte == 0) { 3366 PMAPCOUNT(kenter_mappings); 3367 l2b->l2b_occupancy++; 3368 } else { 3369 PMAPCOUNT(kenter_remappings); 3370#ifdef PMAP_CACHE_VIPT 3371 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3372#ifdef DIAGNOSTIC 3373 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3374#endif 3375 if (opg) { 3376 KASSERT(opg != pg); 3377 KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); 3378 KASSERT((flags & PMAP_KMPAGE) == 0); 3379 pv = pmap_kremove_pg(opg, va); 3380 } 3381#endif 3382 if (l2pte_valid(opte)) { 3383#ifdef PMAP_CACHE_VIVT 3384 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3385#endif 3386 cpu_tlb_flushD_SE(va); 3387 cpu_cpwait(); 3388 } 3389 } 3390 3391 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) 3392 | ((flags & PMAP_NOCACHE) ? 0 : pte_l2_s_cache_mode); 3393 PTE_SYNC(ptep); 3394 3395 if (pg) { 3396#ifdef MULTIPROCESSOR 3397 KASSERT(uvm_page_locked_p(pg)); 3398#endif 3399 if (flags & PMAP_KMPAGE) { 3400 KASSERT(md->urw_mappings == 0); 3401 KASSERT(md->uro_mappings == 0); 3402 KASSERT(md->krw_mappings == 0); 3403 KASSERT(md->kro_mappings == 0); 3404#ifdef PMAP_CACHE_VIPT 3405 KASSERT(pv == NULL); 3406 KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); 3407 KASSERT((md->pvh_attrs & PVF_NC) == 0); 3408 /* if there is a color conflict, evict from cache. */ 3409 if (pmap_is_page_colored_p(md) 3410 && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { 3411 PMAPCOUNT(vac_color_change); 3412 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 3413 } else if (md->pvh_attrs & PVF_MULTCLR) { 3414 /* 3415 * If this page has multiple colors, expunge 3416 * them. 3417 */ 3418 PMAPCOUNT(vac_flush_lots2); 3419 pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); 3420 } 3421 md->pvh_attrs &= PAGE_SIZE - 1; 3422 md->pvh_attrs |= PVF_KMPAGE 3423 | PVF_COLORED | PVF_DIRTY 3424 | (va & arm_cache_prefer_mask); 3425#endif 3426#ifdef PMAP_CACHE_VIVT 3427 md->pvh_attrs |= PVF_KMPAGE; 3428#endif 3429 pmap_kmpages++; 3430#ifdef PMAP_CACHE_VIPT 3431 } else { 3432 if (pv == NULL) { 3433 pv = pool_get(&pmap_pv_pool, PR_NOWAIT); 3434 KASSERT(pv != NULL); 3435 } 3436 pmap_enter_pv(md, pa, pv, pmap_kernel(), va, 3437 PVF_WIRED | PVF_KENTRY 3438 | (prot & VM_PROT_WRITE ? 
PVF_WRITE : 0)); 3439 if ((prot & VM_PROT_WRITE) 3440 && !(md->pvh_attrs & PVF_NC)) 3441 md->pvh_attrs |= PVF_DIRTY; 3442 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3443 pmap_vac_me_harder(md, pa, pmap_kernel(), va); 3444#endif 3445 } 3446#ifdef PMAP_CACHE_VIPT 3447 } else { 3448 if (pv != NULL) 3449 pool_put(&pmap_pv_pool, pv); 3450#endif 3451 } 3452} 3453 3454void 3455pmap_kremove(vaddr_t va, vsize_t len) 3456{ 3457 struct l2_bucket *l2b; 3458 pt_entry_t *ptep, *sptep, opte; 3459 vaddr_t next_bucket, eva; 3460 u_int mappings; 3461 struct vm_page *opg; 3462 3463 PMAPCOUNT(kenter_unmappings); 3464 3465 NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", 3466 va, len)); 3467 3468 eva = va + len; 3469 3470 while (va < eva) { 3471 next_bucket = L2_NEXT_BUCKET(va); 3472 if (next_bucket > eva) 3473 next_bucket = eva; 3474 3475 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3476 KDASSERT(l2b != NULL); 3477 3478 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 3479 mappings = 0; 3480 3481 while (va < next_bucket) { 3482 opte = *ptep; 3483 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3484 if (opg) { 3485 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3486 3487 if (omd->pvh_attrs & PVF_KMPAGE) { 3488 KASSERT(omd->urw_mappings == 0); 3489 KASSERT(omd->uro_mappings == 0); 3490 KASSERT(omd->krw_mappings == 0); 3491 KASSERT(omd->kro_mappings == 0); 3492 omd->pvh_attrs &= ~PVF_KMPAGE; 3493#ifdef PMAP_CACHE_VIPT 3494 if (arm_cache_prefer_mask != 0) { 3495 omd->pvh_attrs &= ~PVF_WRITE; 3496 } 3497#endif 3498 pmap_kmpages--; 3499#ifdef PMAP_CACHE_VIPT 3500 } else { 3501 pool_put(&pmap_pv_pool, 3502 pmap_kremove_pg(opg, va)); 3503#endif 3504 } 3505 } 3506 if (l2pte_valid(opte)) { 3507#ifdef PMAP_CACHE_VIVT 3508 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3509#endif 3510 cpu_tlb_flushD_SE(va); 3511 } 3512 if (opte) { 3513 *ptep = 0; 3514 mappings++; 3515 } 3516 va += PAGE_SIZE; 3517 ptep++; 3518 } 3519 KDASSERT(mappings <= l2b->l2b_occupancy); 3520 l2b->l2b_occupancy -= mappings; 3521 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 3522 } 3523 cpu_cpwait(); 3524} 3525 3526bool 3527pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 3528{ 3529 struct l2_dtable *l2; 3530 pd_entry_t *pl1pd, l1pd; 3531 pt_entry_t *ptep, pte; 3532 paddr_t pa; 3533 u_int l1idx; 3534 3535 pmap_acquire_pmap_lock(pm); 3536 3537 l1idx = L1_IDX(va); 3538 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3539 l1pd = *pl1pd; 3540 3541 if (l1pte_section_p(l1pd)) { 3542 /* 3543 * These should only happen for pmap_kernel() 3544 */ 3545 KDASSERT(pm == pmap_kernel()); 3546 pmap_release_pmap_lock(pm); 3547 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3548 } else { 3549 /* 3550 * Note that we can't rely on the validity of the L1 3551 * descriptor as an indication that a mapping exists. 3552 * We have to look it up in the L2 dtable. 
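 * (the L1 slot is only filled in lazily, e.g. by pmap_enter() or
 * pmap_fault_fixup(), so a valid L2 mapping can exist before the
 * corresponding L1 entry has been written)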
3553 */ 3554 l2 = pm->pm_l2[L2_IDX(l1idx)]; 3555 3556 if (l2 == NULL || 3557 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3558 pmap_release_pmap_lock(pm); 3559 return false; 3560 } 3561 3562 ptep = &ptep[l2pte_index(va)]; 3563 pte = *ptep; 3564 pmap_release_pmap_lock(pm); 3565 3566 if (pte == 0) 3567 return false; 3568 3569 switch (pte & L2_TYPE_MASK) { 3570 case L2_TYPE_L: 3571 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3572 break; 3573 3574 default: 3575 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3576 break; 3577 } 3578 } 3579 3580 if (pap != NULL) 3581 *pap = pa; 3582 3583 return true; 3584} 3585 3586void 3587pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 3588{ 3589 struct l2_bucket *l2b; 3590 pt_entry_t *ptep, pte; 3591 vaddr_t next_bucket; 3592 u_int flags; 3593 u_int clr_mask; 3594 int flush; 3595 3596 NPDEBUG(PDB_PROTECT, 3597 printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", 3598 pm, sva, eva, prot)); 3599 3600 if ((prot & VM_PROT_READ) == 0) { 3601 pmap_remove(pm, sva, eva); 3602 return; 3603 } 3604 3605 if (prot & VM_PROT_WRITE) { 3606 /* 3607 * If this is a read->write transition, just ignore it and let 3608 * uvm_fault() take care of it later. 3609 */ 3610 return; 3611 } 3612 3613 pmap_acquire_pmap_lock(pm); 3614 3615 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; 3616 flags = 0; 3617 clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); 3618 3619 while (sva < eva) { 3620 next_bucket = L2_NEXT_BUCKET(sva); 3621 if (next_bucket > eva) 3622 next_bucket = eva; 3623 3624 l2b = pmap_get_l2_bucket(pm, sva); 3625 if (l2b == NULL) { 3626 sva = next_bucket; 3627 continue; 3628 } 3629 3630 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3631 3632 while (sva < next_bucket) { 3633 pte = *ptep; 3634 if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) { 3635 struct vm_page *pg; 3636 u_int f; 3637 3638#ifdef PMAP_CACHE_VIVT 3639 /* 3640 * OK, at this point, we know we're doing 3641 * write-protect operation. If the pmap is 3642 * active, write-back the page. 
3643 */ 3644 pmap_dcache_wb_range(pm, sva, PAGE_SIZE, 3645 false, false); 3646#endif 3647 3648 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3649 pte = l2pte_set_readonly(pte); 3650 *ptep = pte; 3651 PTE_SYNC(ptep); 3652 3653 if (pg != NULL) { 3654 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3655 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3656 3657#ifdef MULTIPROCESSOR 3658 KASSERT(uvm_page_locked_p(pg)); 3659#endif 3660 f = pmap_modify_pv(md, pa, pm, sva, 3661 clr_mask, 0); 3662 pmap_vac_me_harder(md, pa, pm, sva); 3663 } else { 3664 f = PVF_REF | PVF_EXEC; 3665 } 3666 3667 if (flush >= 0) { 3668 flush++; 3669 flags |= f; 3670 } else 3671 if (PV_BEEN_EXECD(f)) 3672 pmap_tlb_flushID_SE(pm, sva); 3673 else 3674 if (PV_BEEN_REFD(f)) 3675 pmap_tlb_flushD_SE(pm, sva); 3676 } 3677 3678 sva += PAGE_SIZE; 3679 ptep++; 3680 } 3681 } 3682 3683 pmap_release_pmap_lock(pm); 3684 3685 if (flush) { 3686 if (PV_BEEN_EXECD(flags)) 3687 pmap_tlb_flushID(pm); 3688 else 3689 if (PV_BEEN_REFD(flags)) 3690 pmap_tlb_flushD(pm); 3691 } 3692} 3693 3694void 3695pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) 3696{ 3697 struct l2_bucket *l2b; 3698 pt_entry_t *ptep; 3699 vaddr_t next_bucket; 3700 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; 3701 3702 NPDEBUG(PDB_EXEC, 3703 printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", 3704 pm, sva, eva)); 3705 3706 pmap_acquire_pmap_lock(pm); 3707 3708 while (sva < eva) { 3709 next_bucket = L2_NEXT_BUCKET(sva); 3710 if (next_bucket > eva) 3711 next_bucket = eva; 3712 3713 l2b = pmap_get_l2_bucket(pm, sva); 3714 if (l2b == NULL) { 3715 sva = next_bucket; 3716 continue; 3717 } 3718 3719 for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3720 sva < next_bucket; 3721 sva += page_size, ptep++, page_size = PAGE_SIZE) { 3722 if (l2pte_valid(*ptep)) { 3723 cpu_icache_sync_range(sva, 3724 min(page_size, eva - sva)); 3725 } 3726 } 3727 } 3728 3729 pmap_release_pmap_lock(pm); 3730} 3731 3732void 3733pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 3734{ 3735 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3736 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3737 3738 NPDEBUG(PDB_PROTECT, 3739 printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", 3740 md, pa, prot)); 3741 3742#ifdef MULTIPROCESSOR 3743 KASSERT(uvm_page_locked_p(pg)); 3744#endif 3745 3746 switch(prot) { 3747 case VM_PROT_READ|VM_PROT_WRITE: 3748#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) 3749 pmap_clearbit(md, pa, PVF_EXEC); 3750 break; 3751#endif 3752 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 3753 break; 3754 3755 case VM_PROT_READ: 3756#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) 3757 pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); 3758 break; 3759#endif 3760 case VM_PROT_READ|VM_PROT_EXECUTE: 3761 pmap_clearbit(md, pa, PVF_WRITE); 3762 break; 3763 3764 default: 3765 pmap_page_remove(md, pa); 3766 break; 3767 } 3768} 3769 3770/* 3771 * pmap_clear_modify: 3772 * 3773 * Clear the "modified" attribute for a page. 3774 */ 3775bool 3776pmap_clear_modify(struct vm_page *pg) 3777{ 3778 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3779 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3780 bool rv; 3781 3782#ifdef MULTIPROCESSOR 3783 KASSERT(uvm_page_locked_p(pg)); 3784#endif 3785 3786 if (md->pvh_attrs & PVF_MOD) { 3787 rv = true; 3788#ifdef PMAP_CACHE_VIPT 3789 /* 3790 * If we are going to clear the modified bit and there are 3791 * no other modified bits set, flush the page to memory and 3792 * mark it clean. 
3793 */ 3794 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) 3795 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); 3796#endif 3797 pmap_clearbit(md, pa, PVF_MOD); 3798 } else 3799 rv = false; 3800 3801 return (rv); 3802} 3803 3804/* 3805 * pmap_clear_reference: 3806 * 3807 * Clear the "referenced" attribute for a page. 3808 */ 3809bool 3810pmap_clear_reference(struct vm_page *pg) 3811{ 3812 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3813 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3814 bool rv; 3815 3816#ifdef MULTIPROCESSOR 3817 KASSERT(uvm_page_locked_p(pg)); 3818#endif 3819 3820 if (md->pvh_attrs & PVF_REF) { 3821 rv = true; 3822 pmap_clearbit(md, pa, PVF_REF); 3823 } else 3824 rv = false; 3825 3826 return (rv); 3827} 3828 3829/* 3830 * pmap_is_modified: 3831 * 3832 * Test if a page has the "modified" attribute. 3833 */ 3834/* See <arm/arm32/pmap.h> */ 3835 3836/* 3837 * pmap_is_referenced: 3838 * 3839 * Test if a page has the "referenced" attribute. 3840 */ 3841/* See <arm/arm32/pmap.h> */ 3842 3843int 3844pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) 3845{ 3846 struct l2_dtable *l2; 3847 struct l2_bucket *l2b; 3848 pd_entry_t *pl1pd, l1pd; 3849 pt_entry_t *ptep, pte; 3850 paddr_t pa; 3851 u_int l1idx; 3852 int rv = 0; 3853 3854 pmap_acquire_pmap_lock(pm); 3855 3856 l1idx = L1_IDX(va); 3857 3858 /* 3859 * If there is no l2_dtable for this address, then the process 3860 * has no business accessing it. 3861 * 3862 * Note: This will catch userland processes trying to access 3863 * kernel addresses. 3864 */ 3865 l2 = pm->pm_l2[L2_IDX(l1idx)]; 3866 if (l2 == NULL) 3867 goto out; 3868 3869 /* 3870 * Likewise if there is no L2 descriptor table 3871 */ 3872 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 3873 if (l2b->l2b_kva == NULL) 3874 goto out; 3875 3876 /* 3877 * Check the PTE itself. 3878 */ 3879 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3880 pte = *ptep; 3881 if (pte == 0) 3882 goto out; 3883 3884 /* 3885 * Catch a userland access to the vector page mapped at 0x0 3886 */ 3887 if (user && (pte & L2_S_PROT_U) == 0) 3888 goto out; 3889 3890 pa = l2pte_pa(pte); 3891 3892 if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) { 3893 /* 3894 * This looks like a good candidate for "page modified" 3895 * emulation... 3896 */ 3897 struct pv_entry *pv; 3898 struct vm_page *pg; 3899 3900 /* Extract the physical address of the page */ 3901 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 3902 goto out; 3903 3904 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3905 3906 /* Get the current flags for this page. */ 3907#ifdef MULTIPROCESSOR 3908 KASSERT(uvm_page_locked_p(pg)); 3909#endif 3910 3911 pv = pmap_find_pv(md, pm, va); 3912 if (pv == NULL) { 3913 goto out; 3914 } 3915 3916 /* 3917 * Do the flags say this page is writable? If not then it 3918 * is a genuine write fault. If yes then the write fault is 3919 * our fault as we did not reflect the write access in the 3920 * PTE. Now we know a write has occurred we can correct this 3921 * and also set the modified bit 3922 */ 3923 if ((pv->pv_flags & PVF_WRITE) == 0) { 3924 goto out; 3925 } 3926 3927 NPDEBUG(PDB_FOLLOW, 3928 printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", 3929 pm, va, pa)); 3930 3931 md->pvh_attrs |= PVF_REF | PVF_MOD; 3932 pv->pv_flags |= PVF_REF | PVF_MOD; 3933#ifdef PMAP_CACHE_VIPT 3934 /* 3935 * If there are cacheable mappings for this page, mark it dirty. 3936 */ 3937 if ((md->pvh_attrs & PVF_NC) == 0) 3938 md->pvh_attrs |= PVF_DIRTY; 3939#endif 3940 3941 /* 3942 * Re-enable write permissions for the page. 
No need to call 3943 * pmap_vac_me_harder(), since this is just a 3944 * modified-emulation fault, and the PVF_WRITE bit isn't 3945 * changing. We've already set the cacheable bits based on 3946 * the assumption that we can write to this page. 3947 */ 3948 *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO); 3949 PTE_SYNC(ptep); 3950 rv = 1; 3951 } else 3952 if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { 3953 /* 3954 * This looks like a good candidate for "page referenced" 3955 * emulation. 3956 */ 3957 struct pv_entry *pv; 3958 struct vm_page *pg; 3959 3960 /* Extract the physical address of the page */ 3961 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 3962 goto out; 3963 3964 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3965 3966 /* Get the current flags for this page. */ 3967#ifdef MULTIPROCESSOR 3968 KASSERT(uvm_page_locked_p(pg)); 3969#endif 3970 3971 pv = pmap_find_pv(md, pm, va); 3972 if (pv == NULL) { 3973 goto out; 3974 } 3975 3976 md->pvh_attrs |= PVF_REF; 3977 pv->pv_flags |= PVF_REF; 3978 3979 NPDEBUG(PDB_FOLLOW, 3980 printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", 3981 pm, va, pa)); 3982 3983 *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO); 3984 PTE_SYNC(ptep); 3985 rv = 1; 3986 } 3987 3988 /* 3989 * We know there is a valid mapping here, so simply 3990 * fix up the L1 if necessary. 3991 */ 3992 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3993 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; 3994 if (*pl1pd != l1pd) { 3995 *pl1pd = l1pd; 3996 PTE_SYNC(pl1pd); 3997 rv = 1; 3998 } 3999 4000#ifdef CPU_SA110 4001 /* 4002 * There are bugs in the rev K SA110. This is a check for one 4003 * of them. 4004 */ 4005 if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && 4006 curcpu()->ci_arm_cpurev < 3) { 4007 /* Always current pmap */ 4008 if (l2pte_valid(pte)) { 4009 extern int kernel_debug; 4010 if (kernel_debug & 1) { 4011 struct proc *p = curlwp->l_proc; 4012 printf("prefetch_abort: page is already " 4013 "mapped - pte=%p *pte=%08x\n", ptep, pte); 4014 printf("prefetch_abort: pc=%08lx proc=%p " 4015 "process=%s\n", va, p, p->p_comm); 4016 printf("prefetch_abort: far=%08x fs=%x\n", 4017 cpu_faultaddress(), cpu_faultstatus()); 4018 } 4019#ifdef DDB 4020 if (kernel_debug & 2) 4021 Debugger(); 4022#endif 4023 rv = 1; 4024 } 4025 } 4026#endif /* CPU_SA110 */ 4027 4028#ifdef DEBUG 4029 /* 4030 * If 'rv == 0' at this point, it generally indicates that there is a 4031 * stale TLB entry for the faulting address. This happens when two or 4032 * more processes are sharing an L1. Since we don't flush the TLB on 4033 * a context switch between such processes, we can take domain faults 4034 * for mappings which exist at the same VA in both processes. EVEN IF 4035 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for 4036 * example. 4037 * 4038 * This is extremely likely to happen if pmap_enter() updated the L1 4039 * entry for a recently entered mapping. In this case, the TLB is 4040 * flushed for the new mapping, but there may still be TLB entries for 4041 * other mappings belonging to other processes in the 1MB range 4042 * covered by the L1 entry. 4043 * 4044 * Since 'rv == 0', we know that the L1 already contains the correct 4045 * value, so the fault must be due to a stale TLB entry. 4046 * 4047 * Since we always need to flush the TLB anyway in the case where we 4048 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 4049 * stale TLB entries dynamically. 
4050 * 4051 * However, the above condition can ONLY happen if the current L1 is 4052 * being shared. If it happens when the L1 is unshared, it indicates 4053 * that other parts of the pmap are not doing their job WRT managing 4054 * the TLB. 4055 */ 4056 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { 4057 extern int last_fault_code; 4058 extern int kernel_debug; 4059 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 4060 pm, va, ftype); 4061 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", 4062 l2, l2b, ptep, pl1pd); 4063 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", 4064 pte, l1pd, last_fault_code); 4065#ifdef DDB 4066 if (kernel_debug & 2) 4067 Debugger(); 4068#endif 4069 } 4070#endif 4071 4072 cpu_tlb_flushID_SE(va); 4073 cpu_cpwait(); 4074 4075 rv = 1; 4076 4077out: 4078 pmap_release_pmap_lock(pm); 4079 4080 return (rv); 4081} 4082 4083/* 4084 * Routine: pmap_procwr 4085 * 4086 * Function: 4087 * Synchronize caches corresponding to [addr, addr+len) in p. 4088 * 4089 */ 4090void 4091pmap_procwr(struct proc *p, vaddr_t va, int len) 4092{ 4093 /* We only need to do anything if it is the current process. */ 4094 if (p == curproc) 4095 cpu_icache_sync_range(va, len); 4096} 4097 4098/* 4099 * Routine: pmap_unwire 4100 * Function: Clear the wired attribute for a map/virtual-address pair. 4101 * 4102 * In/out conditions: 4103 * The mapping must already exist in the pmap. 4104 */ 4105void 4106pmap_unwire(pmap_t pm, vaddr_t va) 4107{ 4108 struct l2_bucket *l2b; 4109 pt_entry_t *ptep, pte; 4110 struct vm_page *pg; 4111 paddr_t pa; 4112 4113 NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); 4114 4115 pmap_acquire_pmap_lock(pm); 4116 4117 l2b = pmap_get_l2_bucket(pm, va); 4118 KDASSERT(l2b != NULL); 4119 4120 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4121 pte = *ptep; 4122 4123 /* Extract the physical address of the page */ 4124 pa = l2pte_pa(pte); 4125 4126 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 4127 /* Update the wired bit in the pv entry for this page. */ 4128 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4129 4130#ifdef MULTIPROCESSOR 4131 KASSERT(uvm_page_locked_p(pg)); 4132#endif 4133 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); 4134 } 4135 4136 pmap_release_pmap_lock(pm); 4137} 4138 4139void 4140pmap_activate(struct lwp *l) 4141{ 4142 extern int block_userspace_access; 4143 pmap_t opm, npm, rpm; 4144 uint32_t odacr, ndacr; 4145 int oldirqstate; 4146 4147 /* 4148 * If activating a non-current lwp or the current lwp is 4149 * already active, just return. 4150 */ 4151 if (l != curlwp || 4152 l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) 4153 return; 4154 4155 npm = l->l_proc->p_vmspace->vm_map.pmap; 4156 ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 4157 (DOMAIN_CLIENT << (npm->pm_domain * 2)); 4158 4159 /* 4160 * If TTB and DACR are unchanged, short-circuit all the 4161 * TLB/cache management stuff. 4162 */ 4163 if (pmap_previous_active_lwp != NULL) { 4164 opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; 4165 odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 4166 (DOMAIN_CLIENT << (opm->pm_domain * 2)); 4167 4168 if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) 4169 goto all_done; 4170 } else 4171 opm = NULL; 4172 4173 PMAPCOUNT(activations); 4174 block_userspace_access = 1; 4175 4176 /* 4177 * If switching to a user vmspace which is different to the 4178 * most recent one, and the most recent one is potentially 4179 * live in the cache, we must write-back and invalidate the 4180 * entire cache. 
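 *
 * In other words: with a VIVT cache, lines are tagged by virtual
 * address only, so dirty lines left behind by the previous user
 * vmspace could later be evicted and written back on top of whatever
 * the next vmspace maps at the same VA.  Hence the full write-back
 * and invalidate before the TTB is switched below.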
4181 */ 4182 rpm = pmap_recent_user; 4183 4184/* 4185 * XXXSCW: There's a corner case here which can leave turds in the cache as 4186 * reported in kern/41058. They're probably left over during tear-down and 4187 * switching away from an exiting process. Until the root cause is identified 4188 * and fixed, zap the cache when switching pmaps. This will result in a few 4189 * unnecessary cache flushes, but that's better than silently corrupting data. 4190 */ 4191#if 0 4192 if (npm != pmap_kernel() && rpm && npm != rpm && 4193 rpm->pm_cstate.cs_cache) { 4194 rpm->pm_cstate.cs_cache = 0; 4195#ifdef PMAP_CACHE_VIVT 4196 cpu_idcache_wbinv_all(); 4197#endif 4198 } 4199#else 4200 if (rpm) { 4201 rpm->pm_cstate.cs_cache = 0; 4202 if (npm == pmap_kernel()) 4203 pmap_recent_user = NULL; 4204#ifdef PMAP_CACHE_VIVT 4205 cpu_idcache_wbinv_all(); 4206#endif 4207 } 4208#endif 4209 4210 /* No interrupts while we frob the TTB/DACR */ 4211 oldirqstate = disable_interrupts(IF32_bits); 4212 4213 /* 4214 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 4215 * entry corresponding to 'vector_page' in the incoming L1 table 4216 * before switching to it otherwise subsequent interrupts/exceptions 4217 * (including domain faults!) will jump into hyperspace. 4218 */ 4219 if (npm->pm_pl1vec != NULL) { 4220 cpu_tlb_flushID_SE((u_int)vector_page); 4221 cpu_cpwait(); 4222 *npm->pm_pl1vec = npm->pm_l1vec; 4223 PTE_SYNC(npm->pm_pl1vec); 4224 } 4225 4226 cpu_domains(ndacr); 4227 4228 if (npm == pmap_kernel() || npm == rpm) { 4229 /* 4230 * Switching to a kernel thread, or back to the 4231 * same user vmspace as before... Simply update 4232 * the TTB (no TLB flush required) 4233 */ 4234 __asm volatile("mcr p15, 0, %0, c2, c0, 0" :: 4235 "r"(npm->pm_l1->l1_physaddr)); 4236 cpu_cpwait(); 4237 } else { 4238 /* 4239 * Otherwise, update TTB and flush TLB 4240 */ 4241 cpu_context_switch(npm->pm_l1->l1_physaddr); 4242 if (rpm != NULL) 4243 rpm->pm_cstate.cs_tlb = 0; 4244 } 4245 4246 restore_interrupts(oldirqstate); 4247 4248 block_userspace_access = 0; 4249 4250 all_done: 4251 /* 4252 * The new pmap is resident. Make sure it's marked 4253 * as resident in the cache/TLB. 4254 */ 4255 npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4256 if (npm != pmap_kernel()) 4257 pmap_recent_user = npm; 4258 4259 /* The old pmap is not longer active */ 4260 if (opm != NULL) 4261 opm->pm_activated = false; 4262 4263 /* But the new one is */ 4264 npm->pm_activated = true; 4265} 4266 4267void 4268pmap_deactivate(struct lwp *l) 4269{ 4270 4271 /* 4272 * If the process is exiting, make sure pmap_activate() does 4273 * a full MMU context-switch and cache flush, which we might 4274 * otherwise skip. See PR port-arm/38950. 4275 */ 4276 if (l->l_proc->p_sflag & PS_WEXIT) 4277 pmap_previous_active_lwp = NULL; 4278 4279 l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; 4280} 4281 4282void 4283pmap_update(pmap_t pm) 4284{ 4285 4286 if (pm->pm_remove_all) { 4287 /* 4288 * Finish up the pmap_remove_all() optimisation by flushing 4289 * the TLB. 4290 */ 4291 pmap_tlb_flushID(pm); 4292 pm->pm_remove_all = false; 4293 } 4294 4295 if (pmap_is_current(pm)) { 4296 /* 4297 * If we're dealing with a current userland pmap, move its L1 4298 * to the end of the LRU. 4299 */ 4300 if (pm != pmap_kernel()) 4301 pmap_use_l1(pm); 4302 4303 /* 4304 * We can assume we're done with frobbing the cache/tlb for 4305 * now. Make sure any future pmap ops don't skip cache/tlb 4306 * flushes. 
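 *
 * For reference, the calling pattern this dovetails with during
 * address-space teardown looks roughly like this (a sketch of the
 * sequence only; the actual call sites live in UVM):
 *
 *	pmap_remove_all(pm);		-- cache dealt with up front
 *	pmap_remove(pm, sva, eva);	-- repeated, no per-range flushing
 *	...
 *	pmap_update(pm);		-- the deferred TLB flush is done here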
4307 */ 4308 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4309 } 4310 4311 PMAPCOUNT(updates); 4312 4313 /* 4314 * make sure TLB/cache operations have completed. 4315 */ 4316 cpu_cpwait(); 4317} 4318 4319void 4320pmap_remove_all(pmap_t pm) 4321{ 4322 4323 /* 4324 * The vmspace described by this pmap is about to be torn down. 4325 * Until pmap_update() is called, UVM will only make calls 4326 * to pmap_remove(). We can make life much simpler by flushing 4327 * the cache now, and deferring TLB invalidation to pmap_update(). 4328 */ 4329#ifdef PMAP_CACHE_VIVT 4330 pmap_idcache_wbinv_all(pm); 4331#endif 4332 pm->pm_remove_all = true; 4333} 4334 4335/* 4336 * Retire the given physical map from service. 4337 * Should only be called if the map contains no valid mappings. 4338 */ 4339void 4340pmap_destroy(pmap_t pm) 4341{ 4342 u_int count; 4343 4344 if (pm == NULL) 4345 return; 4346 4347 if (pm->pm_remove_all) { 4348 pmap_tlb_flushID(pm); 4349 pm->pm_remove_all = false; 4350 } 4351 4352 /* 4353 * Drop reference count 4354 */ 4355 mutex_enter(pm->pm_lock); 4356 count = --pm->pm_obj.uo_refs; 4357 mutex_exit(pm->pm_lock); 4358 if (count > 0) { 4359 if (pmap_is_current(pm)) { 4360 if (pm != pmap_kernel()) 4361 pmap_use_l1(pm); 4362 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4363 } 4364 return; 4365 } 4366 4367 /* 4368 * reference count is zero, free pmap resources and then free pmap. 4369 */ 4370 4371 if (vector_page < KERNEL_BASE) { 4372 KDASSERT(!pmap_is_current(pm)); 4373 4374 /* Remove the vector page mapping */ 4375 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); 4376 pmap_update(pm); 4377 } 4378 4379 LIST_REMOVE(pm, pm_list); 4380 4381 pmap_free_l1(pm); 4382 4383 if (pmap_recent_user == pm) 4384 pmap_recent_user = NULL; 4385 4386 uvm_obj_destroy(&pm->pm_obj, false); 4387 mutex_destroy(&pm->pm_obj_lock); 4388 pool_cache_put(&pmap_cache, pm); 4389} 4390 4391 4392/* 4393 * void pmap_reference(pmap_t pm) 4394 * 4395 * Add a reference to the specified pmap. 4396 */ 4397void 4398pmap_reference(pmap_t pm) 4399{ 4400 4401 if (pm == NULL) 4402 return; 4403 4404 pmap_use_l1(pm); 4405 4406 mutex_enter(pm->pm_lock); 4407 pm->pm_obj.uo_refs++; 4408 mutex_exit(pm->pm_lock); 4409} 4410 4411#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 4412 4413static struct evcnt pmap_prefer_nochange_ev = 4414 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); 4415static struct evcnt pmap_prefer_change_ev = 4416 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); 4417 4418EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); 4419EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); 4420 4421void 4422pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) 4423{ 4424 vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); 4425 vaddr_t va = *vap; 4426 vaddr_t diff = (hint - va) & mask; 4427 if (diff == 0) { 4428 pmap_prefer_nochange_ev.ev_count++; 4429 } else { 4430 pmap_prefer_change_ev.ev_count++; 4431 if (__predict_false(td)) 4432 va -= mask + 1; 4433 *vap = va + diff; 4434 } 4435} 4436#endif /* ARM_MMU_V6 | ARM_MMU_V7 */ 4437 4438/* 4439 * pmap_zero_page() 4440 * 4441 * Zero a given physical page by mapping it at a page hook point. 4442 * In doing the zero page op, the page we zero is mapped cachable, as with 4443 * StrongARM accesses to non-cached pages are non-burst making writing 4444 * _any_ bulk data very slow. 
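 *
 * On PMAP_CACHE_VIPT systems the hook VA is also offset by the page's
 * last colour (va_offset = md->pvh_attrs & arm_cache_prefer_mask, below)
 * so the zeroing run lands in the same alias as the page's other
 * mappings.  Worked example with illustrative numbers only: a VIPT
 * cache with a 16KB way and 4KB pages gives arm_cache_prefer_mask ==
 * 0x3000 (four colours); a page whose previous mapping had colour
 * 0x2000 is then mapped and zeroed at cdstp + 0x2000, using the PTE
 * slot cdst_pte[2].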
4445 */ 4446#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 4447void 4448pmap_zero_page_generic(paddr_t phys) 4449{ 4450#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4451 struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 4452 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4453#endif 4454#ifdef PMAP_CACHE_VIPT 4455 /* Choose the last page color it had, if any */ 4456 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 4457#else 4458 const vsize_t va_offset = 0; 4459#endif 4460 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; 4461 4462#ifdef DEBUG 4463 if (!SLIST_EMPTY(&md->pvh_list)) 4464 panic("pmap_zero_page: page has mappings"); 4465#endif 4466 4467 KDASSERT((phys & PGOFSET) == 0); 4468 4469 /* 4470 * Hook in the page, zero it, and purge the cache for that 4471 * zeroed page. Invalidate the TLB as needed. 4472 */ 4473 *ptep = L2_S_PROTO | phys | 4474 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4475 PTE_SYNC(ptep); 4476 cpu_tlb_flushD_SE(cdstp + va_offset); 4477 cpu_cpwait(); 4478 bzero_page(cdstp + va_offset); 4479 /* 4480 * Unmap the page. 4481 */ 4482 *ptep = 0; 4483 PTE_SYNC(ptep); 4484 cpu_tlb_flushD_SE(cdstp + va_offset); 4485#ifdef PMAP_CACHE_VIVT 4486 cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); 4487#endif 4488#ifdef PMAP_CACHE_VIPT 4489 /* 4490 * This page is now cache resident so it now has a page color. 4491 * Any contents have been obliterated so clear the EXEC flag. 4492 */ 4493 if (!pmap_is_page_colored_p(md)) { 4494 PMAPCOUNT(vac_color_new); 4495 md->pvh_attrs |= PVF_COLORED; 4496 } 4497 if (PV_IS_EXEC_P(md->pvh_attrs)) { 4498 md->pvh_attrs &= ~PVF_EXEC; 4499 PMAPCOUNT(exec_discarded_zero); 4500 } 4501 md->pvh_attrs |= PVF_DIRTY; 4502#endif 4503} 4504#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ 4505 4506#if ARM_MMU_XSCALE == 1 4507void 4508pmap_zero_page_xscale(paddr_t phys) 4509{ 4510#ifdef DEBUG 4511 struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 4512 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4513 4514 if (!SLIST_EMPTY(&md->pvh_list)) 4515 panic("pmap_zero_page: page has mappings"); 4516#endif 4517 4518 KDASSERT((phys & PGOFSET) == 0); 4519 4520 /* 4521 * Hook in the page, zero it, and purge the cache for that 4522 * zeroed page. Invalidate the TLB as needed. 4523 */ 4524 *cdst_pte = L2_S_PROTO | phys | 4525 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4526 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4527 PTE_SYNC(cdst_pte); 4528 cpu_tlb_flushD_SE(cdstp); 4529 cpu_cpwait(); 4530 bzero_page(cdstp); 4531 xscale_cache_clean_minidata(); 4532} 4533#endif /* ARM_MMU_XSCALE == 1 */ 4534 4535/* pmap_pageidlezero() 4536 * 4537 * The same as above, except that we assume that the page is not 4538 * mapped. This means we never have to flush the cache first. Called 4539 * from the idle loop. 
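 *
 * The return value matters to the caller: false means a process became
 * runnable part-way through and the page was only partially zeroed, so
 * it must not be counted as zeroed.  Roughly (an illustrative sketch;
 * the real caller lives in UVM's idle-zero code):
 *
 *	if (pmap_pageidlezero(VM_PAGE_TO_PHYS(pg)))
 *		... mark pg as fully zeroed ...
 *	else
 *		... leave pg to be zeroed again later ...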
4540 */ 4541bool 4542pmap_pageidlezero(paddr_t phys) 4543{ 4544 unsigned int i; 4545 int *ptr; 4546 bool rv = true; 4547#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4548 struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); 4549 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4550#endif 4551#ifdef PMAP_CACHE_VIPT 4552 /* Choose the last page color it had, if any */ 4553 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 4554#else 4555 const vsize_t va_offset = 0; 4556#endif 4557 pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT]; 4558 4559 4560#ifdef DEBUG 4561 if (!SLIST_EMPTY(&md->pvh_list)) 4562 panic("pmap_pageidlezero: page has mappings"); 4563#endif 4564 4565 KDASSERT((phys & PGOFSET) == 0); 4566 4567 /* 4568 * Hook in the page, zero it, and purge the cache for that 4569 * zeroed page. Invalidate the TLB as needed. 4570 */ 4571 *ptep = L2_S_PROTO | phys | 4572 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4573 PTE_SYNC(ptep); 4574 cpu_tlb_flushD_SE(cdstp + va_offset); 4575 cpu_cpwait(); 4576 4577 for (i = 0, ptr = (int *)(cdstp + va_offset); 4578 i < (PAGE_SIZE / sizeof(int)); i++) { 4579 if (sched_curcpu_runnable_p() != 0) { 4580 /* 4581 * A process has become ready. Abort now, 4582 * so we don't keep it waiting while we 4583 * do slow memory access to finish this 4584 * page. 4585 */ 4586 rv = false; 4587 break; 4588 } 4589 *ptr++ = 0; 4590 } 4591 4592#ifdef PMAP_CACHE_VIVT 4593 if (rv) 4594 /* 4595 * if we aborted we'll rezero this page again later so don't 4596 * purge it unless we finished it 4597 */ 4598 cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4599#elif defined(PMAP_CACHE_VIPT) 4600 /* 4601 * This page is now cache resident so it now has a page color. 4602 * Any contents have been obliterated so clear the EXEC flag. 4603 */ 4604 if (!pmap_is_page_colored_p(md)) { 4605 PMAPCOUNT(vac_color_new); 4606 md->pvh_attrs |= PVF_COLORED; 4607 } 4608 if (PV_IS_EXEC_P(md->pvh_attrs)) { 4609 md->pvh_attrs &= ~PVF_EXEC; 4610 PMAPCOUNT(exec_discarded_zero); 4611 } 4612#endif 4613 /* 4614 * Unmap the page. 4615 */ 4616 *ptep = 0; 4617 PTE_SYNC(ptep); 4618 cpu_tlb_flushD_SE(cdstp + va_offset); 4619 4620 return (rv); 4621} 4622 4623/* 4624 * pmap_copy_page() 4625 * 4626 * Copy one physical page into another, by mapping the pages into 4627 * hook points. The same comment regarding cachability as in 4628 * pmap_zero_page also applies here. 
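 *
 * Note that the source page is hooked in read-only and the destination
 * writable (see the L2_S_PROT() flags below), so the copy itself can
 * never create dirty lines for the source page through its temporary
 * mapping.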
4629 */ 4630#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 4631void 4632pmap_copy_page_generic(paddr_t src, paddr_t dst) 4633{ 4634 struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); 4635 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 4636#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4637 struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); 4638 struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); 4639#endif 4640#ifdef PMAP_CACHE_VIPT 4641 const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; 4642 const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; 4643#else 4644 const vsize_t src_va_offset = 0; 4645 const vsize_t dst_va_offset = 0; 4646#endif 4647 pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; 4648 pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; 4649 4650#ifdef DEBUG 4651 if (!SLIST_EMPTY(&dst_md->pvh_list)) 4652 panic("pmap_copy_page: dst page has mappings"); 4653#endif 4654 4655#ifdef PMAP_CACHE_VIPT 4656 KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); 4657#endif 4658 KDASSERT((src & PGOFSET) == 0); 4659 KDASSERT((dst & PGOFSET) == 0); 4660 4661 /* 4662 * Clean the source page. Hold the source page's lock for 4663 * the duration of the copy so that no other mappings can 4664 * be created while we have a potentially aliased mapping. 4665 */ 4666#ifdef MULTIPROCESSOR 4667 KASSERT(uvm_page_locked_p(src_pg)); 4668#endif 4669#ifdef PMAP_CACHE_VIVT 4670 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); 4671#endif 4672 4673 /* 4674 * Map the pages into the page hook points, copy them, and purge 4675 * the cache for the appropriate page. Invalidate the TLB 4676 * as required. 4677 */ 4678 *src_ptep = L2_S_PROTO 4679 | src 4680#ifdef PMAP_CACHE_VIPT 4681 | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) 4682#endif 4683#ifdef PMAP_CACHE_VIVT 4684 | pte_l2_s_cache_mode 4685#endif 4686 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); 4687 *dst_ptep = L2_S_PROTO | dst | 4688 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4689 PTE_SYNC(src_ptep); 4690 PTE_SYNC(dst_ptep); 4691 cpu_tlb_flushD_SE(csrcp + src_va_offset); 4692 cpu_tlb_flushD_SE(cdstp + dst_va_offset); 4693 cpu_cpwait(); 4694 bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset); 4695#ifdef PMAP_CACHE_VIVT 4696 cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE); 4697#endif 4698#ifdef PMAP_CACHE_VIVT 4699 cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE); 4700#endif 4701 /* 4702 * Unmap the pages. 4703 */ 4704 *src_ptep = 0; 4705 *dst_ptep = 0; 4706 PTE_SYNC(src_ptep); 4707 PTE_SYNC(dst_ptep); 4708 cpu_tlb_flushD_SE(csrcp + src_va_offset); 4709 cpu_tlb_flushD_SE(cdstp + dst_va_offset); 4710#ifdef PMAP_CACHE_VIPT 4711 /* 4712 * Now that the destination page is in the cache, mark it as colored. 4713 * If this was an exec page, discard it. 
4714 */ 4715 if (!pmap_is_page_colored_p(dst_md)) { 4716 PMAPCOUNT(vac_color_new); 4717 dst_md->pvh_attrs |= PVF_COLORED; 4718 } 4719 if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { 4720 dst_md->pvh_attrs &= ~PVF_EXEC; 4721 PMAPCOUNT(exec_discarded_copy); 4722 } 4723 dst_md->pvh_attrs |= PVF_DIRTY; 4724#endif 4725} 4726#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ 4727 4728#if ARM_MMU_XSCALE == 1 4729void 4730pmap_copy_page_xscale(paddr_t src, paddr_t dst) 4731{ 4732 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4733 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 4734#ifdef DEBUG 4735 struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst)); 4736 4737 if (!SLIST_EMPTY(&dst_md->pvh_list)) 4738 panic("pmap_copy_page: dst page has mappings"); 4739#endif 4740 4741 KDASSERT((src & PGOFSET) == 0); 4742 KDASSERT((dst & PGOFSET) == 0); 4743 4744 /* 4745 * Clean the source page. Hold the source page's lock for 4746 * the duration of the copy so that no other mappings can 4747 * be created while we have a potentially aliased mapping. 4748 */ 4749#ifdef MULTIPROCESSOR 4750 KASSERT(uvm_page_locked_p(src_pg)); 4751#endif 4752#ifdef PMAP_CACHE_VIVT 4753 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); 4754#endif 4755 4756 /* 4757 * Map the pages into the page hook points, copy them, and purge 4758 * the cache for the appropriate page. Invalidate the TLB 4759 * as required. 4760 */ 4761 *csrc_pte = L2_S_PROTO | src | 4762 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 4763 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4764 PTE_SYNC(csrc_pte); 4765 *cdst_pte = L2_S_PROTO | dst | 4766 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4767 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4768 PTE_SYNC(cdst_pte); 4769 cpu_tlb_flushD_SE(csrcp); 4770 cpu_tlb_flushD_SE(cdstp); 4771 cpu_cpwait(); 4772 bcopy_page(csrcp, cdstp); 4773 xscale_cache_clean_minidata(); 4774} 4775#endif /* ARM_MMU_XSCALE == 1 */ 4776 4777/* 4778 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) 4779 * 4780 * Return the start and end addresses of the kernel's virtual space. 4781 * These values are setup in pmap_bootstrap and are updated as pages 4782 * are allocated. 4783 */ 4784void 4785pmap_virtual_space(vaddr_t *start, vaddr_t *end) 4786{ 4787 *start = virtual_avail; 4788 *end = virtual_end; 4789} 4790 4791/* 4792 * Helper function for pmap_grow_l2_bucket() 4793 */ 4794static inline int 4795pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) 4796{ 4797 struct l2_bucket *l2b; 4798 pt_entry_t *ptep; 4799 paddr_t pa; 4800 4801 if (uvm.page_init_done == false) { 4802#ifdef PMAP_STEAL_MEMORY 4803 pv_addr_t pv; 4804 pmap_boot_pagealloc(PAGE_SIZE, 4805#ifdef PMAP_CACHE_VIPT 4806 arm_cache_prefer_mask, 4807 va & arm_cache_prefer_mask, 4808#else 4809 0, 0, 4810#endif 4811 &pv); 4812 pa = pv.pv_pa; 4813#else 4814 if (uvm_page_physget(&pa) == false) 4815 return (1); 4816#endif /* PMAP_STEAL_MEMORY */ 4817 } else { 4818 struct vm_page *pg; 4819 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 4820 if (pg == NULL) 4821 return (1); 4822 pa = VM_PAGE_TO_PHYS(pg); 4823#ifdef PMAP_CACHE_VIPT 4824#ifdef DIAGNOSTIC 4825 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4826#endif 4827 /* 4828 * This new page must not have any mappings. Enter it via 4829 * pmap_kenter_pa and let that routine do the hard work. 
4830 */ 4831 KASSERT(SLIST_EMPTY(&md->pvh_list)); 4832 pmap_kenter_pa(va, pa, 4833 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 4834#endif 4835 } 4836 4837 if (pap) 4838 *pap = pa; 4839 4840 PMAPCOUNT(pt_mappings); 4841 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4842 KDASSERT(l2b != NULL); 4843 4844 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4845 *ptep = L2_S_PROTO | pa | cache_mode | 4846 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 4847 PTE_SYNC(ptep); 4848 memset((void *)va, 0, PAGE_SIZE); 4849 return (0); 4850} 4851 4852/* 4853 * This is the same as pmap_alloc_l2_bucket(), except that it is only 4854 * used by pmap_growkernel(). 4855 */ 4856static inline struct l2_bucket * 4857pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) 4858{ 4859 struct l2_dtable *l2; 4860 struct l2_bucket *l2b; 4861 u_short l1idx; 4862 vaddr_t nva; 4863 4864 l1idx = L1_IDX(va); 4865 4866 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 4867 /* 4868 * No mapping at this address, as there is 4869 * no entry in the L1 table. 4870 * Need to allocate a new l2_dtable. 4871 */ 4872 nva = pmap_kernel_l2dtable_kva; 4873 if ((nva & PGOFSET) == 0) { 4874 /* 4875 * Need to allocate a backing page 4876 */ 4877 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 4878 return (NULL); 4879 } 4880 4881 l2 = (struct l2_dtable *)nva; 4882 nva += sizeof(struct l2_dtable); 4883 4884 if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { 4885 /* 4886 * The new l2_dtable straddles a page boundary. 4887 * Map in another page to cover it. 4888 */ 4889 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 4890 return (NULL); 4891 } 4892 4893 pmap_kernel_l2dtable_kva = nva; 4894 4895 /* 4896 * Link it into the parent pmap 4897 */ 4898 pm->pm_l2[L2_IDX(l1idx)] = l2; 4899 } 4900 4901 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 4902 4903 /* 4904 * Fetch pointer to the L2 page table associated with the address. 4905 */ 4906 if (l2b->l2b_kva == NULL) { 4907 pt_entry_t *ptep; 4908 4909 /* 4910 * No L2 page table has been allocated. Chances are, this 4911 * is because we just allocated the l2_dtable, above. 4912 */ 4913 nva = pmap_kernel_l2ptp_kva; 4914 ptep = (pt_entry_t *)nva; 4915 if ((nva & PGOFSET) == 0) { 4916 /* 4917 * Need to allocate a backing page 4918 */ 4919 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, 4920 &pmap_kernel_l2ptp_phys)) 4921 return (NULL); 4922 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 4923 } 4924 4925 l2->l2_occupancy++; 4926 l2b->l2b_kva = ptep; 4927 l2b->l2b_l1idx = l1idx; 4928 l2b->l2b_phys = pmap_kernel_l2ptp_phys; 4929 4930 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 4931 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 4932 } 4933 4934 return (l2b); 4935} 4936 4937vaddr_t 4938pmap_growkernel(vaddr_t maxkvaddr) 4939{ 4940 pmap_t kpm = pmap_kernel(); 4941 struct l1_ttable *l1; 4942 struct l2_bucket *l2b; 4943 pd_entry_t *pl1pd; 4944 int s; 4945 4946 if (maxkvaddr <= pmap_curmaxkvaddr) 4947 goto out; /* we are OK */ 4948 4949 NPDEBUG(PDB_GROWKERN, 4950 printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", 4951 pmap_curmaxkvaddr, maxkvaddr)); 4952 4953 KDASSERT(maxkvaddr <= virtual_end); 4954 4955 /* 4956 * whoops! 
we need to add kernel PTPs 4957 */ 4958 4959 s = splhigh(); /* to be safe */ 4960 mutex_enter(kpm->pm_lock); 4961 4962 /* Map 1MB at a time */ 4963 for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { 4964 4965 l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 4966 KDASSERT(l2b != NULL); 4967 4968 /* Distribute new L1 entry to all other L1s */ 4969 SLIST_FOREACH(l1, &l1_list, l1_link) { 4970 pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; 4971 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | 4972 L1_C_PROTO; 4973 PTE_SYNC(pl1pd); 4974 } 4975 } 4976 4977 /* 4978 * flush out the cache, expensive but growkernel will happen so 4979 * rarely 4980 */ 4981 cpu_dcache_wbinv_all(); 4982 cpu_tlb_flushD(); 4983 cpu_cpwait(); 4984 4985 mutex_exit(kpm->pm_lock); 4986 splx(s); 4987 4988out: 4989 return (pmap_curmaxkvaddr); 4990} 4991 4992/************************ Utility routines ****************************/ 4993 4994/* 4995 * vector_page_setprot: 4996 * 4997 * Manipulate the protection of the vector page. 4998 */ 4999void 5000vector_page_setprot(int prot) 5001{ 5002 struct l2_bucket *l2b; 5003 pt_entry_t *ptep; 5004 5005 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); 5006 KDASSERT(l2b != NULL); 5007 5008 ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 5009 5010 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 5011 PTE_SYNC(ptep); 5012 cpu_tlb_flushD_SE(vector_page); 5013 cpu_cpwait(); 5014} 5015 5016/* 5017 * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 5018 * Returns true if the mapping exists, else false. 5019 * 5020 * NOTE: This function is only used by a couple of arm-specific modules. 5021 * It is not safe to take any pmap locks here, since we could be right 5022 * in the middle of debugging the pmap anyway... 5023 * 5024 * It is possible for this routine to return false even though a valid 5025 * mapping does exist. This is because we don't lock, so the metadata 5026 * state may be inconsistent. 5027 * 5028 * NOTE: We can return a NULL *ptp in the case where the L1 pde is 5029 * a "section" mapping. 5030 */ 5031bool 5032pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) 5033{ 5034 struct l2_dtable *l2; 5035 pd_entry_t *pl1pd, l1pd; 5036 pt_entry_t *ptep; 5037 u_short l1idx; 5038 5039 if (pm->pm_l1 == NULL) 5040 return false; 5041 5042 l1idx = L1_IDX(va); 5043 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 5044 l1pd = *pl1pd; 5045 5046 if (l1pte_section_p(l1pd)) { 5047 *ptp = NULL; 5048 return true; 5049 } 5050 5051 if (pm->pm_l2 == NULL) 5052 return false; 5053 5054 l2 = pm->pm_l2[L2_IDX(l1idx)]; 5055 5056 if (l2 == NULL || 5057 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 5058 return false; 5059 } 5060 5061 *ptp = &ptep[l2pte_index(va)]; 5062 return true; 5063} 5064 5065bool 5066pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) 5067{ 5068 u_short l1idx; 5069 5070 if (pm->pm_l1 == NULL) 5071 return false; 5072 5073 l1idx = L1_IDX(va); 5074 *pdp = &pm->pm_l1->l1_kva[l1idx]; 5075 5076 return true; 5077} 5078 5079/************************ Bootstrapping routines ****************************/ 5080 5081static void 5082pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) 5083{ 5084 int i; 5085 5086 l1->l1_kva = l1pt; 5087 l1->l1_domain_use_count = 0; 5088 l1->l1_domain_first = 0; 5089 5090 for (i = 0; i < PMAP_DOMAINS; i++) 5091 l1->l1_domain_free[i] = i + 1; 5092 5093 /* 5094 * Copy the kernel's L1 entries to each new L1. 
5095 */ 5096 if (pmap_initialized) 5097 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); 5098 5099 if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, 5100 &l1->l1_physaddr) == false) 5101 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); 5102 5103 SLIST_INSERT_HEAD(&l1_list, l1, l1_link); 5104 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 5105} 5106 5107/* 5108 * pmap_bootstrap() is called from the board-specific initarm() routine 5109 * once the kernel L1/L2 descriptors tables have been set up. 5110 * 5111 * This is a somewhat convoluted process since pmap bootstrap is, effectively, 5112 * spread over a number of disparate files/functions. 5113 * 5114 * We are passed the following parameters 5115 * - kernel_l1pt 5116 * This is a pointer to the base of the kernel's L1 translation table. 5117 * - vstart 5118 * 1MB-aligned start of managed kernel virtual memory. 5119 * - vend 5120 * 1MB-aligned end of managed kernel virtual memory. 5121 * 5122 * We use the first parameter to build the metadata (struct l1_ttable and 5123 * struct l2_dtable) necessary to track kernel mappings. 5124 */ 5125#define PMAP_STATIC_L2_SIZE 16 5126void 5127pmap_bootstrap(vaddr_t vstart, vaddr_t vend) 5128{ 5129 static struct l1_ttable static_l1; 5130 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 5131 struct l1_ttable *l1 = &static_l1; 5132 struct l2_dtable *l2; 5133 struct l2_bucket *l2b; 5134 pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; 5135 pmap_t pm = pmap_kernel(); 5136 pd_entry_t pde; 5137 pt_entry_t *ptep; 5138 paddr_t pa; 5139 vaddr_t va; 5140 vsize_t size; 5141 int nptes, l1idx, l2idx, l2next = 0; 5142 5143 /* 5144 * Initialise the kernel pmap object 5145 */ 5146 pm->pm_l1 = l1; 5147 pm->pm_domain = PMAP_DOMAIN_KERNEL; 5148 pm->pm_activated = true; 5149 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 5150 5151 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); 5152 uvm_obj_init(&pm->pm_obj, NULL, false, 1); 5153 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); 5154 5155 /* 5156 * Scan the L1 translation table created by initarm() and create 5157 * the required metadata for all valid mappings found in it. 5158 */ 5159 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { 5160 pde = l1pt[l1idx]; 5161 5162 /* 5163 * We're only interested in Coarse mappings. 5164 * pmap_extract() can deal with section mappings without 5165 * recourse to checking L2 metadata. 5166 */ 5167 if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 5168 continue; 5169 5170 /* 5171 * Lookup the KVA of this L2 descriptor table 5172 */ 5173 pa = (paddr_t)(pde & L1_C_ADDR_MASK); 5174 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 5175 if (ptep == NULL) { 5176 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 5177 (u_int)l1idx << L1_S_SHIFT, pa); 5178 } 5179 5180 /* 5181 * Fetch the associated L2 metadata structure. 5182 * Allocate a new one if necessary. 5183 */ 5184 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 5185 if (l2next == PMAP_STATIC_L2_SIZE) 5186 panic("pmap_bootstrap: out of static L2s"); 5187 pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++]; 5188 } 5189 5190 /* 5191 * One more L1 slot tracked... 5192 */ 5193 l2->l2_occupancy++; 5194 5195 /* 5196 * Fill in the details of the L2 descriptor in the 5197 * appropriate bucket. 
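 *
 * The metadata being filled in here mirrors the lookup chain used at
 * run time by pmap_fault_fixup() and pmap_get_l2_bucket():
 *
 *	l1idx = L1_IDX(va);                  -- one L1 slot per 1MB
 *	l2    = pm->pm_l2[L2_IDX(l1idx)];    -- l2_dtable for that group
 *	l2b   = &l2->l2_bucket[L2_BUCKET(l1idx)];
 *	ptep  = &l2b->l2b_kva[l2pte_index(va)];
 *
 * (A sketch of the walk only; the exact grouping done by L2_IDX() and
 * L2_BUCKET() is defined in <arm/arm32/pmap.h>.)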
5198 */ 5199 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 5200 l2b->l2b_kva = ptep; 5201 l2b->l2b_phys = pa; 5202 l2b->l2b_l1idx = l1idx; 5203 5204 /* 5205 * Establish an initial occupancy count for this descriptor 5206 */ 5207 for (l2idx = 0; 5208 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 5209 l2idx++) { 5210 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 5211 l2b->l2b_occupancy++; 5212 } 5213 } 5214 5215 /* 5216 * Make sure the descriptor itself has the correct cache mode. 5217 * If not, fix it, but whine about the problem. Port-meisters 5218 * should consider this a clue to fix up their initarm() 5219 * function. :) 5220 */ 5221 if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) { 5222 printf("pmap_bootstrap: WARNING! wrong cache mode for " 5223 "L2 pte @ %p\n", ptep); 5224 } 5225 } 5226 5227 /* 5228 * Ensure the primary (kernel) L1 has the correct cache mode for 5229 * a page table. Bitch if it is not correctly set. 5230 */ 5231 for (va = (vaddr_t)l1pt; 5232 va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { 5233 if (pmap_set_pt_cache_mode(l1pt, va)) 5234 printf("pmap_bootstrap: WARNING! wrong cache mode for " 5235 "primary L1 @ 0x%lx\n", va); 5236 } 5237 5238 cpu_dcache_wbinv_all(); 5239 cpu_tlb_flushID(); 5240 cpu_cpwait(); 5241 5242 /* 5243 * now we allocate the "special" VAs which are used for tmp mappings 5244 * by the pmap (and other modules). we allocate the VAs by advancing 5245 * virtual_avail (note that there are no pages mapped at these VAs). 5246 * 5247 * Managed KVM space start from wherever initarm() tells us. 5248 */ 5249 virtual_avail = vstart; 5250 virtual_end = vend; 5251 5252#ifdef PMAP_CACHE_VIPT 5253 /* 5254 * If we have a VIPT cache, we need one page/pte per possible alias 5255 * page so we won't violate cache aliasing rules. 5256 */ 5257 virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask; 5258 nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1; 5259#else 5260 nptes = 1; 5261#endif 5262 pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte); 5263 pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte); 5264 pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte); 5265 pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte); 5266 pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL); 5267 pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE, 5268 (void *)&msgbufaddr, NULL); 5269 5270 /* 5271 * Allocate a range of kernel virtual address space to be used 5272 * for L2 descriptor tables and metadata allocation in 5273 * pmap_growkernel(). 5274 */ 5275 size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE; 5276 pmap_alloc_specials(&virtual_avail, 5277 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 5278 &pmap_kernel_l2ptp_kva, NULL); 5279 5280 size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; 5281 pmap_alloc_specials(&virtual_avail, 5282 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 5283 &pmap_kernel_l2dtable_kva, NULL); 5284 5285 /* 5286 * init the static-global locks and global pmap list. 5287 */ 5288 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM); 5289 5290 /* 5291 * We can now initialise the first L1's metadata. 
5292 */ 5293 SLIST_INIT(&l1_list); 5294 TAILQ_INIT(&l1_lru_list); 5295 pmap_init_l1(l1, l1pt); 5296 5297 /* Set up vector page L1 details, if necessary */ 5298 if (vector_page < KERNEL_BASE) { 5299 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 5300 l2b = pmap_get_l2_bucket(pm, vector_page); 5301 KDASSERT(l2b != NULL); 5302 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 5303 L1_C_DOM(pm->pm_domain); 5304 } else 5305 pm->pm_pl1vec = NULL; 5306 5307 /* 5308 * Initialize the pmap cache 5309 */ 5310 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 5311 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL); 5312 LIST_INIT(&pmap_pmaps); 5313 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); 5314 5315 /* 5316 * Initialize the pv pool. 5317 */ 5318 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl", 5319 &pmap_bootstrap_pv_allocator, IPL_NONE); 5320 5321 /* 5322 * Initialize the L2 dtable pool and cache. 5323 */ 5324 pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0, 5325 0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL); 5326 5327 /* 5328 * Initialise the L2 descriptor table pool and cache 5329 */ 5330 pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0, 5331 L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE, 5332 pmap_l2ptp_ctor, NULL, NULL); 5333 5334 cpu_dcache_wbinv_all(); 5335} 5336 5337static int 5338pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va) 5339{ 5340 pd_entry_t *pdep, pde; 5341 pt_entry_t *ptep, pte; 5342 vaddr_t pa; 5343 int rv = 0; 5344 5345 /* 5346 * Make sure the descriptor itself has the correct cache mode 5347 */ 5348 pdep = &kl1[L1_IDX(va)]; 5349 pde = *pdep; 5350 5351 if (l1pte_section_p(pde)) { 5352 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 5353 *pdep = (pde & ~L1_S_CACHE_MASK) | 5354 pte_l1_s_cache_mode_pt; 5355 PTE_SYNC(pdep); 5356 cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep)); 5357 rv = 1; 5358 } 5359 } else { 5360 pa = (paddr_t)(pde & L1_C_ADDR_MASK); 5361 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 5362 if (ptep == NULL) 5363 panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); 5364 5365 ptep = &ptep[l2pte_index(va)]; 5366 pte = *ptep; 5367 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 5368 *ptep = (pte & ~L2_S_CACHE_MASK) | 5369 pte_l2_s_cache_mode_pt; 5370 PTE_SYNC(ptep); 5371 cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep)); 5372 rv = 1; 5373 } 5374 } 5375 5376 return (rv); 5377} 5378 5379static void 5380pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep) 5381{ 5382 vaddr_t va = *availp; 5383 struct l2_bucket *l2b; 5384 5385 if (ptep) { 5386 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 5387 if (l2b == NULL) 5388 panic("pmap_alloc_specials: no l2b for 0x%lx", va); 5389 5390 if (ptep) 5391 *ptep = &l2b->l2b_kva[l2pte_index(va)]; 5392 } 5393 5394 *vap = va; 5395 *availp = va + (PAGE_SIZE * pages); 5396} 5397 5398void 5399pmap_init(void) 5400{ 5401 5402 /* 5403 * Set the available memory vars - These do not map to real memory 5404 * addresses and cannot as the physical memory is fragmented. 5405 * They are used by ps for %mem calculations. 5406 * One could argue whether this should be the entire memory or just 5407 * the memory that is useable in a user process. 
5408 */ 5409 avail_start = ptoa(VM_PHYSMEM_PTR(0)->start); 5410 avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end); 5411 5412 /* 5413 * Now we need to free enough pv_entry structures to allow us to get 5414 * the kmem_map/kmem_object allocated and inited (done after this 5415 * function is finished). to do this we allocate one bootstrap page out 5416 * of kernel_map and use it to provide an initial pool of pv_entry 5417 * structures. we never free this page. 5418 */ 5419 pool_setlowat(&pmap_pv_pool, 5420 (PAGE_SIZE / sizeof(struct pv_entry)) * 2); 5421 5422 mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE); 5423 zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 5424 UVM_KMF_WIRED|UVM_KMF_ZERO); 5425 5426 pmap_initialized = true; 5427} 5428 5429static vaddr_t last_bootstrap_page = 0; 5430static void *free_bootstrap_pages = NULL; 5431 5432static void * 5433pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags) 5434{ 5435 extern void *pool_page_alloc(struct pool *, int); 5436 vaddr_t new_page; 5437 void *rv; 5438 5439 if (pmap_initialized) 5440 return (pool_page_alloc(pp, flags)); 5441 5442 if (free_bootstrap_pages) { 5443 rv = free_bootstrap_pages; 5444 free_bootstrap_pages = *((void **)rv); 5445 return (rv); 5446 } 5447 5448 new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 5449 UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT)); 5450 5451 KASSERT(new_page > last_bootstrap_page); 5452 last_bootstrap_page = new_page; 5453 return ((void *)new_page); 5454} 5455 5456static void 5457pmap_bootstrap_pv_page_free(struct pool *pp, void *v) 5458{ 5459 extern void pool_page_free(struct pool *, void *); 5460 5461 if ((vaddr_t)v <= last_bootstrap_page) { 5462 *((void **)v) = free_bootstrap_pages; 5463 free_bootstrap_pages = v; 5464 return; 5465 } 5466 5467 if (pmap_initialized) { 5468 pool_page_free(pp, v); 5469 return; 5470 } 5471} 5472 5473/* 5474 * pmap_postinit() 5475 * 5476 * This routine is called after the vm and kmem subsystems have been 5477 * initialised. This allows the pmap code to perform any initialisation 5478 * that can only be done one the memory allocation is in place. 5479 */ 5480void 5481pmap_postinit(void) 5482{ 5483 extern paddr_t physical_start, physical_end; 5484 struct l2_bucket *l2b; 5485 struct l1_ttable *l1; 5486 struct pglist plist; 5487 struct vm_page *m; 5488 pd_entry_t *pl1pt; 5489 pt_entry_t *ptep, pte; 5490 vaddr_t va, eva; 5491 u_int loop, needed; 5492 int error; 5493 5494 pool_cache_setlowat(&pmap_l2ptp_cache, 5495 (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4); 5496 pool_cache_setlowat(&pmap_l2dtable_cache, 5497 (PAGE_SIZE / sizeof(struct l2_dtable)) * 2); 5498 5499 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 
1 : 0); 5500 needed -= 1; 5501 5502 l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP); 5503 5504 for (loop = 0; loop < needed; loop++, l1++) { 5505 /* Allocate a L1 page table */ 5506 va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY); 5507 if (va == 0) 5508 panic("Cannot allocate L1 KVM"); 5509 5510 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, 5511 physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1); 5512 if (error) 5513 panic("Cannot allocate L1 physical pages"); 5514 5515 m = TAILQ_FIRST(&plist); 5516 eva = va + L1_TABLE_SIZE; 5517 pl1pt = (pd_entry_t *)va; 5518 5519 while (m && va < eva) { 5520 paddr_t pa = VM_PAGE_TO_PHYS(m); 5521 5522 pmap_kenter_pa(va, pa, 5523 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 5524 5525 /* 5526 * Make sure the L1 descriptor table is mapped 5527 * with the cache-mode set to write-through. 5528 */ 5529 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 5530 KDASSERT(l2b != NULL); 5531 ptep = &l2b->l2b_kva[l2pte_index(va)]; 5532 pte = *ptep; 5533 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 5534 *ptep = pte; 5535 PTE_SYNC(ptep); 5536 cpu_tlb_flushD_SE(va); 5537 5538 va += PAGE_SIZE; 5539 m = TAILQ_NEXT(m, pageq.queue); 5540 } 5541 5542#ifdef DIAGNOSTIC 5543 if (m) 5544 panic("pmap_alloc_l1pt: pglist not empty"); 5545#endif /* DIAGNOSTIC */ 5546 5547 pmap_init_l1(l1, pl1pt); 5548 } 5549 5550#ifdef DEBUG 5551 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 5552 needed); 5553#endif 5554} 5555 5556/* 5557 * Note that the following routines are used by board-specific initialisation 5558 * code to configure the initial kernel page tables. 5559 * 5560 * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that 5561 * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the 5562 * behaviour of the old pmap, and provides an easy migration path for 5563 * initial bring-up of the new pmap on existing ports. Fortunately, 5564 * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and 5565 * will be deprecated. 5566 * 5567 * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page 5568 * tables. 5569 */ 5570 5571/* 5572 * This list exists for the benefit of pmap_map_chunk(). It keeps track 5573 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can 5574 * find them as necessary. 5575 * 5576 * Note that the data on this list MUST remain valid after initarm() returns, 5577 * as pmap_bootstrap() uses it to contruct L2 table metadata. 5578 */ 5579SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); 5580 5581static vaddr_t 5582kernel_pt_lookup(paddr_t pa) 5583{ 5584 pv_addr_t *pv; 5585 5586 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { 5587#ifndef ARM32_NEW_VM_LAYOUT 5588 if (pv->pv_pa == (pa & ~PGOFSET)) 5589 return (pv->pv_va | (pa & PGOFSET)); 5590#else 5591 if (pv->pv_pa == pa) 5592 return (pv->pv_va); 5593#endif 5594 } 5595 return (0); 5596} 5597 5598/* 5599 * pmap_map_section: 5600 * 5601 * Create a single section mapping. 
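 *
 *	Both 'va' and 'pa' must be 1MB (section) aligned.  Illustrative
 *	call, with made-up addresses, as a board's initarm() might issue
 *	for a block of device registers:
 *
 *	pmap_map_section(l1pt_va, 0xf0000000, 0x40000000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);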
5602 */ 5603void 5604pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 5605{ 5606 pd_entry_t *pde = (pd_entry_t *) l1pt; 5607 pd_entry_t fl; 5608 5609 KASSERT(((va | pa) & L1_S_OFFSET) == 0); 5610 5611 switch (cache) { 5612 case PTE_NOCACHE: 5613 default: 5614 fl = 0; 5615 break; 5616 5617 case PTE_CACHE: 5618 fl = pte_l1_s_cache_mode; 5619 break; 5620 5621 case PTE_PAGETABLE: 5622 fl = pte_l1_s_cache_mode_pt; 5623 break; 5624 } 5625 5626 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 5627 L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); 5628 PTE_SYNC(&pde[va >> L1_S_SHIFT]); 5629} 5630 5631/* 5632 * pmap_map_entry: 5633 * 5634 * Create a single page mapping. 5635 */ 5636void 5637pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 5638{ 5639 pd_entry_t *pde = (pd_entry_t *) l1pt; 5640 pt_entry_t fl; 5641 pt_entry_t *pte; 5642 5643 KASSERT(((va | pa) & PGOFSET) == 0); 5644 5645 switch (cache) { 5646 case PTE_NOCACHE: 5647 default: 5648 fl = 0; 5649 break; 5650 5651 case PTE_CACHE: 5652 fl = pte_l2_s_cache_mode; 5653 break; 5654 5655 case PTE_PAGETABLE: 5656 fl = pte_l2_s_cache_mode_pt; 5657 break; 5658 } 5659 5660 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 5661 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va); 5662 5663#ifndef ARM32_NEW_VM_LAYOUT 5664 pte = (pt_entry_t *) 5665 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 5666#else 5667 pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); 5668#endif 5669 if (pte == NULL) 5670 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va); 5671 5672 fl |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); 5673#ifndef ARM32_NEW_VM_LAYOUT 5674 pte += (va >> PGSHIFT) & 0x3ff; 5675#else 5676 pte += l2pte_index(va); 5677 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; 5678#endif 5679 *pte = fl; 5680 PTE_SYNC(pte); 5681} 5682 5683/* 5684 * pmap_link_l2pt: 5685 * 5686 * Link the L2 page table specified by "l2pv" into the L1 5687 * page table at the slot for "va". 5688 */ 5689void 5690pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv) 5691{ 5692 pd_entry_t *pde = (pd_entry_t *) l1pt, proto; 5693 u_int slot = va >> L1_S_SHIFT; 5694 5695#ifndef ARM32_NEW_VM_LAYOUT 5696 KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0); 5697 KASSERT((l2pv->pv_pa & PGOFSET) == 0); 5698#endif 5699 5700 proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; 5701 5702 pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); 5703#ifdef ARM32_NEW_VM_LAYOUT 5704 PTE_SYNC(&pde[slot]); 5705#else 5706 pde[slot + 1] = proto | (l2pv->pv_pa + 0x400); 5707 pde[slot + 2] = proto | (l2pv->pv_pa + 0x800); 5708 pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00); 5709 PTE_SYNC_RANGE(&pde[slot + 0], 4); 5710#endif 5711 5712 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); 5713} 5714 5715/* 5716 * pmap_map_chunk: 5717 * 5718 * Map a chunk of memory using the most efficient mappings 5719 * possible (section, large page, small page) into the 5720 * provided L1 and L2 tables at the specified virtual address. 
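 *
 *	Worked example (sizes real, addresses hypothetical): a request
 *	to map 0x00513000 bytes at a 1MB-aligned va/pa is laid down as
 *	five 1MB sections, then one 64KB large page (written as 16
 *	identical L2 entries, as the loop below shows), then three 4KB
 *	small pages.  The size is first rounded up to a whole number of
 *	pages, and that rounded size is what gets returned.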
5721 */ 5722vsize_t 5723pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size, 5724 int prot, int cache) 5725{ 5726 pd_entry_t *pde = (pd_entry_t *) l1pt; 5727 pt_entry_t *pte, f1, f2s, f2l; 5728 vsize_t resid; 5729 int i; 5730 5731 resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5732 5733 if (l1pt == 0) 5734 panic("pmap_map_chunk: no L1 table provided"); 5735 5736#ifdef VERBOSE_INIT_ARM 5737 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx " 5738 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); 5739#endif 5740 5741 switch (cache) { 5742 case PTE_NOCACHE: 5743 default: 5744 f1 = 0; 5745 f2l = 0; 5746 f2s = 0; 5747 break; 5748 5749 case PTE_CACHE: 5750 f1 = pte_l1_s_cache_mode; 5751 f2l = pte_l2_l_cache_mode; 5752 f2s = pte_l2_s_cache_mode; 5753 break; 5754 5755 case PTE_PAGETABLE: 5756 f1 = pte_l1_s_cache_mode_pt; 5757 f2l = pte_l2_l_cache_mode_pt; 5758 f2s = pte_l2_s_cache_mode_pt; 5759 break; 5760 } 5761 5762 size = resid; 5763 5764 while (resid > 0) { 5765 /* See if we can use a section mapping. */ 5766 if (L1_S_MAPPABLE_P(va, pa, resid)) { 5767#ifdef VERBOSE_INIT_ARM 5768 printf("S"); 5769#endif 5770 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 5771 L1_S_PROT(PTE_KERNEL, prot) | f1 | 5772 L1_S_DOM(PMAP_DOMAIN_KERNEL); 5773 PTE_SYNC(&pde[va >> L1_S_SHIFT]); 5774 va += L1_S_SIZE; 5775 pa += L1_S_SIZE; 5776 resid -= L1_S_SIZE; 5777 continue; 5778 } 5779 5780 /* 5781 * Ok, we're going to use an L2 table. Make sure 5782 * one is actually in the corresponding L1 slot 5783 * for the current VA. 5784 */ 5785 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 5786 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va); 5787 5788#ifndef ARM32_NEW_VM_LAYOUT 5789 pte = (pt_entry_t *) 5790 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 5791#else 5792 pte = (pt_entry_t *) kernel_pt_lookup( 5793 pde[L1_IDX(va)] & L1_C_ADDR_MASK); 5794#endif 5795 if (pte == NULL) 5796 panic("pmap_map_chunk: can't find L2 table for VA" 5797 "0x%08lx", va); 5798 5799 /* See if we can use a L2 large page mapping. */ 5800 if (L2_L_MAPPABLE_P(va, pa, resid)) { 5801#ifdef VERBOSE_INIT_ARM 5802 printf("L"); 5803#endif 5804 for (i = 0; i < 16; i++) { 5805#ifndef ARM32_NEW_VM_LAYOUT 5806 pte[((va >> PGSHIFT) & 0x3f0) + i] = 5807 L2_L_PROTO | pa | 5808 L2_L_PROT(PTE_KERNEL, prot) | f2l; 5809 PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]); 5810#else 5811 pte[l2pte_index(va) + i] = 5812 L2_L_PROTO | pa | 5813 L2_L_PROT(PTE_KERNEL, prot) | f2l; 5814 PTE_SYNC(&pte[l2pte_index(va) + i]); 5815#endif 5816 } 5817 va += L2_L_SIZE; 5818 pa += L2_L_SIZE; 5819 resid -= L2_L_SIZE; 5820 continue; 5821 } 5822 5823 /* Use a small page mapping. */ 5824#ifdef VERBOSE_INIT_ARM 5825 printf("P"); 5826#endif 5827#ifndef ARM32_NEW_VM_LAYOUT 5828 pte[(va >> PGSHIFT) & 0x3ff] = 5829 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; 5830 PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]); 5831#else 5832 pte[l2pte_index(va)] = 5833 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; 5834 PTE_SYNC(&pte[l2pte_index(va)]); 5835#endif 5836 va += PAGE_SIZE; 5837 pa += PAGE_SIZE; 5838 resid -= PAGE_SIZE; 5839 } 5840#ifdef VERBOSE_INIT_ARM 5841 printf("\n"); 5842#endif 5843 return (size); 5844} 5845 5846/********************** Static device map routines ***************************/ 5847 5848static const struct pmap_devmap *pmap_devmap_table; 5849 5850/* 5851 * Register the devmap table. 
This is provided in case early console 5852 * initialization needs to register mappings created by bootstrap code 5853 * before pmap_devmap_bootstrap() is called. 5854 */ 5855void 5856pmap_devmap_register(const struct pmap_devmap *table) 5857{ 5858 5859 pmap_devmap_table = table; 5860} 5861 5862/* 5863 * Map all of the static regions in the devmap table, and remember 5864 * the devmap table so other parts of the kernel can look up entries 5865 * later. 5866 */ 5867void 5868pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table) 5869{ 5870 int i; 5871 5872 pmap_devmap_table = table; 5873 5874 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 5875#ifdef VERBOSE_INIT_ARM 5876 printf("devmap: %08lx -> %08lx @ %08lx\n", 5877 pmap_devmap_table[i].pd_pa, 5878 pmap_devmap_table[i].pd_pa + 5879 pmap_devmap_table[i].pd_size - 1, 5880 pmap_devmap_table[i].pd_va); 5881#endif 5882 pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va, 5883 pmap_devmap_table[i].pd_pa, 5884 pmap_devmap_table[i].pd_size, 5885 pmap_devmap_table[i].pd_prot, 5886 pmap_devmap_table[i].pd_cache); 5887 } 5888} 5889 5890const struct pmap_devmap * 5891pmap_devmap_find_pa(paddr_t pa, psize_t size) 5892{ 5893 uint64_t endpa; 5894 int i; 5895 5896 if (pmap_devmap_table == NULL) 5897 return (NULL); 5898 5899 endpa = (uint64_t)pa + (uint64_t)(size - 1); 5900 5901 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 5902 if (pa >= pmap_devmap_table[i].pd_pa && 5903 endpa <= (uint64_t)pmap_devmap_table[i].pd_pa + 5904 (uint64_t)(pmap_devmap_table[i].pd_size - 1)) 5905 return (&pmap_devmap_table[i]); 5906 } 5907 5908 return (NULL); 5909} 5910 5911const struct pmap_devmap * 5912pmap_devmap_find_va(vaddr_t va, vsize_t size) 5913{ 5914 int i; 5915 5916 if (pmap_devmap_table == NULL) 5917 return (NULL); 5918 5919 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 5920 if (va >= pmap_devmap_table[i].pd_va && 5921 va + size - 1 <= pmap_devmap_table[i].pd_va + 5922 pmap_devmap_table[i].pd_size - 1) 5923 return (&pmap_devmap_table[i]); 5924 } 5925 5926 return (NULL); 5927} 5928 5929/********************** PTE initialization routines **************************/ 5930 5931/* 5932 * These routines are called when the CPU type is identified to set up 5933 * the PTE prototypes, cache modes, etc. 5934 * 5935 * The variables are always here, just in case modules need to reference 5936 * them (though, they shouldn't). 
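 *
 * The rest of the pmap consumes these prototypes when it constructs
 * descriptors, e.g. the small-page mapping set up in pmap_grow_map()
 * above:
 *
 *	*ptep = L2_S_PROTO | pa | cache_mode |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
 *
 * pmap_pte_init_generic() and the CPU-specific variants below simply
 * select the right bit patterns for these variables once the CPU type
 * is known.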
5937 */ 5938 5939pt_entry_t pte_l1_s_cache_mode; 5940pt_entry_t pte_l1_s_wc_mode; 5941pt_entry_t pte_l1_s_cache_mode_pt; 5942pt_entry_t pte_l1_s_cache_mask; 5943 5944pt_entry_t pte_l2_l_cache_mode; 5945pt_entry_t pte_l2_l_wc_mode; 5946pt_entry_t pte_l2_l_cache_mode_pt; 5947pt_entry_t pte_l2_l_cache_mask; 5948 5949pt_entry_t pte_l2_s_cache_mode; 5950pt_entry_t pte_l2_s_wc_mode; 5951pt_entry_t pte_l2_s_cache_mode_pt; 5952pt_entry_t pte_l2_s_cache_mask; 5953 5954pt_entry_t pte_l1_s_prot_u; 5955pt_entry_t pte_l1_s_prot_w; 5956pt_entry_t pte_l1_s_prot_ro; 5957pt_entry_t pte_l1_s_prot_mask; 5958 5959pt_entry_t pte_l2_s_prot_u; 5960pt_entry_t pte_l2_s_prot_w; 5961pt_entry_t pte_l2_s_prot_ro; 5962pt_entry_t pte_l2_s_prot_mask; 5963 5964pt_entry_t pte_l2_l_prot_u; 5965pt_entry_t pte_l2_l_prot_w; 5966pt_entry_t pte_l2_l_prot_ro; 5967pt_entry_t pte_l2_l_prot_mask; 5968 5969pt_entry_t pte_l1_s_proto; 5970pt_entry_t pte_l1_c_proto; 5971pt_entry_t pte_l2_s_proto; 5972 5973void (*pmap_copy_page_func)(paddr_t, paddr_t); 5974void (*pmap_zero_page_func)(paddr_t); 5975 5976#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 5977void 5978pmap_pte_init_generic(void) 5979{ 5980 5981 pte_l1_s_cache_mode = L1_S_B|L1_S_C; 5982 pte_l1_s_wc_mode = L1_S_B; 5983 pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; 5984 5985 pte_l2_l_cache_mode = L2_B|L2_C; 5986 pte_l2_l_wc_mode = L2_B; 5987 pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; 5988 5989 pte_l2_s_cache_mode = L2_B|L2_C; 5990 pte_l2_s_wc_mode = L2_B; 5991 pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; 5992 5993 /* 5994 * If we have a write-through cache, set B and C. If 5995 * we have a write-back cache, then we assume setting 5996 * only C will make those pages write-through. 5997 */ 5998 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { 5999 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 6000 pte_l2_l_cache_mode_pt = L2_B|L2_C; 6001 pte_l2_s_cache_mode_pt = L2_B|L2_C; 6002 } else { 6003#if ARM_MMU_V6 > 1 6004 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; /* arm116 errata 399234 */ 6005 pte_l2_l_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */ 6006 pte_l2_s_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */ 6007#else 6008 pte_l1_s_cache_mode_pt = L1_S_C; 6009 pte_l2_l_cache_mode_pt = L2_C; 6010 pte_l2_s_cache_mode_pt = L2_C; 6011#endif 6012 } 6013 6014 pte_l1_s_prot_u = L1_S_PROT_U_generic; 6015 pte_l1_s_prot_w = L1_S_PROT_W_generic; 6016 pte_l1_s_prot_ro = L1_S_PROT_RO_generic; 6017 pte_l1_s_prot_mask = L1_S_PROT_MASK_generic; 6018 6019 pte_l2_s_prot_u = L2_S_PROT_U_generic; 6020 pte_l2_s_prot_w = L2_S_PROT_W_generic; 6021 pte_l2_s_prot_ro = L2_S_PROT_RO_generic; 6022 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 6023 6024 pte_l2_l_prot_u = L2_L_PROT_U_generic; 6025 pte_l2_l_prot_w = L2_L_PROT_W_generic; 6026 pte_l2_l_prot_ro = L2_L_PROT_RO_generic; 6027 pte_l2_l_prot_mask = L2_L_PROT_MASK_generic; 6028 6029 pte_l1_s_proto = L1_S_PROTO_generic; 6030 pte_l1_c_proto = L1_C_PROTO_generic; 6031 pte_l2_s_proto = L2_S_PROTO_generic; 6032 6033 pmap_copy_page_func = pmap_copy_page_generic; 6034 pmap_zero_page_func = pmap_zero_page_generic; 6035} 6036 6037#if defined(CPU_ARM8) 6038void 6039pmap_pte_init_arm8(void) 6040{ 6041 6042 /* 6043 * ARM8 is compatible with generic, but we need to use 6044 * the page tables uncached. 
6045 */
6046 pmap_pte_init_generic();
6047
6048 pte_l1_s_cache_mode_pt = 0;
6049 pte_l2_l_cache_mode_pt = 0;
6050 pte_l2_s_cache_mode_pt = 0;
6051}
6052#endif /* CPU_ARM8 */
6053
6054#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
6055void
6056pmap_pte_init_arm9(void)
6057{
6058
6059 /*
6060 * ARM9 is compatible with generic, but we want to use
6061 * write-through caching for now.
6062 */
6063 pmap_pte_init_generic();
6064
6065 pte_l1_s_cache_mode = L1_S_C;
6066 pte_l2_l_cache_mode = L2_C;
6067 pte_l2_s_cache_mode = L2_C;
6068
6069 pte_l1_s_wc_mode = L1_S_B;
6070 pte_l2_l_wc_mode = L2_B;
6071 pte_l2_s_wc_mode = L2_B;
6072
6073 pte_l1_s_cache_mode_pt = L1_S_C;
6074 pte_l2_l_cache_mode_pt = L2_C;
6075 pte_l2_s_cache_mode_pt = L2_C;
6076}
6077#endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
6078#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
6079
6080#if defined(CPU_ARM10)
6081void
6082pmap_pte_init_arm10(void)
6083{
6084
6085 /*
6086 * ARM10 is compatible with generic, but we want to use
6087 * write-through caching for now.
6088 */
6089 pmap_pte_init_generic();
6090
6091 pte_l1_s_cache_mode = L1_S_B | L1_S_C;
6092 pte_l2_l_cache_mode = L2_B | L2_C;
6093 pte_l2_s_cache_mode = L2_B | L2_C;
6094
6095 pte_l1_s_wc_mode = L1_S_B;
6096 pte_l2_l_wc_mode = L2_B;
6097 pte_l2_s_wc_mode = L2_B;
6098
6099 pte_l1_s_cache_mode_pt = L1_S_C;
6100 pte_l2_l_cache_mode_pt = L2_C;
6101 pte_l2_s_cache_mode_pt = L2_C;
6102
6103}
6104#endif /* CPU_ARM10 */
6105
6106#if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
6107void
6108pmap_pte_init_arm11(void)
6109{
6110
6111 /*
6112 * ARM11 is compatible with generic, but we want to use
6113 * write-through caching for now.
6114 */
6115 pmap_pte_init_generic();
6116
6117 pte_l1_s_cache_mode = L1_S_C;
6118 pte_l2_l_cache_mode = L2_C;
6119 pte_l2_s_cache_mode = L2_C;
6120
6121 pte_l1_s_wc_mode = L1_S_B;
6122 pte_l2_l_wc_mode = L2_B;
6123 pte_l2_s_wc_mode = L2_B;
6124
6125 pte_l1_s_cache_mode_pt = L1_S_C;
6126 pte_l2_l_cache_mode_pt = L2_C;
6127 pte_l2_s_cache_mode_pt = L2_C;
6128}
6129#endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */
6130
6131#if ARM_MMU_SA1 == 1
6132void
6133pmap_pte_init_sa1(void)
6134{
6135
6136 /*
6137 * The StrongARM SA-1 cache does not have a write-through
6138 * mode. So, do the generic initialization, then reset
6139 * the page table cache mode to B=1,C=1, and note that
6140 * the PTEs need to be sync'd.
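 *
 * "Sync'd" here means the usual pairing seen throughout this file:
 *
 *	*ptep = npte;
 *	PTE_SYNC(ptep);
 *
 * With pmap_needs_pte_sync set, PTE_SYNC() forces the D-cache line
 * holding the PTE out to memory so the hardware table walker sees the
 * update even though the tables themselves are mapped cacheable (the
 * exact definition lives in <arm/arm32/pmap.h>).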

#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_wc_mode = L1_S_B;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_wc_mode = L2_B;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_wc_mode = L2_B;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */

#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l1_s_prot_u = L1_S_PROT_U_xscale;
	pte_l1_s_prot_w = L1_S_PROT_W_xscale;
	pte_l1_s_prot_ro = L1_S_PROT_RO_xscale;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale;

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_ro = L2_S_PROT_RO_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l2_l_prot_u = L2_L_PROT_U_xscale;
	pte_l2_l_prot_w = L2_L_PROT_W_xscale;
	pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
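
/*
 * For reference (assumed mapping, not taken from this file): the PXA250/
 * PXA210 revision field checked above is believed to count steppings as
 * 0=A0, 1=A1, 2=B0, 3=B1, 4=B2, 5=C0, ..., which is why "< 5" selects
 * everything before C0.
 */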

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
void
xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
{
	extern vaddr_t xscale_minidata_clean_addr;
	extern vsize_t xscale_minidata_clean_size; /* already initialized */
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vsize_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PGSHIFT) & 0x3ff] =
#else
		pte[l2pte_index(va)] =
#endif
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
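
/*
 * Illustrative (hypothetical) call from a board's initarm(); the names
 * of the L1 page table and the clean-area allocation are examples only:
 *
 *	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 *	    minidataclean.pv_pa);
 */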

/*
 * Change the PTEs for the specified kernel mappings such that they
 * will use the mini data cache instead of the main data cache.
 */
void
pmap_uarea(vaddr_t va)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, pte;
	vaddr_t next_bucket, eva;

#if (ARM_NMMUS > 1)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + USPACE;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		KDASSERT(l2b != NULL);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];

		while (va < next_bucket) {
			pte = *ptep;
			if (!l2pte_minidata(pte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				*ptep = pte & ~L2_B;
			}
			ptep++;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
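
/*
 * For reference (assumed behaviour of the XScale attribute encoding, not
 * derived from this file): with the TEX X bit set, C=1,B=0 selects the
 * mini-data cache, which is why clearing L2_B above is enough to move an
 * already cacheable mapping from the main data cache to the mini-data
 * cache.
 */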
#endif /* ARM_MMU_XSCALE == 1 */


#if defined(CPU_ARM11MPCORE)

void
pmap_pte_init_arm11mpcore(void)
{

	/* cache mode is controlled by 5 bits (B, C, TEX) */
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6;
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	/* use extended small page (without APn, with TEX) */
	pte_l2_s_cache_mask = L2_XS_CACHE_MASK_armv6;
#else
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6c;
#endif

	/* write-back, write-allocate */
	pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
	pte_l2_l_cache_mode = L2_C | L2_B | L2_V6_L_TEX(0x01);
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_cache_mode = L2_C | L2_B | L2_V6_XS_TEX(0x01);
#else
	/* no TEX.  read-allocate */
	pte_l2_s_cache_mode = L2_C | L2_B;
#endif
	/*
	 * write-back, write-allocate for page tables.
	 */
	pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
	pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_V6_L_TEX(0x01);
#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_V6_XS_TEX(0x01);
#else
	pte_l2_s_cache_mode_pt = L2_C | L2_B;
#endif

	pte_l1_s_prot_u = L1_S_PROT_U_armv6;
	pte_l1_s_prot_w = L1_S_PROT_W_armv6;
	pte_l1_s_prot_ro = L1_S_PROT_RO_armv6;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6;

#if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
	pte_l2_s_prot_u = L2_S_PROT_U_armv6n;
	pte_l2_s_prot_w = L2_S_PROT_W_armv6n;
	pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n;

#else
	/* with AP[0..3] */
	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
#endif

#ifdef ARM11MPCORE_COMPAT_MMU
	/* with AP[0..3] */
	pte_l2_l_prot_u = L2_L_PROT_U_generic;
	pte_l2_l_prot_w = L2_L_PROT_W_generic;
	pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_armv6;
	pte_l1_c_proto = L1_C_PROTO_armv6;
	pte_l2_s_proto = L2_S_PROTO_armv6c;
#else
	pte_l2_l_prot_u = L2_L_PROT_U_armv6n;
	pte_l2_l_prot_w = L2_L_PROT_W_armv6n;
	pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;

	pte_l1_s_proto = L1_S_PROTO_armv6;
	pte_l1_c_proto = L1_C_PROTO_armv6;
	pte_l2_s_proto = L2_S_PROTO_armv6n;
#endif

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
	pmap_needs_pte_sync = 1;
}
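
/*
 * For reference (assumed ARMv6 encoding with TEX remap disabled, not
 * derived from this file): TEX=0b001 with C=1,B=1, as used above, selects
 * an outer and inner write-back, write-allocate memory type, whereas
 * C=1,B=1 with TEX=0b000 is write-back without write-allocate.
 */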
#endif /* CPU_ARM11MPCORE */


#if ARM_MMU_V7 == 1
void
pmap_pte_init_armv7(void)
{
	/*
	 * The ARMv7-A MMU is mostly compatible with generic.  If the
	 * AP field is zero, that now means "no access" rather than
	 * read-only.  The prototypes are a little different because of
	 * the XN bit.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7;

	pte_l1_s_prot_u = L1_S_PROT_U_armv7;
	pte_l1_s_prot_w = L1_S_PROT_W_armv7;
	pte_l1_s_prot_ro = L1_S_PROT_RO_armv7;
	pte_l1_s_prot_mask = L1_S_PROT_MASK_armv7;

	pte_l2_s_prot_u = L2_S_PROT_U_armv7;
	pte_l2_s_prot_w = L2_S_PROT_W_armv7;
	pte_l2_s_prot_ro = L2_S_PROT_RO_armv7;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_armv7;

	pte_l2_l_prot_u = L2_L_PROT_U_armv7;
	pte_l2_l_prot_w = L2_L_PROT_W_armv7;
	pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;

	pte_l1_s_proto = L1_S_PROTO_armv7;
	pte_l1_c_proto = L1_C_PROTO_armv7;
	pte_l2_s_proto = L2_S_PROTO_armv7;
}
#endif /* ARM_MMU_V7 */

/*
 * return the PA of the current L1 table, for use when handling a crash dump
 */
uint32_t pmap_kernel_L1_addr(void)
{
	return pmap_kernel()->pm_l1->l1_physaddr;
}

#if defined(DDB)
/*
 * A couple of ddb-callable functions for dumping pmaps
 */
void pmap_dump_all(void);
void pmap_dump(pmap_t);

void
pmap_dump_all(void)
{
	pmap_t pm;

	LIST_FOREACH(pm, &pmap_pmaps, pm_list) {
		if (pm == pmap_kernel())
			continue;
		pmap_dump(pm);
		printf("\n");
	}
}

static pt_entry_t ncptes[64];
static void pmap_dump_ncpg(pmap_t);
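
/*
 * In the dump below, each page is shown as one character: '.' for an
 * invalid PTE and, for valid PTEs, 'D' (uncached, unbuffered),
 * 'B' (buffered only), 'C' (cached only; shown as 'm' when PTE bit 0x40
 * is also set) or 'F' (cached and buffered).  The character is
 * lower-cased when the mapping is user-accessible.
 */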

void
pmap_dump(pmap_t pm)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vaddr_t l2_va, l2b_va, va;
	int i, j, k, occ, rows = 0;

	if (pm == pmap_kernel())
		printf("pmap_kernel (%p): ", pm);
	else
		printf("user pmap (%p): ", pm);

	printf("domain %d, l1 at %p\n", pm->pm_domain, pm->pm_l1->l1_kva);

	l2_va = 0;
	for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
		l2 = pm->pm_l2[i];

		if (l2 == NULL || l2->l2_occupancy == 0)
			continue;

		l2b_va = l2_va;
		for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
			l2b = &l2->l2_bucket[j];

			if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
				continue;

			ptep = l2b->l2b_kva;

			for (k = 0; k < 256 && ptep[k] == 0; k++)
				;

			k &= ~63;
			occ = l2b->l2b_occupancy;
			va = l2b_va + (k * 4096);
			for (; k < 256; k++, va += 0x1000) {
				char ch = ' ';
				if ((k % 64) == 0) {
					if ((rows % 8) == 0) {
						printf(
"          |0000   |8000   |10000  |18000  |20000  |28000  |30000  |38000\n");
					}
					printf("%08lx: ", va);
				}

				ncptes[k & 63] = 0;
				pte = ptep[k];
				if (pte == 0) {
					ch = '.';
				} else {
					occ--;
					switch (pte & 0x0c) {
					case 0x00:
						ch = 'D'; /* No cache No buff */
						break;
					case 0x04:
						ch = 'B'; /* No cache buff */
						break;
					case 0x08:
						if (pte & 0x40)
							ch = 'm';
						else
							ch = 'C'; /* Cache No buff */
						break;
					case 0x0c:
						ch = 'F'; /* Cache Buff */
						break;
					}

					if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
						ch += 0x20;

					if ((pte & 0xc) == 0)
						ncptes[k & 63] = pte;
				}

				if ((k % 64) == 63) {
					rows++;
					printf("%c\n", ch);
					pmap_dump_ncpg(pm);
					if (occ == 0)
						break;
				} else
					printf("%c", ch);
			}
		}
	}
}

static void
pmap_dump_ncpg(pmap_t pm)
{
	struct vm_page *pg;
	struct vm_page_md *md;
	struct pv_entry *pv;
	int i;

	for (i = 0; i < 64; i++) {
		if (ncptes[i] == 0)
			continue;

		pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
		if (pg == NULL)
			continue;
		md = VM_PAGE_TO_MD(pg);

		printf("   pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
		    VM_PAGE_TO_PHYS(pg),
		    md->krw_mappings, md->kro_mappings,
		    md->urw_mappings, md->uro_mappings);

		SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
			printf("   %c va 0x%08lx, flags 0x%x\n",
			    (pm == pv->pv_pmap) ? '*' : ' ',
			    pv->pv_va, pv->pv_flags);
		}
	}
}
#endif

#ifdef PMAP_STEAL_MEMORY
void
pmap_boot_pageadd(pv_addr_t *newpv)
{
	pv_addr_t *pv, *npv;

	if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) {
		if (newpv->pv_pa < pv->pv_pa) {
			KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa);
			if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) {
				newpv->pv_size += pv->pv_size;
				SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list);
			}
			pv = NULL;
		} else {
			for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL;
			     pv = npv) {
				KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa);
				KASSERT(pv->pv_pa < newpv->pv_pa);
				if (newpv->pv_pa > npv->pv_pa)
					continue;
				if (pv->pv_pa + pv->pv_size == newpv->pv_pa) {
					pv->pv_size += newpv->pv_size;
					return;
				}
				if (newpv->pv_pa + newpv->pv_size < npv->pv_pa)
					break;
				newpv->pv_size += npv->pv_size;
				SLIST_INSERT_AFTER(pv, newpv, pv_list);
				SLIST_REMOVE_AFTER(newpv, pv_list);
				return;
			}
		}
	}

	if (pv) {
		SLIST_INSERT_AFTER(pv, newpv, pv_list);
	} else {
		SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list);
	}
}
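
/*
 * pmap_boot_pagealloc() below hands out page-aligned boot memory whose
 * physical address satisfies ((pa + off) & mask) == match.  An
 * illustrative (hypothetical) call for a 16 KB-aligned L1 translation
 * table would therefore look like:
 *
 *	pv_addr_t pv;
 *
 *	pmap_boot_pagealloc(L1_TABLE_SIZE, L1_TABLE_SIZE - 1, 0, &pv);
 */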

void
pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match,
	pv_addr_t *rpv)
{
	pv_addr_t *pv, **pvp;
	struct vm_physseg *ps;
	size_t i;

	KASSERT((amount & PGOFSET) == 0);
	KASSERT((mask & PGOFSET) == 0);
	KASSERT((match & PGOFSET) == 0);
	KASSERT(amount != 0);

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {
		pv_addr_t *newpv;
		psize_t off;
		/*
		 * If this entry is too small to satisfy the request...
		 */
		KASSERT(pv->pv_size > 0);
		if (pv->pv_size < amount)
			continue;

		for (off = 0; off <= mask; off += PAGE_SIZE) {
			if (((pv->pv_pa + off) & mask) == match
			    && off + amount <= pv->pv_size)
				break;
		}
		if (off > mask)
			continue;

		rpv->pv_va = pv->pv_va + off;
		rpv->pv_pa = pv->pv_pa + off;
		rpv->pv_size = amount;
		pv->pv_size -= amount;
		if (pv->pv_size == 0) {
			KASSERT(off == 0);
			KASSERT((vaddr_t) pv == rpv->pv_va);
			*pvp = SLIST_NEXT(pv, pv_list);
		} else if (off == 0) {
			KASSERT((vaddr_t) pv == rpv->pv_va);
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_pa += amount;
			newpv->pv_va += amount;
			*pvp = newpv;
		} else if (off < pv->pv_size) {
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_size -= off;
			newpv->pv_pa += off + amount;
			newpv->pv_va += off + amount;

			SLIST_NEXT(pv, pv_list) = newpv;
			pv->pv_size = off;
		} else {
			KASSERT((vaddr_t) pv != rpv->pv_va);
		}
		memset((void *)rpv->pv_va, 0, amount);
		return;
	}

	if (vm_nphysseg == 0)
		panic("pmap_boot_pagealloc: couldn't allocate memory");

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {
		if (SLIST_NEXT(pv, pv_list) == NULL)
			break;
	}
	KASSERT(mask == 0);
	for (i = 0; i < vm_nphysseg; i++) {
		ps = VM_PHYSMEM_PTR(i);
		if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
		    && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
			rpv->pv_va = pv->pv_va;
			rpv->pv_pa = pv->pv_pa;
			rpv->pv_size = amount;
			*pvp = NULL;
			pmap_map_chunk(kernel_l1pt.pv_va,
			     ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
			     ptoa(ps->avail_start),
			     amount - pv->pv_size,
			     VM_PROT_READ|VM_PROT_WRITE,
			     PTE_CACHE);
			ps->avail_start += atop(amount - pv->pv_size);
			/*
			 * If we consumed the entire physseg, remove it.
			 */
			if (ps->avail_start == ps->avail_end) {
				for (--vm_nphysseg; i < vm_nphysseg; i++)
					VM_PHYSMEM_PTR_SWAP(i, i + 1);
			}
			memset((void *)rpv->pv_va, 0, rpv->pv_size);
			return;
		}
	}

	panic("pmap_boot_pagealloc: couldn't allocate memory");
}

vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	pv_addr_t pv;

	pmap_boot_pagealloc(size, 0, 0, &pv);

	return pv.pv_va;
}
#endif /* PMAP_STEAL_MEMORY */

SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "kmpages",
	    SYSCTL_DESCR("count of pages allocated to kernel memory allocators"),
	    NULL, 0, &pmap_kmpages, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
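
/*
 * The node created above can be inspected from userland with, e.g.,
 * "sysctl machdep.kmpages".
 */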