#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: stable/10/sys/powerpc/powerpc/mmu_if.m 270920 2014-09-01 07:58:15Z kib $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are, for the most part, direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 *@{
 */

INTERFACE mmu;

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_offset_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};

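/*
 * How implementations attach to this interface: the interface compiler
 * turns each METHOD below into an MMU_*() dispatch stub, and an MMU
 * implementation supplies a method table and registers it with MMU_DEF()
 * from <machine/mmuvar.h>. A minimal sketch of the pattern, with
 * illustrative "mymmu" names (see mmu_oea.c or mmu_oea64.c for real,
 * complete tables):
 *
 *	static mmu_method_t mymmu_methods[] = {
 *		MMUMETHOD(mmu_enter,	mymmu_enter),
 *		MMUMETHOD(mmu_extract,	mymmu_extract),
 *		MMUMETHOD(mmu_remove,	mymmu_remove),
 *		{ 0, 0 }
 *	};
 *
 *	MMU_DEF(mymmu, "mymmu", mymmu_methods, 0);
 */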

/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};

/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default implementation is a no-op.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

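/**
 * @brief Copy _xfersize bytes from the source page array, starting at
 * _a_offset, to the destination page array at _b_offset. The copied run
 * may cross page boundaries within either array.
 *
 * @param *_ma		source array of physical pages
 * @param _a_offset	byte offset into the source array
 * @param *_mb		destination array of physical pages
 * @param _b_offset	byte offset into the destination array
 * @param _xfersize	number of bytes to copy
 */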
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _flags	pmap_enter flags
 * @param _psind	superpage size index
 */
METHOD int enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	u_int		_flags;
	int8_t		_psind;
};

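/*
 * The machine-dependent pmap layer reaches these methods through thin
 * call-through wrappers (see pmap_dispatch.c), using the MMU_*() stubs
 * the interface compiler generates from the METHOD entries here. A
 * sketch of the wrapper for the enter method, where mmu_obj is the
 * installed MMU KObj instance:
 *
 *	int
 *	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
 *	    u_int flags, int8_t psind)
 *	{
 *
 *		return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
 *	}
 */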

/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};

/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to see if the page is
 *			executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * can use it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine
 * is for MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return if the page has been marked by MMU hardware to have been
 * modified
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try and eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed VM object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};

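/*
 * A sketch of how the qenter/qremove pair is typically used: wire a set
 * of pages into a kernel virtual address window for temporary bulk
 * access, then tear the window down. The kva, pages and npages names
 * are illustrative only:
 *
 *	vm_offset_t kva = kva_alloc(npages * PAGE_SIZE);
 *
 *	pmap_qenter(kva, pages, npages);
 *	... access the pages contiguously through kva ...
 *	pmap_qremove(kva, npages);
 *	kva_free(kva, npages * PAGE_SIZE);
 */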

/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
 * should be no existing mappings for the physical map at this point
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings from the given physical map. Similar to the
 * remove method, but it is used when tearing down an entire address
 * space. This method is optional, since pmap_remove will be called for
 * each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;

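/*
 * The null default leaves *addr unchanged. An implementation that
 * supports superpages would typically nudge *addr so that it shares its
 * superpage offset with _offset, in the style of pmap_align_superpage()
 * on other platforms. A sketch, assuming a hypothetical SP_SIZE
 * superpage size:
 *
 *	vm_offset_t sp_offset = offset & (SP_SIZE - 1);
 *
 *	if (size >= SP_SIZE && (*addr & (SP_SIZE - 1)) != sp_offset)
 *		*addr = (*addr & ~(vm_offset_t)(SP_SIZE - 1)) + sp_offset;
 */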



/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time klds to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};

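/*
 * A sketch of the mapdev/unmapdev lifetime as seen from a driver,
 * through the pmap wrappers (the pa and size values are illustrative):
 *
 *	void *regs = pmap_mapdev(pa, size);	(at attach)
 *	... access device registers through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, size);	(at detach)
 */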

/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};

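/*
 * kenter and kextract are inverses over the kernel address space; a
 * sketch of the round trip through the pmap wrappers:
 *
 *	pmap_kenter(va, pa);
 *	KASSERT(pmap_kextract(va) == pa, ("kernel mapping lost"));
 */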

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};

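/*
 * For example, after ptrace(2) writes a breakpoint into a traced
 * process, the kernel makes the store visible to instruction fetch; a
 * sketch using the pmap wrapper (va is the patched address in pmap pm):
 *
 *	pmap_sync_icache(pm, va, sizeof(uint32_t));
 */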

/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;

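/*
 * scan_md, dumpsys_map and dumpsys_unmap cooperate when taking a kernel
 * core dump: the dump code walks the memory chunks, temporarily mapping
 * each piece while it is written out. A sketch of the iteration through
 * the pmap wrappers, assuming the chunk's size is available as md_size
 * (error handling and the actual write omitted):
 *
 *	struct pmap_md *md;
 *	vm_offset_t va;
 *	vm_size_t ofs, sz;
 *
 *	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
 *		for (ofs = 0; ofs < md->md_size; ofs += sz) {
 *			sz = md->md_size - ofs;
 *			va = pmap_dumpsys_map(md, ofs, &sz);
 *			... write sz bytes at va to the dump device ...
 *			pmap_dumpsys_unmap(md, ofs, va);
 *		}
 *	}
 */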