/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
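
/*
 * For illustration only (this sketch is not part of the dispatch
 * code): an MMU implementation provides a kobj method table and
 * registers it on the mmu_set linker set, typically via the
 * MMU_DEF() macro from <machine/mmuvar.h>, roughly:
 *
 *	static mmu_method_t moea_methods[] = {
 *		MMUMETHOD(mmu_enter,	moea_enter),
 *		...
 *		{ 0, 0 }
 *	};
 *	MMU_DEF(oea, MMU_TYPE_OEA, moea_methods, 0);
 *
 * Early machine-dependent startup code then selects an implementation
 * by name with pmap_mmu_install(). The identifiers above are borrowed
 * from the AIM OEA pmap purely as an example.
 */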

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"

static mmu_def_t	*mmu_def_impl;
static mmu_t		mmu_obj;
static struct mmu_kobj	mmu_kernel_obj;
static struct kobj_ops	mmu_kernel_kops;

/*
 * pmap globals
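 *
 * These are filled in by the installed MMU implementation during
 * bootstrap: the statically allocated kernel pmap, the kernel
 * message buffer mapping, the phys_avail[] table of free physical
 * memory ranges, and the bounds of the kernel's usable virtual
 * address space.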
 */
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

int pmap_bootstrapped;

#ifdef AIM
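/*
 * RB-tree comparator used to keep a pmap's PVO entries sorted by
 * virtual address; shared by the AIM MMU implementations.
 */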
int
pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
{
	if (PVO_VADDR(a) < PVO_VADDR(b))
		return (-1);
	else if (PVO_VADDR(a) > PVO_VADDR(b))
		return (1);
	return (0);
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif

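/*
 * MI pmap entry points. Each wrapper below logs a KTR_PMAP trace
 * record and dispatches through the corresponding method of the
 * installed MMU kobj.
 */
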
void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
	    advice);
	MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x, %d)", __func__, pmap,
	    va, p, prot, flags, psind);
	return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}

int
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

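/*
 * Initialize a preallocated pmap structure. The MI layer allows
 * this to fail, but the powerpc implementations always succeed,
 * so success is reported unconditionally.
 */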
int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_UNWIRE(mmu_obj, pmap, start, end);
}

void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

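/*
 * Make the pmap backing the given thread's vmspace the active
 * translation context on this CPU; pmap_deactivate() undoes this
 * when the thread switches out.
 */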
void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
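/*
 * Bind the highest-priority implementation selected by
 * pmap_mmu_install() to the statically allocated kobj and hand
 * control to its bootstrap method. This runs long before the VM
 * system and kernel malloc are up, hence the static kobj
 * compile/init variants.
 */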
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}

void
pmap_cpu_bootstrap(int ap)
{
	/*
	 * No KTR here because our console probably doesn't work yet
	 */

	MMU_CPU_BOOTSTRAP(mmu_obj, ap);
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma);
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}

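/*
 * Kernel address space helpers: pmap_kextract() translates a kernel
 * virtual address to its physical address, while pmap_kenter() and
 * pmap_kenter_attr() install wired mappings in the kernel pmap.
 */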
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	MMU_SYNC_ICACHE(mmu_obj, pm, va, sz);
}

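/*
 * Kernel core dump support: dumpsys walks the memory regions
 * enumerated by pmap_scan_md() and uses the two hooks below to
 * temporarily map and unmap each chunk while it is written out.
 */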
vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}

void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
	MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va);
}

struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
	return (MMU_SCAN_MD(mmu_obj, prev));
}

/*
 * MMU install routines. Highest priority wins, equal priority also
 * overrides allowing last-set to win.
 */
SET_DECLARE(mmu_set, mmu_def_t);

boolean_t
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t	**mmupp, *mmup;
	static int	curr_prio = 0;

	/*
	 * Try to locate the MMU kobj corresponding to the name
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
			curr_prio = prio;
			mmu_def_impl = mmup;
			return (TRUE);
		}
	}

	return (FALSE);
}
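
/*
 * Example (illustrative, continuing the MMU_DEF() sketch near the
 * top of this file): early MD startup might run
 *
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 *
 * and a later call with a higher priority would override that
 * selection, provided it happens before pmap_bootstrap().
 */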
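/*
 * Consulted by the buffer cache: nonzero would permit buffers without
 * a KVA mapping. It is left at zero here, so unmapped buffer I/O is
 * disabled on powerpc.
 */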
int unmapped_buf_allowed;