/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/powerpc/aim/slb.c 287945 2015-09-17 23:31:44Z rstone $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0
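
/*
 * The tree is built lazily and is effectively path-compressed: a leaf
 * (level 0) may hang directly off the root (level 8), and intermediate
 * nodes are only created, by make_intermediate(), once two esids that
 * collide in an existing node first diverge below it.
 */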

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field must have zeroes in its low-order 4*(level+1)
 * bits; i.e. only the high-order bits that locate this node in the
 * tree may be set.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}

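/*
 * Worked example (illustrative values only): for esid 0x123456789,
 * esid2idx() selects nibble 0x9 at level 0, 0x8 at level 1 and 0x1 at
 * level 8, while esid2base() yields 0x123456780 at level 0 and 0 at the
 * root level, matching the level diagram above.
 */
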
/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	mb();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode *
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
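	/*
	 * For instance (illustrative numbers): with an existing level-0
	 * child based at 0x000000100 and a new esid of 0x000000234, the
	 * bases first agree at level 2, so the intermediate node is created
	 * at level 2 (base 0), taking the old child at index 1 and, later,
	 * the new leaf at index 2.
	 */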
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	mb();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If a direct map has been set up, assume all
		 * physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

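	/*
	 * No free slot was found (or the cache was already marked full):
	 * evict a pseudo-random victim, using the timebase as a cheap
	 * source of randomness.
	 */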
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
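	/* The low-order bits of the SLBE hold the slot index used by slbmte. */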
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
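		/* Cache is full: overwrite a pseudo-random entry. */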
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

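/*
 * Backing-store allocator for the SLB zones: hand UMA wired pages
 * allocated below platform_real_maxaddr(), so that the SLB cache and
 * tree nodes remain addressable with translation off (they can be
 * referenced from the low-level SLB fault path).
 */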
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;
	int pflags;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,
		    PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = (void *) VM_PAGE_TO_PHYS(m);

	if (!hw_direct_map)
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}