1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org>
5 * Copyright (c) 2015 The FreeBSD Foundation
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Konstantin Belousov
9 * under sponsorship from the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34#include "opt_vm.h"
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/kernel.h>
39#include <sys/lock.h>
40#include <sys/malloc.h>
41#include <sys/rwlock.h>
42#include <sys/stddef.h>
43#include <sys/sysent.h>
44#include <sys/sysctl.h>
45#include <sys/vdso.h>
46
47#include <vm/vm.h>
48#include <vm/vm_param.h>
49#include <vm/pmap.h>
50#include <vm/vm_extern.h>
51#include <vm/vm_kern.h>
52#include <vm/vm_map.h>
53#include <vm/vm_object.h>
54#include <vm/vm_page.h>
55#include <vm/vm_pager.h>
56
/*
 * State for the shared page: a single physical page that is carved up
 * by a simple bump allocator and permanently mapped into the kernel at
 * shared_page_mapping.
 */
static struct sx shared_page_alloc_sx;	/* serializes allocations */
static vm_object_t shared_page_obj;	/* backing VM object (OBJT_PHYS) */
static int shared_page_free;		/* offset of the first free byte */
char *shared_page_mapping;		/* permanent KVA of the page */

#ifdef RANDOM_FENESTRASX
/* Kernel-side view of the fxrng generation counter inside the shared page. */
static struct vdso_fxrng_generation *fxrng_shpage_mapping;

static bool fxrng_enabled = true;
SYSCTL_BOOL(_debug, OID_AUTO, fxrng_vdso_enable, CTLFLAG_RWTUN, &fxrng_enabled,
    0, "Enable FXRNG VDSO");
#endif
69
70void
71shared_page_write(int base, int size, const void *data)
72{
73
74	bcopy(data, shared_page_mapping + base, size);
75}
76
77static int
78shared_page_alloc_locked(int size, int align)
79{
80	int res;
81
82	res = roundup(shared_page_free, align);
83	if (res + size >= IDX_TO_OFF(shared_page_obj->size))
84		res = -1;
85	else
86		shared_page_free = res + size;
87	return (res);
88}
89
90int
91shared_page_alloc(int size, int align)
92{
93	int res;
94
95	sx_xlock(&shared_page_alloc_sx);
96	res = shared_page_alloc_locked(size, align);
97	sx_xunlock(&shared_page_alloc_sx);
98	return (res);
99}
100
101int
102shared_page_fill(int size, int align, const void *data)
103{
104	int res;
105
106	sx_xlock(&shared_page_alloc_sx);
107	res = shared_page_alloc_locked(size, align);
108	if (res != -1)
109		shared_page_write(res, size, data);
110	sx_xunlock(&shared_page_alloc_sx);
111	return (res);
112}
113
/*
 * Allocate the shared page: a single zeroed page backed by an
 * OBJT_PHYS VM object, with a permanent kernel mapping installed so
 * the kernel can update its contents directly through
 * shared_page_mapping.
 */
static void
shared_page_init(void *dummy __unused)
{
	vm_page_t m;
	vm_offset_t addr;

	sx_init(&shared_page_alloc_sx, "shpsx");
	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
	    VM_PROT_DEFAULT, 0, NULL);
	VM_OBJECT_WLOCK(shared_page_obj);
	/* Grab returns the page exclusively busied; zeroed on allocation. */
	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_ZERO);
	VM_OBJECT_WUNLOCK(shared_page_obj);
	/* Mark valid and drop the busy state before mapping it. */
	vm_page_valid(m);
	vm_page_xunbusy(m);
	/* Wire a permanent kernel virtual mapping of the page. */
	addr = kva_alloc(PAGE_SIZE);
	pmap_qenter(addr, &m, 1);
	shared_page_mapping = (char *)addr;
}

/* Initialized early in SI_SUB_EXEC so sysentvec registration can use it. */
SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);
135
/*
 * Push the timehands update to the shared page.
 *
 * The lockless update scheme is similar to the one used to update the
 * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which
 * calls us after the timehands are updated).
 */
static void
timehands_update(struct vdso_sv_tk *svtk)
{
	struct vdso_timehands th;
	struct vdso_timekeep *tk;
	uint32_t enabled, idx;

	enabled = tc_fill_vdso_timehands(&th);
	/*
	 * Stage the copy with th_gen == 0: generation zero tells a
	 * userspace reader the slot is invalid, so the structure copy
	 * below need not be atomic.
	 */
	th.th_gen = 0;
	/* Advance to the next slot in the ring of VDSO_TH_NUM timehands. */
	idx = svtk->sv_timekeep_curr;
	if (++idx >= VDSO_TH_NUM)
		idx = 0;
	svtk->sv_timekeep_curr = idx;
	/* Generation 0 means "invalid"; skip it when the counter wraps. */
	if (++svtk->sv_timekeep_gen == 0)
		svtk->sv_timekeep_gen = 1;

	tk = (struct vdso_timekeep *)(shared_page_mapping +
	    svtk->sv_timekeep_off);
	/*
	 * Invalidate the slot, then fence so the invalidation is
	 * visible before the new contents start landing.
	 */
	tk->tk_th[idx].th_gen = 0;
	atomic_thread_fence_rel();
	if (enabled)
		tk->tk_th[idx] = th;
	/*
	 * Publish: release stores order the payload before the new
	 * generation and current-index become visible to readers.
	 */
	atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen);
	atomic_store_rel_32(&tk->tk_current, idx);

	/*
	 * The ordering of the assignment to tk_enabled relative to
	 * the update of the vdso_timehands is not important.
	 */
	tk->tk_enabled = enabled;
}
174
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of timehands_update(); identical lockless
 * generation protocol, operating on the vdso_timekeep32 layout.
 */
static void
timehands_update32(struct vdso_sv_tk *svtk)
{
	struct vdso_timehands32 th;
	struct vdso_timekeep32 *tk;
	uint32_t enabled, idx;

	enabled = tc_fill_vdso_timehands32(&th);
	/* Stage with th_gen == 0 so readers treat the slot as invalid. */
	th.th_gen = 0;
	idx = svtk->sv_timekeep_curr;
	if (++idx >= VDSO_TH_NUM)
		idx = 0;
	svtk->sv_timekeep_curr = idx;
	/* Generation 0 is reserved for "invalid"; skip it on wrap. */
	if (++svtk->sv_timekeep_gen == 0)
		svtk->sv_timekeep_gen = 1;

	tk = (struct vdso_timekeep32 *)(shared_page_mapping +
	    svtk->sv_timekeep_off);
	/* Invalidate, fence, copy, then publish with release stores. */
	tk->tk_th[idx].th_gen = 0;
	atomic_thread_fence_rel();
	if (enabled)
		tk->tk_th[idx] = th;
	atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen);
	atomic_store_rel_32(&tk->tk_current, idx);
	tk->tk_enabled = enabled;
}
#endif
203
/*
 * This is hackish, but easiest way to avoid creating list structures
 * that needs to be iterated over from the hardclock interrupt
 * context.
 */
/* Timekeep registration for the native ABI, NULL until registered. */
static struct vdso_sv_tk *host_svtk;
#ifdef COMPAT_FREEBSD32
/* Timekeep registration for the 32-bit compat ABI. */
static struct vdso_sv_tk *compat32_svtk;
#endif
213
/*
 * Propagate the freshly wound timehands into every registered shared
 * page timekeep area (native, and compat32 when configured).  See the
 * comment above timehands_update(): this is called after the in-kernel
 * timehands update in tc_windup().
 */
void
timekeep_push_vdso(void)
{

	if (host_svtk != NULL)
		timehands_update(host_svtk);
#ifdef COMPAT_FREEBSD32
	if (compat32_svtk != NULL)
		timehands_update32(compat32_svtk);
#endif
}
225
226struct vdso_sv_tk *
227alloc_sv_tk(void)
228{
229	struct vdso_sv_tk *svtk;
230	int tk_base;
231	uint32_t tk_ver;
232
233	tk_ver = VDSO_TK_VER_CURR;
234	svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO);
235	tk_base = shared_page_alloc(sizeof(struct vdso_timekeep) +
236	    sizeof(struct vdso_timehands) * VDSO_TH_NUM, 16);
237	KASSERT(tk_base != -1, ("tk_base -1 for native"));
238	shared_page_write(tk_base + offsetof(struct vdso_timekeep, tk_ver),
239	    sizeof(uint32_t), &tk_ver);
240	svtk->sv_timekeep_off = tk_base;
241	timekeep_push_vdso();
242	return (svtk);
243}
244
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit counterpart of alloc_sv_tk(): reserve and version-stamp the
 * compat32 timekeep area in the shared page.
 */
struct vdso_sv_tk *
alloc_sv_tk_compat32(void)
{
	struct vdso_sv_tk *res;
	uint32_t ver;
	int off;

	res = malloc(sizeof(*res), M_TEMP, M_WAITOK | M_ZERO);
	off = shared_page_alloc(sizeof(struct vdso_timekeep32) +
	    VDSO_TH_NUM * sizeof(struct vdso_timehands32), 16);
	KASSERT(off != -1, ("tk_base -1 for 32bit"));
	ver = VDSO_TK_VER_CURR;
	shared_page_write(off + offsetof(struct vdso_timekeep32,
	    tk_ver), sizeof(uint32_t), &ver);
	res->sv_timekeep_off = off;
	/* Populate the new area immediately with the current timehands. */
	timekeep_push_vdso();
	return (res);
}
#endif
265
266#ifdef RANDOM_FENESTRASX
267void
268fxrng_push_seed_generation(uint64_t gen)
269{
270	if (fxrng_shpage_mapping == NULL || !fxrng_enabled)
271		return;
272	KASSERT(gen < INT32_MAX,
273	    ("fxrng seed version shouldn't roll over a 32-bit counter "
274	     "for approximately 456,000 years"));
275	atomic_store_rel_32(&fxrng_shpage_mapping->fx_generation32,
276	    (uint32_t)gen);
277}
278
279static void
280alloc_sv_fxrng_generation(void)
281{
282	int base;
283
284	/*
285	 * Allocate a full cache line for the fxrng root generation (64-bit
286	 * counter, or truncated 32-bit counter on ILP32 userspace).  It is
287	 * important that the line is not shared with frequently dirtied data,
288	 * and the shared page allocator lacks a __read_mostly mechanism.
289	 * However, PAGE_SIZE is typically large relative to the amount of
290	 * stuff we've got in it so far, so maybe the possible waste isn't an
291	 * issue.
292	 */
293	base = shared_page_alloc(CACHE_LINE_SIZE, CACHE_LINE_SIZE);
294	KASSERT(base != -1, ("%s: base allocation failed", __func__));
295	fxrng_shpage_mapping = (void *)(shared_page_mapping + base);
296	*fxrng_shpage_mapping = (struct vdso_fxrng_generation) {
297		.fx_vdso_version = VDSO_FXRNG_VER_CURR,
298	};
299}
300#endif /* RANDOM_FENESTRASX */
301
/*
 * Per-sysentvec shared page setup, run when a sysentvec with SV_SHP is
 * registered: install the shared page object, copy the signal
 * trampoline (or the full vdso) into the page, and wire up the
 * timekeep and fxrng areas according to sv_flags.
 */
void
exec_sysvec_init(void *param)
{
	struct sysentvec *sv;
	u_int flags;
	int res;

	sv = param;
	flags = sv->sv_flags;
	/* Nothing to do for ABIs that do not map the shared page. */
	if ((flags & SV_SHP) == 0)
		return;
	MPASS(sv->sv_shared_page_obj == NULL);
	MPASS(sv->sv_shared_page_base != 0);

	sv->sv_shared_page_obj = shared_page_obj;
	if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
		if ((flags & SV_DSO_SIG) != 0) {
			/*
			 * DSO-style sysvec: sv_szsigcode holds the vdso
			 * size itself (hence the cast of the pointer
			 * value), and the signal code lives at
			 * sv_sigcodeoff within the copied vdso.
			 */
			res = shared_page_fill((uintptr_t)sv->sv_szsigcode,
			    16, sv->sv_sigcode);
			if (res == -1)
				panic("copying vdso to shared page");
			sv->sv_vdso_offset = res;
			sv->sv_sigcode_offset = res + sv->sv_sigcodeoff;
		} else {
			/* Plain trampoline: sv_szsigcode points to the size. */
			res = shared_page_fill(*(sv->sv_szsigcode),
			    16, sv->sv_sigcode);
			if (res == -1)
				panic("copying sigtramp to shared page");
			sv->sv_sigcode_offset = res;
		}
	}
	if ((flags & SV_TIMEKEEP) != 0) {
#ifdef COMPAT_FREEBSD32
		if ((flags & SV_ILP32) != 0) {
			/*
			 * The native-FreeBSD sysvec allocates the
			 * timekeep area; non-FreeBSD (emulation) ABIs
			 * must register after it and reuse the area.
			 */
			if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
				KASSERT(compat32_svtk == NULL,
				    ("Compat32 already registered"));
				compat32_svtk = alloc_sv_tk_compat32();
			} else {
				KASSERT(compat32_svtk != NULL,
				    ("Compat32 not registered"));
			}
			sv->sv_timekeep_offset = compat32_svtk->sv_timekeep_off;
		} else {
#endif
			/* Same registration rule for the native word size. */
			if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
				KASSERT(host_svtk == NULL,
				    ("Host already registered"));
				host_svtk = alloc_sv_tk();
			} else {
				KASSERT(host_svtk != NULL,
				    ("Host not registered"));
			}
			sv->sv_timekeep_offset = host_svtk->sv_timekeep_off;
#ifdef COMPAT_FREEBSD32
		}
#endif
	}
#ifdef RANDOM_FENESTRASX
	if ((flags & (SV_ABI_MASK | SV_RNG_SEED_VER)) ==
	    (SV_ABI_FREEBSD | SV_RNG_SEED_VER)) {
		/*
		 * Only allocate a single VDSO entry for multiple sysentvecs,
		 * i.e., native and COMPAT32.
		 */
		if (fxrng_shpage_mapping == NULL)
			alloc_sv_fxrng_generation();
		sv->sv_fxrng_gen_offset =
		    (char *)fxrng_shpage_mapping - shared_page_mapping;
	}
#endif
}
374
375void
376exec_sysvec_init_secondary(struct sysentvec *sv, struct sysentvec *sv2)
377{
378	MPASS((sv2->sv_flags & SV_ABI_MASK) == (sv->sv_flags & SV_ABI_MASK));
379	MPASS((sv2->sv_flags & SV_TIMEKEEP) == (sv->sv_flags & SV_TIMEKEEP));
380	MPASS((sv2->sv_flags & SV_SHP) != 0 && (sv->sv_flags & SV_SHP) != 0);
381	MPASS((sv2->sv_flags & SV_DSO_SIG) == (sv->sv_flags & SV_DSO_SIG));
382	MPASS((sv2->sv_flags & SV_RNG_SEED_VER) ==
383	    (sv->sv_flags & SV_RNG_SEED_VER));
384
385	sv2->sv_shared_page_obj = sv->sv_shared_page_obj;
386	sv2->sv_sigcode_offset = sv->sv_sigcode_offset;
387	sv2->sv_vdso_offset = sv->sv_vdso_offset;
388	if ((sv2->sv_flags & SV_ABI_MASK) != SV_ABI_FREEBSD)
389		return;
390	sv2->sv_timekeep_offset = sv->sv_timekeep_offset;
391	sv2->sv_fxrng_gen_offset = sv->sv_fxrng_gen_offset;
392}
393