/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
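
/*
 * Illustrative userland usage of the interfaces implemented here (a
 * sketch only: the object name "/myshm" and the length are arbitrary
 * and error handling is omitted):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	p[0] = 1;
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/myshm");
 */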

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/uipc_shm.c 325783 2017-11-13 23:21:17Z jamie $");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static int	shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void	shm_init(void *arg);
static void	shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate(struct shmfd *shmfd, off_t length);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_poll_t	shm_poll;
static fo_kqfilter_t	shm_kqfilter;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;

/* File descriptor operations. */
static struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = shm_poll,
	.fo_kqfilter = shm_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

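/*
 * Transfer at most one page of data between the backing VM object and
 * the uio, starting at the current uio offset.  The page is grabbed
 * exclusive-busy, validated from the pager if necessary, and then only
 * held (not busied) across the copy so that the object lock can be
 * dropped while uiomove_fromphys() runs.  Writes dirty the page and
 * discard any stale copy in swap.
 */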
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock may be held here (this helper
	 * is also used by tmpfs), it is nonetheless safe to sleep
	 * waiting for a free page.  The pageout daemon does not need
	 * to acquire the vnode lock to page out obj's pages because
	 * obj is an OBJT_SWAP type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, 0);
			m = vm_page_lookup(obj, idx);
			if (m == NULL) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
				    obj, idx, rv);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

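/*
 * Copy data between a VM object and a uio, one page at a time, stopping
 * at obj_size, at the end of the uio, or on the first error or short
 * transfer.
 */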
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

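/*
 * lseek(2) for a shared memory descriptor: compute the new offset
 * relative to the start of the object, the current offset, or the
 * object size, rejecting values that overflow or fall outside
 * [0, shm_size].
 */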
static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			*(off_t *)(td->td_retval) = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data,
    struct ucred *active_cred, struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_kqfilter(struct file *fp, struct knote *kn)
{

	return (EOPNOTSUPP);
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

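/*
 * Resize the backing VM object.  Shrinking is refused while the object
 * is mapped into the kernel; otherwise the partial last page is zeroed,
 * truncated pages and their swap are discarded, and the corresponding
 * swap accounting is released.  Growing reserves additional swap
 * accounting up front and fails with ENOMEM if the reservation cannot
 * be made.
 */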
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Release the swap accounting for the truncated range. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = 0;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

static struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
static int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

/* System calls. */
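/*
 * shm_open(2): create an anonymous object when SHM_ANON is passed as
 * the path, otherwise look up (and, with O_CREAT, possibly create) a
 * named object in the path dictionary.  Named paths are prefixed with
 * the caller's jail root so that jails see disjoint namespaces.
 */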
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, O_CLOEXEC);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		pr_path = td->td_ucred->cr_prison->pr_path;

		/* Construct a full pathname for jailed callers. */
		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
		    : strlcpy(path, pr_path, MAXPATHLEN);
		error = copyinstr(uap->path, path + pr_pathlen,
		    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[pr_pathlen] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
	    NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

/*
 * mmap() helper to validate mmap() requests against shm object state
 * and give mmap() the vm_object to use for the mapping.
 */
int
shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
    vm_object_t *obj)
{

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);
	*obj = shmfd->shm_object;
	return (0);
}

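/*
 * fchmod(2) and fchown(2) handlers for shared memory descriptors.  The
 * mode and ownership fields are protected by shm_timestamp_lock, which
 * also covers their use in shm_stat() and shm_access().
 */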
static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
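
/*
 * Illustrative in-kernel use of these helpers (a sketch only; "fp" is
 * assumed to be a held file of type DTYPE_SHM and "len" a length that
 * fits within the object):
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, 0, &mem);
 *	if (error == 0) {
 *		... access len bytes at mem ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */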
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

void
shm_path(struct shmfd *shmfd, char *path, size_t size)
{
	const char *shm_path, *pr_path;
	size_t pr_pathlen;

	if (shmfd->shm_path == NULL)
		return;
	sx_slock(&shm_dict_lock);
	shm_path = shmfd->shm_path;
	if (shm_path != NULL) {
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			if (strncmp(shm_path, pr_path, pr_pathlen) == 0 &&
			    shm_path[pr_pathlen] == '/')
				shm_path += pr_pathlen;
		}
		strlcpy(path, shm_path, size);
	}
	sx_sunlock(&shm_dict_lock);
}