/* $FreeBSD: head/sys/kern/sysv_shm.c 76972 2001-05-22 03:56:26Z dd $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_rlimit.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

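/*
 * Dispatch table for the old shmsys(2) multiplexer below:
 * which == 0 selects shmat, 1 oshmctl, 2 shmdt, 3 shmget, 4 shmctl.
 */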
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

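/*
 * Per-process attach state: shmat() hangs an array of shminfo.shmseg of
 * these off p->p_vmspace->vm_shm; a slot whose shmid is -1 is free.
 */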
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void));
static int sysvshm_modload __P((struct module *, int, void *));
static int shmunload __P((void));
static void shmexit_myhook __P((struct proc *p));
static void shmfork_myhook __P((struct proc *p1, struct proc *p2));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

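/*
 * Illustrative only: the defaults above are normally overridden with
 * kernel config options (delivered through the opt_sysvipc.h included
 * at the top of this file), e.g.
 *
 *	options	SHMMAXPGS=16384		# example value, not a recommendation
 *	options	SHMMNI=512
 */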
struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

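/*
 * A hypothetical tuning session from userland via the sysctls above:
 *
 *	# sysctl -w kern.ipc.shmmax=67108864
 *	# sysctl -w kern.ipc.shm_use_phys=1
 *
 * shmmni and shmseg are deliberately read-only (CTLFLAG_RD): the
 * segment and per-process tables are sized from them.
 */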
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

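/*
 * A shmid packs a slot index and a generation sequence (IXSEQ_TO_IPCID
 * in <sys/ipc.h>, roughly (seq << 16) | ix), so a stale id that names a
 * recycled slot fails the seq comparison below rather than silently
 * matching the slot's new occupant.
 */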
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	/* for vm_object_deallocate */
	mtx_assert(&vm_mtx, MA_OWNED);
	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	/* for vm_map_remove and shm_deallocate_segment */
	mtx_assert(&vm_mtx, MA_OWNED);
	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;
	int error;

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	mtx_lock(&vm_mtx);
	error = shm_delete_mapping(p, shmmap_s);
	mtx_unlock(&vm_mtx);
	return error;
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
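	/*
	 * A caller-supplied address must be SHMLBA-aligned.  SHM_RND asks
	 * us to round it down on the caller's behalf: with a hypothetical
	 * 4K SHMLBA, a shmaddr of 0x2000f123 would attach at 0x2000f000.
	 */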
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	mtx_lock(&vm_mtx);
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		mtx_unlock(&vm_mtx);
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);
	mtx_unlock(&vm_mtx);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			mtx_lock(&vm_mtx);
			shm_deallocate_segment(shmseg);
			mtx_unlock(&vm_mtx);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	mtx_lock(&vm_mtx);
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
	mtx_unlock(&vm_mtx);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}

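/*
 * A minimal userland round trip through this API (an illustrative
 * sketch, not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | S_IRUSR | S_IWUSR);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 'x';
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * Had the segment still been attached, IPC_RMID would only have marked
 * it SHMSEG_REMOVED; the final detach (shm_nattch reaching 0 in
 * shm_delete_mapping() above) performs the real deallocation.
 */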
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (!jail_sysvipc_allowed && jailed(p->p_ucred))
		return (ENOSYS);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

static void
shmexit_myhook(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	/* shm_delete_mapping requires this */
	mtx_assert(&vm_mtx, MA_OWNED);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit()
{
	int i;

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys, 4);
SYSCALL_MODULE_HELPER(shmat, 3);
SYSCALL_MODULE_HELPER(shmctl, 3);
SYSCALL_MODULE_HELPER(shmdt, 1);
SYSCALL_MODULE_HELPER(shmget, 3);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);