sysv_shm.c revision 114724
/* $FreeBSD: head/sys/kern/sysv_shm.c 114724 2003-05-05 09:22:58Z mbr $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
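
/*
 * These segment state flags are kept in the high bits of shm_perm.mode,
 * above the ACCESSPERMS (0777) permission bits, so state and permissions
 * can share one field; see the mode masking in kern_shmctl() and
 * shmget_allocate_segment() below.
 */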

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};
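
/*
 * Each process carries a private array of shminfo.shmseg shmmap_state
 * slots, hung off p_vmspace->vm_shm.  A slot records the attach address
 * and shmid of one mapping; shmid == -1 marks the slot free (see
 * kern_shmat() and shmfork_myhook() below).
 */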

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int, int);
static struct shmid_ds *shm_find_segment_by_shmidx(int, int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
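
/*
 * The CTLFLAG_RW limits above can be tuned on a running system; an
 * illustrative sysctl(8) invocation (values are arbitrary examples):
 *
 *	sysctl kern.ipc.shmmax=67108864
 *	sysctl kern.ipc.shm_use_phys=1
 */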

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}
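
/*
 * A shmid handed to userland packs the slot index into its low 16 bits
 * and a generation sequence into the high bits, so a stale id for a
 * recycled slot can be detected.  The IPCID_TO_IX(), IPCID_TO_SEQ(), and
 * IXSEQ_TO_IPCID() macros from <sys/ipc.h> implement the split, roughly:
 *
 *	IPCID_TO_IX(id)		((id) & 0xffff)
 *	IPCID_TO_SEQ(id)	(((id) >> 16) & 0xffff)
 */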

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid, int wantrem)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if (!((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) ||
	    (wantrem && !(shmseg->shm_perm.mode & SHMSEG_REMOVED))) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum, int wantrem)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if (!((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) ||
	    (wantrem && !(shmseg->shm_perm.mode & SHMSEG_REMOVED))))
		return (NULL);
	return (shmseg);
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	GIANT_REQUIRED;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free(shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}
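
/*
 * Note the deferred-removal protocol above: IPC_RMID only marks a
 * segment SHMSEG_REMOVED, and the backing object is reclaimed here once
 * the last mapping goes away (shm_nattch drops to zero).
 */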

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif
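
/*
 * kern_shmat() is the shared back end for the native shmat() below and
 * for callers that pass their arguments in directly; the wantrem flag
 * lets such a caller (presumably an emulation layer) attach a segment
 * that is already marked SHMSEG_REMOVED.
 */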

/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg, wantrem)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
	int wantrem;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid, wantrem);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
		    + maxtsiz + maxdsiz);
	}
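	/*
	 * For example (illustrative numbers only): with SHMLBA equal to
	 * PAGE_SIZE (4096), SHM_RND truncates a shmaddr of 0x20123 down
	 * to 0x20000, while without SHM_RND that same misaligned address
	 * is rejected above with EINVAL.
	 */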

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg, 0);
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid, 0);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz, wantrem)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
	int wantrem;
{
	int error = 0;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid, wantrem);
	else
		shmseg = shm_find_segment_by_shmid(shmid, wantrem);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, shmseg, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.uid = shmid->shm_perm.uid;
		shmseg->shm_perm.gid = shmid->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz, 0);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_handle->shm_object);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_handle->shm_object);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

/*
 * MPSAFE
 */
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}
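
#if 0
/*
 * Illustrative userland sketch (not kernel code; the key, size, and
 * permission values are arbitrary examples) of the syscall sequence
 * served by shmget(), shmat(), shmdt(), and shmctl() above:
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int
shm_example(void)
{
	void *p;
	int shmid;

	/* Create a private 4 KB segment, owner read/write. */
	shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid == -1)
		return (-1);
	/* Attach at a kernel-chosen address. */
	p = shmat(shmid, NULL, 0);
	if (p == (void *)-1)
		return (-1);
	/* ... use the memory ... */
	shmdt(p);
	/* Mark for removal; freed once the last attach is gone. */
	shmctl(shmid, IPC_RMID, NULL);
	return (0);
}
#endif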

/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	GIANT_REQUIRED;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	/* Initialize the newly added slots, not the old array. */
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	/*
	 * Compute shmmax from shmall, backing the page-size multiplier
	 * off until the product no longer overflows.
	 */
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * i;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}
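
/*
 * Because of the TUNABLE_INT_FETCH() calls above, the same limits can
 * be set from loader.conf(5) before the module initializes, e.g. (the
 * values are illustrative):
 *
 *	kern.ipc.shmmaxpgs=16384
 *	kern.ipc.shmseg=256
 *	kern.ipc.shm_use_phys=1
 */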

static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);