/* $FreeBSD: head/sys/kern/sysv_shm.c 58820 2000-03-30 07:17:05Z peter $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_rlimit.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void *));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	1024	/* XXX increase this, it's not in kva! */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	96
#endif
#ifndef SHMSEG
#define	SHMSEG	64
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");

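/*
 * Return the index of the allocated segment whose key matches, or -1 if
 * no allocated segment has that key.
 */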
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

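/*
 * Convert a shmid into a pointer to its shmid_ds, or NULL if the id is
 * out of range, the segment is not allocated or is marked removed, or
 * the sequence number is stale.
 */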
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

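/*
 * Release the VM object backing a segment, return its pages to the
 * committed count and mark the slot free.
 */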
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

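/*
 * Unmap a segment from the given process and, if this drops the last
 * attachment of a segment already marked removed, deallocate it.
 */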
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

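/*
 * shmdt(2): detach the segment mapped at shmaddr from the calling
 * process.
 */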
int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

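/*
 * shmat(2): map a segment into the calling process at shmaddr (or at a
 * kernel-chosen address when shmaddr is NULL) and bump its attach count.
 */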
int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

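/*
 * Old (COMPAT_43) shmctl: IPC_STAT copies out the old-style oshmid_ds;
 * every other command falls through to the current shmctl().
 */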
static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

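/*
 * shmctl(2): IPC_STAT, IPC_SET and IPC_RMID control operations.
 */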
int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

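/*
 * A segment with the requested key already exists: check
 * IPC_CREAT|IPC_EXCL, permissions and size, then return its id.  If the
 * segment is still being created by someone else, sleep and have the
 * caller retry (EAGAIN).
 */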
static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

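/*
 * Create a new segment: find a free slot (growing shmsegs[] if needed),
 * allocate a swap-backed VM object of the rounded-up size and initialize
 * the shmid_ds.
 */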
static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure we have allocated a pager before we need it.
	 */
	shm_handle->shm_object =
		vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

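/*
 * shmget(2): return the id of the segment matching key, creating one if
 * requested (or always, for IPC_PRIVATE).
 *
 * Illustrative userland usage (a sketch, not part of this file; error
 * checks omitted):
 *
 *	int id = shmget(key, 4096, IPC_CREAT | 0644);
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */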
int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}

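/*
 * shmsys(2): multiplexed entry point; dispatch via shmcalls[] to the
 * handler selected by uap->which, passing the remaining arguments.
 */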
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

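/*
 * Called at fork: give the child a copy of the parent's attachment table
 * and bump the attach count of every mapped segment.
 */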
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

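/*
 * Called at process exit: detach all remaining mappings and free the
 * per-process attachment table.
 */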
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

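/*
 * Grow shmsegs[] up to shminfo.shmmni entries, preserving the existing
 * segments and marking the new slots free.
 */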
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

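/*
 * Allocate and initialize the initial shmsegs[] array at boot.
 */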
static void
shminit(dummy)
	void *dummy;
{
	int i;

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);