sysv_shm.c revision 46116
/*	$Id: sysv_shm.c,v 1.40 1999/01/21 08:29:04 dillon Exp $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_inherit.h>

#ifndef _SYS_SYSPROTO_H_
struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap));
#endif

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

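/*
 * Look up a segment by its key.  Returns the index into shmsegs[] of the
 * allocated segment with a matching key, or -1 if none is found.
 */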
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

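/*
 * Convert a shmid into a pointer to its shmid_ds.  Returns NULL if the
 * index is out of range, the segment is not allocated (or is marked
 * removed), or the sequence number no longer matches.
 */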
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

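/*
 * Release the VM object backing a segment, return its pages to the
 * system-wide commit count, and mark the slot free.
 */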
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

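/*
 * Detach one mapping from a process: unmap it from the process address
 * space, clear the per-process slot, and deallocate the segment if this
 * was the last attach and the segment is marked for removal.
 */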
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

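/*
 * shmdt(2): find the mapping whose attach address matches shmaddr and
 * detach it from the calling process.
 */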
int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

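/*
 * shmat(2): map the segment's VM object into the calling process, either
 * at the requested address (optionally rounded down to SHMLBA) or at an
 * address chosen by vm_map_find() above the data segment.  The mapping is
 * marked VM_INHERIT_SHARE so it is shared across fork().
 */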
int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		/* Drop the reference taken above before failing. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

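/*
 * Old (4.3BSD-compatible) shmctl(): only IPC_STAT is handled here, using
 * the old oshmid_ds layout; everything else falls through to shmctl().
 */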
static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

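/*
 * shmctl(2): IPC_STAT copies the segment descriptor out, IPC_SET updates
 * the owner and permission bits, and IPC_RMID marks the segment for
 * removal (it is actually freed once the last process detaches).
 */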
int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

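/*
 * shmget() found an existing segment for the key: check permissions and
 * size, honour IPC_CREAT|IPC_EXCL, and return its shmid.  If the segment
 * is still being set up by another process, sleep and retry (EAGAIN).
 */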
static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

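/*
 * Create a new segment: pick a free slot, allocate a swap-backed VM object
 * of the rounded-up size, and fill in the shmid_ds.  The slot is marked
 * ALLOCATED|REMOVED while we may sleep in malloc() so the key cannot be
 * claimed twice.
 */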
static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure that we have allocated a pager before we need it.
	 */
	shm_handle->shm_object =
		vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

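/*
 * shmget(2): for IPC_PRIVATE always allocate a new segment; otherwise look
 * the key up and either return the existing segment or, with IPC_CREAT,
 * allocate a new one.
 */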
int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}

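/*
 * shmsys(2): old-style multiplexed entry point; dispatch to the handler
 * selected by the first argument via the shmcalls[] table.
 */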
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

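/*
 * Called at fork(): give the child a copy of the parent's shmmap_state
 * array and bump the attach count of every mapped segment.
 */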
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

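/*
 * Called at process exit: detach every remaining mapping and release the
 * per-process shmmap_state array.
 */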
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

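/*
 * SYSINIT hook: mark every segment slot free and reset the usage counters.
 */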
void
shminit(dummy)
	void *dummy;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}