/* sysv_shm.c, revision 30354 */
/*	$Id: sysv_shm.c,v 1.30 1997/10/11 18:31:25 phk Exp $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_inherit.h>

#ifndef _SYS_SYSPROTO_H_
struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));
#endif

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int *retval));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};
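
/*
 * These entry points back the standard SVID shared-memory API.  For
 * reference, a minimal illustrative sketch of typical userland usage
 * (not taken from this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */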

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

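/*
 * Return the index of the allocated segment whose key matches, or -1
 * if there is none.
 */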
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

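/*
 * Resolve a shmid to its shmid_ds.  Returns NULL if the index is out of
 * range, the segment is not fully allocated (free or being removed), or
 * the sequence number in the shmid is stale.
 */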
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

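/*
 * Release a segment's VM object and handle, credit its pages back to
 * shm_committed, and mark the slot free for reuse.
 */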
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

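/*
 * Remove one attachment from the process's address space.  If this was
 * the last attach of a segment already marked removed, deallocate the
 * segment as well.
 */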
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

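/*
 * shmdt(2): detach the segment mapped at shmaddr from the calling
 * process.
 */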
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

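/*
 * shmat(2): map the segment into the caller's address space.  If shmaddr
 * is given it is used (rounded down to SHMLBA with SHM_RND, otherwise it
 * must already be SHMLBA-aligned); if not, the kernel picks an address
 * above the data segment.
 */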
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

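/*
 * Old (4.3BSD-compatible) shmctl: handles IPC_STAT into the old-style
 * oshmid_ds; all other commands fall through to the current shmctl().
 */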
static int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	int *retval;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap, retval);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

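/*
 * shmctl(2): IPC_STAT copies the segment descriptor out, IPC_SET updates
 * owner and mode, IPC_RMID marks the segment for removal (it is freed
 * once the last attach goes away).
 */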
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

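/*
 * Handle shmget() on a key that already exists: wait out an in-flight
 * allocation, check permissions and the requested size, and fail with
 * EEXIST when both IPC_CREAT and IPC_EXCL were given.
 */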
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

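/*
 * Create a new segment: pick a free slot, back it with an anonymous
 * swap-pager VM object, and initialize the descriptor.
 */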
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure the backing pager is allocated before it is needed.
	 */
	shm_handle->shm_object =
		vm_pager_allocate(OBJT_SWAP, 0, OFF_TO_IDX(size),
			VM_PROT_DEFAULT, 0);
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

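/*
 * shmget(2): look the key up (unless IPC_PRIVATE) and either return the
 * existing segment or, with IPC_CREAT, allocate a new one.
 */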
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

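/*
 * shmsys(2): historic multiplexed entry point; dispatch to the handler
 * selected by uap->which via the shmcalls table above.
 */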
int
shmsys(p, uap, retval)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}

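/*
 * Called at fork: give the child its own copy of the parent's attach
 * table and bump the attach count of every segment in it.
 */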
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

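/*
 * Called at process exit: detach all remaining mappings and free the
 * per-process attach table.
 */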
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

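/*
 * Initialize the segment table at boot: mark every slot free.
 */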
void
shminit(dummy)
	void *dummy;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}