/* sysv_shm.c — FreeBSD revision 83413 */
1/* $FreeBSD: head/sys/kern/sysv_shm.c 83413 2001-09-13 20:20:09Z mr $ */ 2/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 3 4/* 5 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Adam Glass and Charles 18 * Hannum. 19 * 4. The names of the authors may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34#include "opt_compat.h" 35#include "opt_rlimit.h" 36#include "opt_sysvipc.h" 37 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/kernel.h> 41#include <sys/lock.h> 42#include <sys/sysctl.h> 43#include <sys/shm.h> 44#include <sys/proc.h> 45#include <sys/malloc.h> 46#include <sys/mman.h> 47#include <sys/mutex.h> 48#include <sys/stat.h> 49#include <sys/syscall.h> 50#include <sys/sysent.h> 51#include <sys/sysproto.h> 52#include <sys/jail.h> 53 54#include <vm/vm.h> 55#include <vm/vm_param.h> 56#include <vm/pmap.h> 57#include <vm/vm_object.h> 58#include <vm/vm_map.h> 59#include <vm/vm_page.h> 60#include <vm/vm_pager.h> 61 62static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 63 64struct oshmctl_args; 65static int oshmctl __P((struct thread *td, struct oshmctl_args *uap)); 66 67static int shmget_allocate_segment __P((struct thread *td, 68 struct shmget_args *uap, int mode)); 69static int shmget_existing __P((struct thread *td, struct shmget_args *uap, 70 int mode, int segnum)); 71 72/* XXX casting to (sy_call_t *) is bogus, as usual. 
*/ 73static sy_call_t *shmcalls[] = { 74 (sy_call_t *)shmat, (sy_call_t *)oshmctl, 75 (sy_call_t *)shmdt, (sy_call_t *)shmget, 76 (sy_call_t *)shmctl 77}; 78 79#define SHMSEG_FREE 0x0200 80#define SHMSEG_REMOVED 0x0400 81#define SHMSEG_ALLOCATED 0x0800 82#define SHMSEG_WANTED 0x1000 83 84static int shm_last_free, shm_nused, shm_committed, shmalloced; 85static struct shmid_ds *shmsegs; 86 87struct shm_handle { 88 /* vm_offset_t kva; */ 89 vm_object_t shm_object; 90}; 91 92struct shmmap_state { 93 vm_offset_t va; 94 int shmid; 95}; 96 97static void shm_deallocate_segment __P((struct shmid_ds *)); 98static int shm_find_segment_by_key __P((key_t)); 99static struct shmid_ds *shm_find_segment_by_shmid __P((int)); 100static int shm_delete_mapping __P((struct proc *p, struct shmmap_state *)); 101static void shmrealloc __P((void)); 102static void shminit __P((void)); 103static int sysvshm_modload __P((struct module *, int, void *)); 104static int shmunload __P((void)); 105static void shmexit_myhook __P((struct proc *p)); 106static void shmfork_myhook __P((struct proc *p1, struct proc *p2)); 107static int sysctl_shmsegs __P((SYSCTL_HANDLER_ARGS)); 108 109/* 110 * Tuneable values. 111 */ 112#ifndef SHMMAXPGS 113#define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. 
*/ 114#endif 115#ifndef SHMMAX 116#define SHMMAX (SHMMAXPGS*PAGE_SIZE) 117#endif 118#ifndef SHMMIN 119#define SHMMIN 1 120#endif 121#ifndef SHMMNI 122#define SHMMNI 192 123#endif 124#ifndef SHMSEG 125#define SHMSEG 128 126#endif 127#ifndef SHMALL 128#define SHMALL (SHMMAXPGS) 129#endif 130 131struct shminfo shminfo = { 132 SHMMAX, 133 SHMMIN, 134 SHMMNI, 135 SHMSEG, 136 SHMALL 137}; 138 139static int shm_use_phys; 140 141SYSCTL_DECL(_kern_ipc); 142SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, ""); 143SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, ""); 144SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, ""); 145SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, ""); 146SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, ""); 147SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, 148 &shm_use_phys, 0, ""); 149SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD, 150 NULL, 0, sysctl_shmsegs, "", ""); 151 152static int 153shm_find_segment_by_key(key) 154 key_t key; 155{ 156 int i; 157 158 for (i = 0; i < shmalloced; i++) 159 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) && 160 shmsegs[i].shm_perm.key == key) 161 return i; 162 return -1; 163} 164 165static struct shmid_ds * 166shm_find_segment_by_shmid(shmid) 167 int shmid; 168{ 169 int segnum; 170 struct shmid_ds *shmseg; 171 172 segnum = IPCID_TO_IX(shmid); 173 if (segnum < 0 || segnum >= shmalloced) 174 return NULL; 175 shmseg = &shmsegs[segnum]; 176 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED)) 177 != SHMSEG_ALLOCATED || 178 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) 179 return NULL; 180 return shmseg; 181} 182 183static void 184shm_deallocate_segment(shmseg) 185 struct shmid_ds *shmseg; 186{ 187 struct shm_handle *shm_handle; 188 size_t size; 189 190 GIANT_REQUIRED; 191 192 shm_handle = shmseg->shm_internal; 193 vm_object_deallocate(shm_handle->shm_object); 194 
free((caddr_t)shm_handle, M_SHM); 195 shmseg->shm_internal = NULL; 196 size = round_page(shmseg->shm_segsz); 197 shm_committed -= btoc(size); 198 shm_nused--; 199 shmseg->shm_perm.mode = SHMSEG_FREE; 200} 201 202static int 203shm_delete_mapping(p, shmmap_s) 204 struct proc *p; 205 struct shmmap_state *shmmap_s; 206{ 207 struct shmid_ds *shmseg; 208 int segnum, result; 209 size_t size; 210 211 GIANT_REQUIRED; 212 213 segnum = IPCID_TO_IX(shmmap_s->shmid); 214 shmseg = &shmsegs[segnum]; 215 size = round_page(shmseg->shm_segsz); 216 result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, 217 shmmap_s->va + size); 218 if (result != KERN_SUCCESS) 219 return EINVAL; 220 shmmap_s->shmid = -1; 221 shmseg->shm_dtime = time_second; 222 if ((--shmseg->shm_nattch <= 0) && 223 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) { 224 shm_deallocate_segment(shmseg); 225 shm_last_free = segnum; 226 } 227 return 0; 228} 229 230#ifndef _SYS_SYSPROTO_H_ 231struct shmdt_args { 232 void *shmaddr; 233}; 234#endif 235 236/* 237 * MPSAFE 238 */ 239int 240shmdt(td, uap) 241 struct thread *td; 242 struct shmdt_args *uap; 243{ 244 struct proc *p = td->td_proc; 245 struct shmmap_state *shmmap_s; 246 int i; 247 int error = 0; 248 249 mtx_lock(&Giant); 250 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 251 error = ENOSYS; 252 goto done2; 253 } 254 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 255 if (shmmap_s == NULL) { 256 error = EINVAL; 257 goto done2; 258 } 259 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 260 if (shmmap_s->shmid != -1 && 261 shmmap_s->va == (vm_offset_t)uap->shmaddr) { 262 break; 263 } 264 } 265 if (i == shminfo.shmseg) { 266 error = EINVAL; 267 goto done2; 268 } 269 error = shm_delete_mapping(p, shmmap_s); 270done2: 271 mtx_unlock(&Giant); 272 return (error); 273} 274 275#ifndef _SYS_SYSPROTO_H_ 276struct shmat_args { 277 int shmid; 278 void *shmaddr; 279 int shmflg; 280}; 281#endif 282 283/* 284 * MPSAFE 285 */ 286int 287shmat(td, uap) 288 struct thread 
*td; 289 struct shmat_args *uap; 290{ 291 struct proc *p = td->td_proc; 292 int i, flags; 293 struct shmid_ds *shmseg; 294 struct shmmap_state *shmmap_s = NULL; 295 struct shm_handle *shm_handle; 296 vm_offset_t attach_va; 297 vm_prot_t prot; 298 vm_size_t size; 299 int rv; 300 int error = 0; 301 302 mtx_lock(&Giant); 303 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 304 error = ENOSYS; 305 goto done2; 306 } 307 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 308 if (shmmap_s == NULL) { 309 size = shminfo.shmseg * sizeof(struct shmmap_state); 310 shmmap_s = malloc(size, M_SHM, M_WAITOK); 311 for (i = 0; i < shminfo.shmseg; i++) 312 shmmap_s[i].shmid = -1; 313 p->p_vmspace->vm_shm = (caddr_t)shmmap_s; 314 } 315 shmseg = shm_find_segment_by_shmid(uap->shmid); 316 if (shmseg == NULL) { 317 error = EINVAL; 318 goto done2; 319 } 320 error = ipcperm(td, &shmseg->shm_perm, 321 (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); 322 if (error) 323 goto done2; 324 for (i = 0; i < shminfo.shmseg; i++) { 325 if (shmmap_s->shmid == -1) 326 break; 327 shmmap_s++; 328 } 329 if (i >= shminfo.shmseg) { 330 error = EMFILE; 331 goto done2; 332 } 333 size = round_page(shmseg->shm_segsz); 334#ifdef VM_PROT_READ_IS_EXEC 335 prot = VM_PROT_READ | VM_PROT_EXECUTE; 336#else 337 prot = VM_PROT_READ; 338#endif 339 if ((uap->shmflg & SHM_RDONLY) == 0) 340 prot |= VM_PROT_WRITE; 341 flags = MAP_ANON | MAP_SHARED; 342 if (uap->shmaddr) { 343 flags |= MAP_FIXED; 344 if (uap->shmflg & SHM_RND) { 345 attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1); 346 } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) { 347 attach_va = (vm_offset_t)uap->shmaddr; 348 } else { 349 error = EINVAL; 350 goto done2; 351 } 352 } else { 353 /* 354 * This is just a hint to vm_map_find() about where to 355 * put it. 
356 */ 357 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr 358 + MAXTSIZ + MAXDSIZ); 359 } 360 361 shm_handle = shmseg->shm_internal; 362 vm_object_reference(shm_handle->shm_object); 363 rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object, 364 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0); 365 if (rv != KERN_SUCCESS) { 366 error = ENOMEM; 367 goto done2; 368 } 369 vm_map_inherit(&p->p_vmspace->vm_map, 370 attach_va, attach_va + size, VM_INHERIT_SHARE); 371 372 shmmap_s->va = attach_va; 373 shmmap_s->shmid = uap->shmid; 374 shmseg->shm_lpid = p->p_pid; 375 shmseg->shm_atime = time_second; 376 shmseg->shm_nattch++; 377 td->td_retval[0] = attach_va; 378done2: 379 mtx_unlock(&Giant); 380 return (error); 381} 382 383struct oshmid_ds { 384 struct ipc_perm shm_perm; /* operation perms */ 385 int shm_segsz; /* size of segment (bytes) */ 386 ushort shm_cpid; /* pid, creator */ 387 ushort shm_lpid; /* pid, last operation */ 388 short shm_nattch; /* no. of current attaches */ 389 time_t shm_atime; /* last attach time */ 390 time_t shm_dtime; /* last detach time */ 391 time_t shm_ctime; /* last change time */ 392 void *shm_handle; /* internal handle for shm segment */ 393}; 394 395struct oshmctl_args { 396 int shmid; 397 int cmd; 398 struct oshmid_ds *ubuf; 399}; 400 401/* 402 * MPSAFE 403 */ 404static int 405oshmctl(td, uap) 406 struct thread *td; 407 struct oshmctl_args *uap; 408{ 409#ifdef COMPAT_43 410 int error = 0; 411 struct shmid_ds *shmseg; 412 struct oshmid_ds outbuf; 413 414 mtx_lock(&Giant); 415 if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) { 416 error = ENOSYS; 417 goto done2; 418 } 419 shmseg = shm_find_segment_by_shmid(uap->shmid); 420 if (shmseg == NULL) { 421 error = EINVAL; 422 goto done2; 423 } 424 switch (uap->cmd) { 425 case IPC_STAT: 426 error = ipcperm(td, &shmseg->shm_perm, IPC_R); 427 if (error) 428 goto done2; 429 outbuf.shm_perm = shmseg->shm_perm; 430 outbuf.shm_segsz = shmseg->shm_segsz; 431 
outbuf.shm_cpid = shmseg->shm_cpid; 432 outbuf.shm_lpid = shmseg->shm_lpid; 433 outbuf.shm_nattch = shmseg->shm_nattch; 434 outbuf.shm_atime = shmseg->shm_atime; 435 outbuf.shm_dtime = shmseg->shm_dtime; 436 outbuf.shm_ctime = shmseg->shm_ctime; 437 outbuf.shm_handle = shmseg->shm_internal; 438 error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf)); 439 if (error) 440 goto done2; 441 break; 442 default: 443 /* XXX casting to (sy_call_t *) is bogus, as usual. */ 444 error = ((sy_call_t *)shmctl)(td, uap); 445 break; 446 } 447done2: 448 mtx_unlock(&Giant); 449 return (error); 450#else 451 return EINVAL; 452#endif 453} 454 455#ifndef _SYS_SYSPROTO_H_ 456struct shmctl_args { 457 int shmid; 458 int cmd; 459 struct shmid_ds *buf; 460}; 461#endif 462 463/* 464 * MPSAFE 465 */ 466int 467shmctl(td, uap) 468 struct thread *td; 469 struct shmctl_args *uap; 470{ 471 int error = 0; 472 struct shmid_ds inbuf; 473 struct shmid_ds *shmseg; 474 475 mtx_lock(&Giant); 476 if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) { 477 error = ENOSYS; 478 goto done2; 479 } 480 shmseg = shm_find_segment_by_shmid(uap->shmid); 481 if (shmseg == NULL) { 482 error = EINVAL; 483 goto done2; 484 } 485 switch (uap->cmd) { 486 case IPC_STAT: 487 error = ipcperm(td, &shmseg->shm_perm, IPC_R); 488 if (error) 489 goto done2; 490 error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf)); 491 if (error) 492 goto done2; 493 break; 494 case IPC_SET: 495 error = ipcperm(td, &shmseg->shm_perm, IPC_M); 496 if (error) 497 goto done2; 498 error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf)); 499 if (error) 500 goto done2; 501 shmseg->shm_perm.uid = inbuf.shm_perm.uid; 502 shmseg->shm_perm.gid = inbuf.shm_perm.gid; 503 shmseg->shm_perm.mode = 504 (shmseg->shm_perm.mode & ~ACCESSPERMS) | 505 (inbuf.shm_perm.mode & ACCESSPERMS); 506 shmseg->shm_ctime = time_second; 507 break; 508 case IPC_RMID: 509 error = ipcperm(td, &shmseg->shm_perm, IPC_M); 510 if (error) 511 goto done2; 512 
shmseg->shm_perm.key = IPC_PRIVATE; 513 shmseg->shm_perm.mode |= SHMSEG_REMOVED; 514 if (shmseg->shm_nattch <= 0) { 515 shm_deallocate_segment(shmseg); 516 shm_last_free = IPCID_TO_IX(uap->shmid); 517 } 518 break; 519#if 0 520 case SHM_LOCK: 521 case SHM_UNLOCK: 522#endif 523 default: 524 error = EINVAL; 525 break; 526 } 527done2: 528 mtx_unlock(&Giant); 529 return (error); 530} 531 532#ifndef _SYS_SYSPROTO_H_ 533struct shmget_args { 534 key_t key; 535 size_t size; 536 int shmflg; 537}; 538#endif 539 540static int 541shmget_existing(td, uap, mode, segnum) 542 struct thread *td; 543 struct shmget_args *uap; 544 int mode; 545 int segnum; 546{ 547 struct shmid_ds *shmseg; 548 int error; 549 550 shmseg = &shmsegs[segnum]; 551 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) { 552 /* 553 * This segment is in the process of being allocated. Wait 554 * until it's done, and look the key up again (in case the 555 * allocation failed or it was freed). 556 */ 557 shmseg->shm_perm.mode |= SHMSEG_WANTED; 558 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0); 559 if (error) 560 return error; 561 return EAGAIN; 562 } 563 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 564 return EEXIST; 565 error = ipcperm(td, &shmseg->shm_perm, mode); 566 if (error) 567 return error; 568 if (uap->size && uap->size > shmseg->shm_segsz) 569 return EINVAL; 570 td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 571 return 0; 572} 573 574static int 575shmget_allocate_segment(td, uap, mode) 576 struct thread *td; 577 struct shmget_args *uap; 578 int mode; 579{ 580 int i, segnum, shmid, size; 581 struct ucred *cred = td->td_proc->p_ucred; 582 struct shmid_ds *shmseg; 583 struct shm_handle *shm_handle; 584 585 GIANT_REQUIRED; 586 587 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 588 return EINVAL; 589 if (shm_nused >= shminfo.shmmni) /* Any shmids left? 
*/ 590 return ENOSPC; 591 size = round_page(uap->size); 592 if (shm_committed + btoc(size) > shminfo.shmall) 593 return ENOMEM; 594 if (shm_last_free < 0) { 595 shmrealloc(); /* Maybe expand the shmsegs[] array. */ 596 for (i = 0; i < shmalloced; i++) 597 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE) 598 break; 599 if (i == shmalloced) 600 return ENOSPC; 601 segnum = i; 602 } else { 603 segnum = shm_last_free; 604 shm_last_free = -1; 605 } 606 shmseg = &shmsegs[segnum]; 607 /* 608 * In case we sleep in malloc(), mark the segment present but deleted 609 * so that noone else tries to create the same key. 610 */ 611 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 612 shmseg->shm_perm.key = uap->key; 613 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; 614 shm_handle = (struct shm_handle *) 615 malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK); 616 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 617 618 /* 619 * We make sure that we have allocated a pager before we need 620 * to. 
621 */ 622 if (shm_use_phys) { 623 shm_handle->shm_object = 624 vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0); 625 } else { 626 shm_handle->shm_object = 627 vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0); 628 } 629 vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING); 630 vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT); 631 632 shmseg->shm_internal = shm_handle; 633 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; 634 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; 635 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | 636 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 637 shmseg->shm_segsz = uap->size; 638 shmseg->shm_cpid = td->td_proc->p_pid; 639 shmseg->shm_lpid = shmseg->shm_nattch = 0; 640 shmseg->shm_atime = shmseg->shm_dtime = 0; 641 shmseg->shm_ctime = time_second; 642 shm_committed += btoc(size); 643 shm_nused++; 644 if (shmseg->shm_perm.mode & SHMSEG_WANTED) { 645 /* 646 * Somebody else wanted this key while we were asleep. Wake 647 * them up now. 
648 */ 649 shmseg->shm_perm.mode &= ~SHMSEG_WANTED; 650 wakeup((caddr_t)shmseg); 651 } 652 td->td_retval[0] = shmid; 653 return 0; 654} 655 656/* 657 * MPSAFE 658 */ 659int 660shmget(td, uap) 661 struct thread *td; 662 struct shmget_args *uap; 663{ 664 int segnum, mode; 665 int error; 666 667 mtx_lock(&Giant); 668 if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) { 669 error = ENOSYS; 670 goto done2; 671 } 672 mode = uap->shmflg & ACCESSPERMS; 673 if (uap->key != IPC_PRIVATE) { 674 again: 675 segnum = shm_find_segment_by_key(uap->key); 676 if (segnum >= 0) { 677 error = shmget_existing(td, uap, mode, segnum); 678 if (error == EAGAIN) 679 goto again; 680 goto done2; 681 } 682 if ((uap->shmflg & IPC_CREAT) == 0) { 683 error = ENOENT; 684 goto done2; 685 } 686 } 687 error = shmget_allocate_segment(td, uap, mode); 688done2: 689 mtx_unlock(&Giant); 690 return (error); 691} 692 693/* 694 * MPSAFE 695 */ 696int 697shmsys(td, uap) 698 struct thread *td; 699 /* XXX actually varargs. */ 700 struct shmsys_args /* { 701 u_int which; 702 int a2; 703 int a3; 704 int a4; 705 } */ *uap; 706{ 707 int error; 708 709 mtx_lock(&Giant); 710 if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) { 711 error = ENOSYS; 712 goto done2; 713 } 714 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) { 715 error = EINVAL; 716 goto done2; 717 } 718 error = (*shmcalls[uap->which])(td, &uap->a2); 719done2: 720 mtx_unlock(&Giant); 721 return (error); 722} 723 724static void 725shmfork_myhook(p1, p2) 726 struct proc *p1, *p2; 727{ 728 struct shmmap_state *shmmap_s; 729 size_t size; 730 int i; 731 732 size = shminfo.shmseg * sizeof(struct shmmap_state); 733 shmmap_s = malloc(size, M_SHM, M_WAITOK); 734 bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size); 735 p2->p_vmspace->vm_shm = (caddr_t)shmmap_s; 736 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 737 if (shmmap_s->shmid != -1) 738 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; 739} 740 741static void 
742shmexit_myhook(p) 743 struct proc *p; 744{ 745 struct shmmap_state *shmmap_s; 746 int i; 747 748 GIANT_REQUIRED; 749 750 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 751 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 752 if (shmmap_s->shmid != -1) 753 shm_delete_mapping(p, shmmap_s); 754 free((caddr_t)p->p_vmspace->vm_shm, M_SHM); 755 p->p_vmspace->vm_shm = NULL; 756} 757 758static void 759shmrealloc(void) 760{ 761 int i; 762 struct shmid_ds *newsegs; 763 764 if (shmalloced >= shminfo.shmmni) 765 return; 766 767 newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 768 if (newsegs == NULL) 769 return; 770 for (i = 0; i < shmalloced; i++) 771 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 772 for (; i < shminfo.shmmni; i++) { 773 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 774 shmsegs[i].shm_perm.seq = 0; 775 } 776 free(shmsegs, M_SHM); 777 shmsegs = newsegs; 778 shmalloced = shminfo.shmmni; 779} 780 781static void 782shminit() 783{ 784 int i; 785 786 TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall); 787 shminfo.shmmax = shminfo.shmall * PAGE_SIZE; 788 TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin); 789 TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni); 790 TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg); 791 TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys); 792 793 shmalloced = shminfo.shmmni; 794 shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 795 if (shmsegs == NULL) 796 panic("cannot allocate initial memory for sysvshm"); 797 for (i = 0; i < shmalloced; i++) { 798 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 799 shmsegs[i].shm_perm.seq = 0; 800 } 801 shm_last_free = 0; 802 shm_nused = 0; 803 shm_committed = 0; 804 shmexit_hook = &shmexit_myhook; 805 shmfork_hook = &shmfork_myhook; 806} 807 808static int 809shmunload() 810{ 811 812 if (shm_nused > 0) 813 return (EBUSY); 814 815 free(shmsegs, M_SHM); 816 shmexit_hook = NULL; 817 shmfork_hook = NULL; 818 return (0); 819} 820 
821static int 822sysctl_shmsegs(SYSCTL_HANDLER_ARGS) 823{ 824 825 return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0]))); 826} 827 828static int 829sysvshm_modload(struct module *module, int cmd, void *arg) 830{ 831 int error = 0; 832 833 switch (cmd) { 834 case MOD_LOAD: 835 shminit(); 836 break; 837 case MOD_UNLOAD: 838 error = shmunload(); 839 break; 840 case MOD_SHUTDOWN: 841 break; 842 default: 843 error = EINVAL; 844 break; 845 } 846 return (error); 847} 848 849static moduledata_t sysvshm_mod = { 850 "sysvshm", 851 &sysvshm_modload, 852 NULL 853}; 854 855SYSCALL_MODULE_HELPER(shmsys, 4); 856SYSCALL_MODULE_HELPER(shmat, 3); 857SYSCALL_MODULE_HELPER(shmctl, 3); 858SYSCALL_MODULE_HELPER(shmdt, 1); 859SYSCALL_MODULE_HELPER(shmget, 3); 860 861DECLARE_MODULE(sysvshm, sysvshm_mod, 862 SI_SUB_SYSV_SHM, SI_ORDER_FIRST); 863MODULE_VERSION(sysvshm, 1); 864