/* linux_compat.c — FreeBSD linuxkpi/OFED compatibility layer, revision 282513 */
1/*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 */ 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/malloc.h> 33#include <sys/kernel.h> 34#include <sys/sysctl.h> 35#include <sys/proc.h> 36#include <sys/sleepqueue.h> 37#include <sys/lock.h> 38#include <sys/mutex.h> 39#include <sys/bus.h> 40#include <sys/fcntl.h> 41#include <sys/file.h> 42#include <sys/filio.h> 43#include <sys/rwlock.h> 44 45#include <vm/vm.h> 46#include <vm/pmap.h> 47 48#include <machine/stdarg.h> 49#include <machine/pmap.h> 50 51#include <linux/kobject.h> 52#include <linux/device.h> 53#include <linux/slab.h> 54#include <linux/module.h> 55#include <linux/cdev.h> 56#include <linux/file.h> 57#include <linux/sysfs.h> 58#include <linux/mm.h> 59#include <linux/io.h> 60#include <linux/vmalloc.h> 61#include <linux/timer.h> 62#include <linux/netdevice.h> 63 64#include <vm/vm_pager.h> 65 66MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 67 68#include <linux/rbtree.h> 69/* Undo Linux compat changes. */ 70#undef RB_ROOT 71#undef file 72#undef cdev 73#define RB_ROOT(head) (head)->rbh_root 74 75struct kobject class_root; 76struct device linux_rootdev; 77struct class miscclass; 78struct list_head pci_drivers; 79struct list_head pci_devices; 80struct net init_net; 81spinlock_t pci_lock; 82 83unsigned long linux_timer_hz_mask; 84 85int 86panic_cmp(struct rb_node *one, struct rb_node *two) 87{ 88 panic("no cmp"); 89} 90 91RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 92 93int 94kobject_set_name(struct kobject *kobj, const char *fmt, ...) 
95{ 96 va_list args; 97 int error; 98 99 va_start(args, fmt); 100 error = kobject_set_name_vargs(kobj, fmt, args); 101 va_end(args); 102 103 return (error); 104} 105 106static inline int 107kobject_add_complete(struct kobject *kobj, struct kobject *parent) 108{ 109 struct kobj_type *t; 110 int error; 111 112 kobj->parent = kobject_get(parent); 113 error = sysfs_create_dir(kobj); 114 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 115 struct attribute **attr; 116 t = kobj->ktype; 117 118 for (attr = t->default_attrs; *attr != NULL; attr++) { 119 error = sysfs_create_file(kobj, *attr); 120 if (error) 121 break; 122 } 123 if (error) 124 sysfs_remove_dir(kobj); 125 126 } 127 return (error); 128} 129 130int 131kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 132{ 133 va_list args; 134 int error; 135 136 va_start(args, fmt); 137 error = kobject_set_name_vargs(kobj, fmt, args); 138 va_end(args); 139 if (error) 140 return (error); 141 142 return kobject_add_complete(kobj, parent); 143} 144 145void 146kobject_release(struct kref *kref) 147{ 148 struct kobject *kobj; 149 char *name; 150 151 kobj = container_of(kref, struct kobject, kref); 152 sysfs_remove_dir(kobj); 153 if (kobj->parent) 154 kobject_put(kobj->parent); 155 kobj->parent = NULL; 156 name = kobj->name; 157 if (kobj->ktype && kobj->ktype->release) 158 kobj->ktype->release(kobj); 159 kfree(name); 160} 161 162static void 163kobject_kfree(struct kobject *kobj) 164{ 165 kfree(kobj); 166} 167 168static void 169kobject_kfree_name(struct kobject *kobj) 170{ 171 if (kobj) { 172 kfree(kobj->name); 173 } 174} 175 176struct kobj_type kfree_type = { .release = kobject_kfree }; 177 178static void 179dev_release(struct device *dev) 180{ 181 pr_debug("dev_release: %s\n", dev_name(dev)); 182 kfree(dev); 183} 184 185struct device * 186device_create(struct class *class, struct device *parent, dev_t devt, 187 void *drvdata, const char *fmt, ...) 
188{ 189 struct device *dev; 190 va_list args; 191 192 dev = kzalloc(sizeof(*dev), M_WAITOK); 193 dev->parent = parent; 194 dev->class = class; 195 dev->devt = devt; 196 dev->driver_data = drvdata; 197 dev->release = dev_release; 198 va_start(args, fmt); 199 kobject_set_name_vargs(&dev->kobj, fmt, args); 200 va_end(args); 201 device_register(dev); 202 203 return (dev); 204} 205 206int 207kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, 208 struct kobject *parent, const char *fmt, ...) 209{ 210 va_list args; 211 int error; 212 213 kobject_init(kobj, ktype); 214 kobj->ktype = ktype; 215 kobj->parent = parent; 216 kobj->name = NULL; 217 218 va_start(args, fmt); 219 error = kobject_set_name_vargs(kobj, fmt, args); 220 va_end(args); 221 if (error) 222 return (error); 223 return kobject_add_complete(kobj, parent); 224} 225 226static void 227linux_file_dtor(void *cdp) 228{ 229 struct linux_file *filp; 230 231 filp = cdp; 232 filp->f_op->release(filp->f_vnode, filp); 233 vdrop(filp->f_vnode); 234 kfree(filp); 235} 236 237static int 238linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 239{ 240 struct linux_cdev *ldev; 241 struct linux_file *filp; 242 struct file *file; 243 int error; 244 245 file = curthread->td_fpop; 246 ldev = dev->si_drv1; 247 if (ldev == NULL) 248 return (ENODEV); 249 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 250 filp->f_dentry = &filp->f_dentry_store; 251 filp->f_op = ldev->ops; 252 filp->f_flags = file->f_flag; 253 vhold(file->f_vnode); 254 filp->f_vnode = file->f_vnode; 255 if (filp->f_op->open) { 256 error = -filp->f_op->open(file->f_vnode, filp); 257 if (error) { 258 kfree(filp); 259 return (error); 260 } 261 } 262 error = devfs_set_cdevpriv(filp, linux_file_dtor); 263 if (error) { 264 filp->f_op->release(file->f_vnode, filp); 265 kfree(filp); 266 return (error); 267 } 268 269 return 0; 270} 271 272static int 273linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 274{ 275 
struct linux_cdev *ldev; 276 struct linux_file *filp; 277 struct file *file; 278 int error; 279 280 file = curthread->td_fpop; 281 ldev = dev->si_drv1; 282 if (ldev == NULL) 283 return (0); 284 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 285 return (error); 286 filp->f_flags = file->f_flag; 287 devfs_clear_cdevpriv(); 288 289 290 return (0); 291} 292 293static int 294linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 295 struct thread *td) 296{ 297 struct linux_cdev *ldev; 298 struct linux_file *filp; 299 struct file *file; 300 int error; 301 302 file = curthread->td_fpop; 303 ldev = dev->si_drv1; 304 if (ldev == NULL) 305 return (0); 306 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 307 return (error); 308 filp->f_flags = file->f_flag; 309 /* 310 * Linux does not have a generic ioctl copyin/copyout layer. All 311 * linux ioctls must be converted to void ioctls which pass a 312 * pointer to the address of the data. We want the actual user 313 * address so we dereference here. 
314 */ 315 data = *(void **)data; 316 if (filp->f_op->unlocked_ioctl) 317 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); 318 else 319 error = ENOTTY; 320 321 return (error); 322} 323 324static int 325linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) 326{ 327 struct linux_cdev *ldev; 328 struct linux_file *filp; 329 struct file *file; 330 ssize_t bytes; 331 int error; 332 333 file = curthread->td_fpop; 334 ldev = dev->si_drv1; 335 if (ldev == NULL) 336 return (0); 337 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 338 return (error); 339 filp->f_flags = file->f_flag; 340 if (uio->uio_iovcnt != 1) 341 panic("linux_dev_read: uio %p iovcnt %d", 342 uio, uio->uio_iovcnt); 343 if (filp->f_op->read) { 344 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 345 uio->uio_iov->iov_len, &uio->uio_offset); 346 if (bytes >= 0) { 347 uio->uio_iov->iov_base += bytes; 348 uio->uio_iov->iov_len -= bytes; 349 uio->uio_resid -= bytes; 350 } else 351 error = -bytes; 352 } else 353 error = ENXIO; 354 355 return (error); 356} 357 358static int 359linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) 360{ 361 struct linux_cdev *ldev; 362 struct linux_file *filp; 363 struct file *file; 364 ssize_t bytes; 365 int error; 366 367 file = curthread->td_fpop; 368 ldev = dev->si_drv1; 369 if (ldev == NULL) 370 return (0); 371 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 372 return (error); 373 filp->f_flags = file->f_flag; 374 if (uio->uio_iovcnt != 1) 375 panic("linux_dev_write: uio %p iovcnt %d", 376 uio, uio->uio_iovcnt); 377 if (filp->f_op->write) { 378 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, 379 uio->uio_iov->iov_len, &uio->uio_offset); 380 if (bytes >= 0) { 381 uio->uio_iov->iov_base += bytes; 382 uio->uio_iov->iov_len -= bytes; 383 uio->uio_resid -= bytes; 384 } else 385 error = -bytes; 386 } else 387 error = ENXIO; 388 389 return (error); 390} 391 392static int 393linux_dev_poll(struct cdev *dev, int events, struct 
thread *td) 394{ 395 struct linux_cdev *ldev; 396 struct linux_file *filp; 397 struct file *file; 398 int revents; 399 int error; 400 401 file = curthread->td_fpop; 402 ldev = dev->si_drv1; 403 if (ldev == NULL) 404 return (0); 405 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 406 return (error); 407 filp->f_flags = file->f_flag; 408 if (filp->f_op->poll) 409 revents = filp->f_op->poll(filp, NULL) & events; 410 else 411 revents = 0; 412 413 return (revents); 414} 415 416static int 417linux_dev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, 418 int nprot, vm_memattr_t *memattr) 419{ 420 421 /* XXX memattr not honored. */ 422 *paddr = offset; 423 return (0); 424} 425 426static int 427linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset, 428 vm_size_t size, struct vm_object **object, int nprot) 429{ 430 struct linux_cdev *ldev; 431 struct linux_file *filp; 432 struct file *file; 433 struct vm_area_struct vma; 434 vm_paddr_t paddr; 435 vm_page_t m; 436 int error; 437 438 file = curthread->td_fpop; 439 ldev = dev->si_drv1; 440 if (ldev == NULL) 441 return (ENODEV); 442 if (size != PAGE_SIZE) 443 return (EINVAL); 444 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 445 return (error); 446 filp->f_flags = file->f_flag; 447 vma.vm_start = 0; 448 vma.vm_end = PAGE_SIZE; 449 vma.vm_pgoff = *offset / PAGE_SIZE; 450 vma.vm_pfn = 0; 451 vma.vm_page_prot = 0; 452 if (filp->f_op->mmap) { 453 error = -filp->f_op->mmap(filp, &vma); 454 if (error == 0) { 455 paddr = (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT; 456 *offset = paddr; 457 m = PHYS_TO_VM_PAGE(paddr); 458 *object = vm_pager_allocate(OBJT_DEVICE, dev, 459 PAGE_SIZE, nprot, *offset, curthread->td_ucred); 460 if (*object == NULL) 461 return (EINVAL); 462 if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) 463 pmap_page_set_memattr(m, vma.vm_page_prot); 464 } 465 } else 466 error = ENODEV; 467 468 return (error); 469} 470 471struct cdevsw linuxcdevsw = { 472 .d_version = D_VERSION, 473 .d_flags = 
D_TRACKCLOSE, 474 .d_open = linux_dev_open, 475 .d_close = linux_dev_close, 476 .d_read = linux_dev_read, 477 .d_write = linux_dev_write, 478 .d_ioctl = linux_dev_ioctl, 479 .d_mmap_single = linux_dev_mmap_single, 480 .d_mmap = linux_dev_mmap, 481 .d_poll = linux_dev_poll, 482}; 483 484static int 485linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 486 int flags, struct thread *td) 487{ 488 struct linux_file *filp; 489 ssize_t bytes; 490 int error; 491 492 error = 0; 493 filp = (struct linux_file *)file->f_data; 494 filp->f_flags = file->f_flag; 495 if (uio->uio_iovcnt != 1) 496 panic("linux_file_read: uio %p iovcnt %d", 497 uio, uio->uio_iovcnt); 498 if (filp->f_op->read) { 499 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 500 uio->uio_iov->iov_len, &uio->uio_offset); 501 if (bytes >= 0) { 502 uio->uio_iov->iov_base += bytes; 503 uio->uio_iov->iov_len -= bytes; 504 uio->uio_resid -= bytes; 505 } else 506 error = -bytes; 507 } else 508 error = ENXIO; 509 510 return (error); 511} 512 513static int 514linux_file_poll(struct file *file, int events, struct ucred *active_cred, 515 struct thread *td) 516{ 517 struct linux_file *filp; 518 int revents; 519 520 filp = (struct linux_file *)file->f_data; 521 filp->f_flags = file->f_flag; 522 if (filp->f_op->poll) 523 revents = filp->f_op->poll(filp, NULL) & events; 524 else 525 revents = 0; 526 527 return (0); 528} 529 530static int 531linux_file_close(struct file *file, struct thread *td) 532{ 533 struct linux_file *filp; 534 int error; 535 536 filp = (struct linux_file *)file->f_data; 537 filp->f_flags = file->f_flag; 538 error = -filp->f_op->release(NULL, filp); 539 funsetown(&filp->f_sigio); 540 kfree(filp); 541 542 return (error); 543} 544 545static int 546linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 547 struct thread *td) 548{ 549 struct linux_file *filp; 550 int error; 551 552 filp = (struct linux_file *)fp->f_data; 553 filp->f_flags = fp->f_flag; 
554 error = 0; 555 556 switch (cmd) { 557 case FIONBIO: 558 break; 559 case FIOASYNC: 560 if (filp->f_op->fasync == NULL) 561 break; 562 error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); 563 break; 564 case FIOSETOWN: 565 error = fsetown(*(int *)data, &filp->f_sigio); 566 if (error == 0) 567 error = filp->f_op->fasync(0, filp, 568 fp->f_flag & FASYNC); 569 break; 570 case FIOGETOWN: 571 *(int *)data = fgetown(&filp->f_sigio); 572 break; 573 default: 574 error = ENOTTY; 575 break; 576 } 577 return (error); 578} 579 580struct fileops linuxfileops = { 581 .fo_read = linux_file_read, 582 .fo_poll = linux_file_poll, 583 .fo_close = linux_file_close, 584 .fo_ioctl = linux_file_ioctl, 585 .fo_chmod = invfo_chmod, 586 .fo_chown = invfo_chown, 587 .fo_sendfile = invfo_sendfile, 588}; 589 590/* 591 * Hash of vmmap addresses. This is infrequently accessed and does not 592 * need to be particularly large. This is done because we must store the 593 * caller's idea of the map size to properly unmap. 
594 */ 595struct vmmap { 596 LIST_ENTRY(vmmap) vm_next; 597 void *vm_addr; 598 unsigned long vm_size; 599}; 600 601struct vmmaphd { 602 struct vmmap *lh_first; 603}; 604#define VMMAP_HASH_SIZE 64 605#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 606#define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 607static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 608static struct mtx vmmaplock; 609 610static void 611vmmap_add(void *addr, unsigned long size) 612{ 613 struct vmmap *vmmap; 614 615 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 616 mtx_lock(&vmmaplock); 617 vmmap->vm_size = size; 618 vmmap->vm_addr = addr; 619 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 620 mtx_unlock(&vmmaplock); 621} 622 623static struct vmmap * 624vmmap_remove(void *addr) 625{ 626 struct vmmap *vmmap; 627 628 mtx_lock(&vmmaplock); 629 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 630 if (vmmap->vm_addr == addr) 631 break; 632 if (vmmap) 633 LIST_REMOVE(vmmap, vm_next); 634 mtx_unlock(&vmmaplock); 635 636 return (vmmap); 637} 638 639void * 640_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 641{ 642 void *addr; 643 644 addr = pmap_mapdev_attr(phys_addr, size, attr); 645 if (addr == NULL) 646 return (NULL); 647 vmmap_add(addr, size); 648 649 return (addr); 650} 651 652void 653iounmap(void *addr) 654{ 655 struct vmmap *vmmap; 656 657 vmmap = vmmap_remove(addr); 658 if (vmmap == NULL) 659 return; 660 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 661 kfree(vmmap); 662} 663 664 665void * 666vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 667{ 668 vm_offset_t off; 669 size_t size; 670 671 size = count * PAGE_SIZE; 672 off = kva_alloc(size); 673 if (off == 0) 674 return (NULL); 675 vmmap_add((void *)off, size); 676 pmap_qenter(off, pages, count); 677 678 return ((void *)off); 679} 680 681void 682vunmap(void *addr) 683{ 684 struct vmmap *vmmap; 685 686 vmmap = vmmap_remove(addr); 687 if (vmmap == NULL) 688 
return; 689 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 690 kva_free((vm_offset_t)addr, vmmap->vm_size); 691 kfree(vmmap); 692} 693 694 695char * 696kasprintf(gfp_t gfp, const char *fmt, ...) 697{ 698 va_list ap; 699 char *p; 700 701 va_start(ap, fmt); 702 p = kvasprintf(gfp, fmt, ap); 703 va_end(ap); 704 705 return p; 706} 707 708static int 709linux_timer_jiffies_until(unsigned long expires) 710{ 711 int delta = expires - jiffies; 712 /* guard against already expired values */ 713 if (delta < 1) 714 delta = 1; 715 return (delta); 716} 717 718static void 719linux_timer_callback_wrapper(void *context) 720{ 721 struct timer_list *timer; 722 723 timer = context; 724 timer->function(timer->data); 725} 726 727void 728mod_timer(struct timer_list *timer, unsigned long expires) 729{ 730 731 timer->expires = expires; 732 callout_reset(&timer->timer_callout, 733 linux_timer_jiffies_until(expires), 734 &linux_timer_callback_wrapper, timer); 735} 736 737void 738add_timer(struct timer_list *timer) 739{ 740 741 callout_reset(&timer->timer_callout, 742 linux_timer_jiffies_until(timer->expires), 743 &linux_timer_callback_wrapper, timer); 744} 745 746static void 747linux_timer_init(void *arg) 748{ 749 750 /* 751 * Compute an internal HZ value which can divide 2**32 to 752 * avoid timer rounding problems when the tick value wraps 753 * around 2**32: 754 */ 755 linux_timer_hz_mask = 1; 756 while (linux_timer_hz_mask < (unsigned long)hz) 757 linux_timer_hz_mask *= 2; 758 linux_timer_hz_mask--; 759} 760SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 761 762void 763linux_complete_common(struct completion *c, int all) 764{ 765 int wakeup_swapper; 766 767 sleepq_lock(c); 768 c->done++; 769 if (all) 770 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 771 else 772 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 773 sleepq_release(c); 774 if (wakeup_swapper) 775 kick_proc0(); 776} 777 778/* 779 * Indefinite wait for done != 0 
 * with or without signals.
 */
long
linux_wait_for_common(struct completion *c, int flags)
{

	/* Nonzero flags means the wait may be interrupted by a signal. */
	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;	/* exits the loop with the chain lock held */
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			/*
			 * sleepq_wait_sig() drops the chain lock, so the
			 * signal-interrupted path returns without an
			 * explicit sleepq_release().  Note: done is NOT
			 * consumed when interrupted.
			 */
			if (sleepq_wait_sig(c, 0) != 0)
				return (-ERESTARTSYS);
		} else
			sleepq_wait(c, 0);
	}
	/* Consume one completion while still holding the chain lock. */
	c->done--;
	sleepq_release(c);

	return (0);
}

/*
 * Time limited wait for done != 0 with or without signals.
 */
long
linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
{
	/* Absolute deadline in jiffies. */
	long end = jiffies + timeout;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		int ret;

		sleepq_lock(c);
		if (c->done)
			break;	/* chain lock still held */
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
		/* sleepq_timedwait*() drops the chain lock before returning. */
		if (flags & SLEEPQ_INTERRUPTIBLE)
			ret = sleepq_timedwait_sig(c, 0);
		else
			ret = sleepq_timedwait(c, 0);
		if (ret != 0) {
			/* check for timeout or signal */
			if (ret == EWOULDBLOCK)
				return (0);
			else
				return (-ERESTARTSYS);
		}
	}
	c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	/* linux_timer_jiffies_until() clamps to >= 1, so success never
	 * reads as a timeout to the caller. */
	return (linux_timer_jiffies_until(end));
}

/*
 * Non-blocking attempt to consume a completion; returns 1 if one was
 * available (and consumed), 0 otherwise.
 */
int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done)
		c->done--;
	else
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

/*
 * Peek at a completion without consuming it; returns nonzero when at
 * least one completion is pending.
 */
int
linux_completion_done(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done == 0)
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

/*
 * Boot-time initialization of the Linux compat layer: build the sysctl
 * backing tree for the emulated sysfs, register the root device and the
 * misc class, and set up the PCI lists and vmmap hash.
 */
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

	rootoid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(),
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&class_root, &class_ktype);
	kobject_set_name(&class_root, "class");
	class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_rootdev.kobj, &dev_ktype);
	kobject_set_name(&linux_rootdev.kobj, "device");
	/*
	 * NOTE(review): unlike "sys" and "class" above, this node lacks
	 * CTLFLAG_MPSAFE — presumably an oversight; confirm before
	 * changing.
	 */
	linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_rootdev.bsddev = root_bus;
	miscclass.name = "misc";
	class_register(&miscclass);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}

SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

/* Module teardown: free the names of the statically allocated kobjects. */
static void
linux_compat_uninit(void *arg)
{
	kobject_kfree_name(&class_root);
	kobject_kfree_name(&linux_rootdev.kobj);
	kobject_kfree_name(&miscclass.kobj);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);