1/* 2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 30/* 31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol 32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for 33 * an external kext to link against. 34 */ 35 36#if CONFIG_DTRACE 37 38#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */ 39#include <kern/thread.h> 40#include <mach/thread_status.h> 41 42#include <stdarg.h> 43#include <string.h> 44#include <sys/malloc.h> 45#include <sys/time.h> 46#include <sys/proc.h> 47#include <sys/proc_internal.h> 48#include <sys/kauth.h> 49#include <sys/user.h> 50#include <sys/systm.h> 51#include <sys/dtrace.h> 52#include <sys/dtrace_impl.h> 53#include <libkern/OSAtomic.h> 54#include <kern/kern_types.h> 55#include <kern/timer_call.h> 56#include <kern/thread_call.h> 57#include <kern/task.h> 58#include <kern/sched_prim.h> 59#include <kern/queue.h> 60#include <miscfs/devfs/devfs.h> 61#include <kern/kalloc.h> 62 63#include <mach/vm_param.h> 64#include <mach/mach_vm.h> 65#include <mach/task.h> 66#include <vm/pmap.h> 67#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */ 68 69 70/* 71 * pid/proc 72 */ 73/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */ 74#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */ 75 76/* Not called from probe context */ 77proc_t * 78sprlock(pid_t pid) 79{ 80 proc_t* p; 81 82 if ((p = proc_find(pid)) == PROC_NULL) { 83 return PROC_NULL; 84 } 85 86 task_suspend(p->task); 87 88 proc_lock(p); 89 90 lck_mtx_lock(&p->p_dtrace_sprlock); 91 92 return p; 93} 94 95/* Not called from probe context */ 96void 97sprunlock(proc_t *p) 98{ 99 if (p != PROC_NULL) { 100 lck_mtx_unlock(&p->p_dtrace_sprlock); 101 102 proc_unlock(p); 103 104 task_resume(p->task); 105 106 proc_rele(p); 107 } 108} 109 110/* 111 * uread/uwrite 112 */ 113 114// These are not exported from vm_map.h. 115extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size); 116extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size); 117 118/* Not called from probe context */ 119int 120uread(proc_t *p, void *buf, user_size_t len, user_addr_t a) 121{ 122 kern_return_t ret; 123 124 ASSERT(p != PROC_NULL); 125 ASSERT(p->task != NULL); 126 127 task_t task = p->task; 128 129 /* 130 * Grab a reference to the task vm_map_t to make sure 131 * the map isn't pulled out from under us. 132 * 133 * Because the proc_lock is not held at all times on all code 134 * paths leading here, it is possible for the proc to have 135 * exited. If the map is null, fail. 136 */ 137 vm_map_t map = get_task_map_reference(task); 138 if (map) { 139 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len); 140 vm_map_deallocate(map); 141 } else 142 ret = KERN_TERMINATED; 143 144 return (int)ret; 145} 146 147 148/* Not called from probe context */ 149int 150uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a) 151{ 152 kern_return_t ret; 153 154 ASSERT(p != NULL); 155 ASSERT(p->task != NULL); 156 157 task_t task = p->task; 158 159 /* 160 * Grab a reference to the task vm_map_t to make sure 161 * the map isn't pulled out from under us. 162 * 163 * Because the proc_lock is not held at all times on all code 164 * paths leading here, it is possible for the proc to have 165 * exited. If the map is null, fail. 166 */ 167 vm_map_t map = get_task_map_reference(task); 168 if (map) { 169 /* Find the memory permissions. */ 170 uint32_t nestingDepth=999999; 171 vm_region_submap_short_info_data_64_t info; 172 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; 173 mach_vm_address_t address = (mach_vm_address_t)a; 174 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len; 175 176 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count); 177 if (ret != KERN_SUCCESS) 178 goto done; 179 180 vm_prot_t reprotect; 181 182 if (!(info.protection & VM_PROT_WRITE)) { 183 /* Save the original protection values for restoration later */ 184 reprotect = info.protection; 185 186 if (info.max_protection & VM_PROT_WRITE) { 187 /* The memory is not currently writable, but can be made writable. */ 188 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE); 189 } else { 190 /* 191 * The memory is not currently writable, and cannot be made writable. We need to COW this memory. 192 * 193 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails. 194 */ 195 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE); 196 } 197 198 if (ret != KERN_SUCCESS) 199 goto done; 200 201 } else { 202 /* The memory was already writable. */ 203 reprotect = VM_PROT_NONE; 204 } 205 206 ret = vm_map_write_user( map, 207 buf, 208 (vm_map_address_t)a, 209 (vm_size_t)len); 210 211 if (ret != KERN_SUCCESS) 212 goto done; 213 214 if (reprotect != VM_PROT_NONE) { 215 ASSERT(reprotect & VM_PROT_EXECUTE); 216 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect); 217 } 218 219done: 220 vm_map_deallocate(map); 221 } else 222 ret = KERN_TERMINATED; 223 224 return (int)ret; 225} 226 227/* 228 * cpuvar 229 */ 230lck_mtx_t cpu_lock; 231lck_mtx_t mod_lock; 232 233dtrace_cpu_t *cpu_list; 234cpu_core_t *cpu_core; /* XXX TLB lockdown? */ 235 236/* 237 * cred_t 238 */ 239 240/* 241 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since 242 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock. 243 */ 244cred_t * 245dtrace_CRED(void) 246{ 247 struct uthread *uthread = get_bsdthread_info(current_thread()); 248 249 if (uthread == NULL) 250 return NULL; 251 else 252 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */ 253} 254 255#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr)) 256#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \ 257 HAS_ALLPRIVS(cr) : \ 258 PRIV_ISASSERT(&CR_OEPRIV(cr), pr)) 259 260int PRIV_POLICY_CHOICE(void* cred, int priv, int all) 261{ 262#pragma unused(priv, all) 263 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */ 264} 265 266int 267PRIV_POLICY_ONLY(void *cr, int priv, int boolean) 268{ 269#pragma unused(priv, boolean) 270 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */ 271} 272 273/* XXX Get around const poisoning using structure assigns */ 274gid_t 275crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(©_cr); } 276 277uid_t 278crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(©_cr); } 279 280/* 281 * "cyclic" 282 */ 283 284typedef struct wrap_timer_call { 285 cyc_handler_t hdlr; 286 cyc_time_t when; 287 uint64_t deadline; 288 struct timer_call call; 289} wrap_timer_call_t; 290 291#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL 292#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL 293 294static void 295_timer_call_apply_cyclic( void *ignore, void *vTChdl ) 296{ 297#pragma unused(ignore) 298 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl; 299 300 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); 301 302 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); 303 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL ); 304 305 /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */ 306 if (wrapTC->when.cyt_interval == WAKEUP_REAPER) 307 thread_wakeup((event_t)wrapTC); 308} 309 310static cyclic_id_t 311timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when) 312{ 313 uint64_t now; 314 315 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL ); 316 wrapTC->hdlr = *handler; 317 wrapTC->when = *when; 318 319 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval ); 320 321 now = mach_absolute_time(); 322 wrapTC->deadline = now; 323 324 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); 325 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL ); 326 327 return (cyclic_id_t)wrapTC; 328} 329 330static void 331timer_call_remove_cyclic(cyclic_id_t cyclic) 332{ 333 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic; 334 335 while (!timer_call_cancel(&(wrapTC->call))) { 336 int ret = assert_wait(wrapTC, THREAD_UNINT); 337 ASSERT(ret == THREAD_WAITING); 338 339 wrapTC->when.cyt_interval = WAKEUP_REAPER; 340 341 ret = thread_block(THREAD_CONTINUE_NULL); 342 ASSERT(ret == THREAD_AWAKENED); 343 } 344} 345 346static void * 347timer_call_get_cyclic_arg(cyclic_id_t cyclic) 348{ 349 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic; 350 351 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL); 352} 353 354cyclic_id_t 355cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when) 356{ 357 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); 358 if (NULL == wrapTC) 359 return CYCLIC_NONE; 360 else 361 return timer_call_add_cyclic( wrapTC, handler, when ); 362} 363 364void 365cyclic_timer_remove(cyclic_id_t cyclic) 366{ 367 ASSERT( cyclic != CYCLIC_NONE ); 368 369 timer_call_remove_cyclic( cyclic ); 370 _FREE((void *)cyclic, M_TEMP); 371} 372 373static void 374_cyclic_add_omni(cyclic_id_list_t cyc_list) 375{ 376 cyc_time_t cT; 377 cyc_handler_t cH; 378 wrap_timer_call_t *wrapTC; 379 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list; 380 char *t; 381 382 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT); 383 384 t = (char *)cyc_list; 385 t += sizeof(cyc_omni_handler_t); 386 cyc_list = (cyclic_id_list_t)(uintptr_t)t; 387 388 t += sizeof(cyclic_id_t)*NCPU; 389 t += (sizeof(wrap_timer_call_t))*cpu_number(); 390 wrapTC = (wrap_timer_call_t *)(uintptr_t)t; 391 392 cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT); 393} 394 395cyclic_id_list_t 396cyclic_add_omni(cyc_omni_handler_t *omni) 397{ 398 cyclic_id_list_t cyc_list = 399 _MALLOC( (sizeof(wrap_timer_call_t))*NCPU + 400 sizeof(cyclic_id_t)*NCPU + 401 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK); 402 if (NULL == cyc_list) 403 return (cyclic_id_list_t)CYCLIC_NONE; 404 405 *(cyc_omni_handler_t *)cyc_list = *omni; 406 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list); 407 408 return cyc_list; 409} 410 411static void 412_cyclic_remove_omni(cyclic_id_list_t cyc_list) 413{ 414 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list; 415 void *oarg; 416 cyclic_id_t cid; 417 char *t; 418 419 t = (char *)cyc_list; 420 t += sizeof(cyc_omni_handler_t); 421 cyc_list = (cyclic_id_list_t)(uintptr_t)t; 422 423 cid = cyc_list[cpu_number()]; 424 oarg = timer_call_get_cyclic_arg(cid); 425 426 timer_call_remove_cyclic( cid ); 427 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg); 428} 429 430void 431cyclic_remove_omni(cyclic_id_list_t cyc_list) 432{ 433 ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE ); 434 435 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list); 436 _FREE(cyc_list, M_TEMP); 437} 438 439typedef struct wrap_thread_call { 440 thread_call_t TChdl; 441 cyc_handler_t hdlr; 442 cyc_time_t when; 443 uint64_t deadline; 444} wrap_thread_call_t; 445 446/* 447 * _cyclic_apply will run on some thread under kernel_task. That's OK for the 448 * cleaner and the deadman, but too distant in time and place for the profile provider. 449 */ 450static void 451_cyclic_apply( void *ignore, void *vTChdl ) 452{ 453#pragma unused(ignore) 454 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl; 455 456 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); 457 458 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); 459 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); 460 461 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */ 462 if (wrapTC->when.cyt_interval == WAKEUP_REAPER) 463 thread_wakeup((event_t)wrapTC); 464} 465 466cyclic_id_t 467cyclic_add(cyc_handler_t *handler, cyc_time_t *when) 468{ 469 uint64_t now; 470 471 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK); 472 if (NULL == wrapTC) 473 return CYCLIC_NONE; 474 475 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL ); 476 wrapTC->hdlr = *handler; 477 wrapTC->when = *when; 478 479 ASSERT(when->cyt_when == 0); 480 ASSERT(when->cyt_interval < WAKEUP_REAPER); 481 482 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval); 483 484 now = mach_absolute_time(); 485 wrapTC->deadline = now; 486 487 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); 488 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); 489 490 return (cyclic_id_t)wrapTC; 491} 492 493static void 494noop_cyh_func(void * ignore) 495{ 496#pragma unused(ignore) 497} 498 499void 500cyclic_remove(cyclic_id_t cyclic) 501{ 502 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic; 503 504 ASSERT(cyclic != CYCLIC_NONE); 505 506 while (!thread_call_cancel(wrapTC->TChdl)) { 507 int ret = assert_wait(wrapTC, THREAD_UNINT); 508 ASSERT(ret == THREAD_WAITING); 509 510 wrapTC->when.cyt_interval = WAKEUP_REAPER; 511 512 ret = thread_block(THREAD_CONTINUE_NULL); 513 ASSERT(ret == THREAD_AWAKENED); 514 } 515 516 if (thread_call_free(wrapTC->TChdl)) 517 _FREE(wrapTC, M_TEMP); 518 else { 519 /* Gut this cyclic and move on ... */ 520 wrapTC->hdlr.cyh_func = noop_cyh_func; 521 wrapTC->when.cyt_interval = NEARLY_FOREVER; 522 } 523} 524 525/* 526 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision) 527 */ 528 529thread_call_t 530dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos) 531{ 532#pragma unused(arg) 533 thread_call_t call = thread_call_allocate(func, NULL); 534 535 nanoseconds_to_absolutetime(nanos, &nanos); 536 537 /* 538 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot, 539 * and clock drift on later invocations is not a worry. 540 */ 541 uint64_t deadline = mach_absolute_time() + nanos; 542 /* DRK: consider using a lower priority callout here */ 543 thread_call_enter_delayed(call, deadline); 544 545 return call; 546} 547 548/* 549 * ddi 550 */ 551void 552ddi_report_dev(dev_info_t *devi) 553{ 554#pragma unused(devi) 555} 556 557#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */ 558static void *soft[NSOFT_STATES]; 559 560int 561ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 562{ 563#pragma unused(n_items) 564 int i; 565 566 for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK); 567 *(size_t *)state_p = size; 568 return 0; 569} 570 571int 572ddi_soft_state_zalloc(void *state, int item) 573{ 574#pragma unused(state) 575 if (item < NSOFT_STATES) 576 return DDI_SUCCESS; 577 else 578 return DDI_FAILURE; 579} 580 581void * 582ddi_get_soft_state(void *state, int item) 583{ 584#pragma unused(state) 585 ASSERT(item < NSOFT_STATES); 586 return soft[item]; 587} 588 589int 590ddi_soft_state_free(void *state, int item) 591{ 592 ASSERT(item < NSOFT_STATES); 593 bzero( soft[item], (size_t)state ); 594 return DDI_SUCCESS; 595} 596 597void 598ddi_soft_state_fini(void **state_p) 599{ 600#pragma unused(state_p) 601 int i; 602 603 for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP ); 604} 605 606static unsigned int gRegisteredProps = 0; 607static struct { 608 char name[32]; /* enough for "dof-data-" + digits */ 609 int *data; 610 uint_t nelements; 611} gPropTable[16]; 612 613kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t); 614 615kern_return_t 616_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements) 617{ 618 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) { 619 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK); 620 621 if (NULL == p) 622 return KERN_FAILURE; 623 624 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name)); 625 gPropTable[gRegisteredProps].nelements = nelements; 626 gPropTable[gRegisteredProps].data = p; 627 628 while (nelements-- > 0) { 629 *p++ = (int)(*data++); 630 } 631 632 gRegisteredProps++; 633 return KERN_SUCCESS; 634 } 635 else 636 return KERN_FAILURE; 637} 638 639int 640ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 641 const char *name, int **data, uint_t *nelements) 642{ 643#pragma unused(match_dev,dip,flags) 644 unsigned int i; 645 for (i = 0; i < gRegisteredProps; ++i) 646 { 647 if (0 == strncmp(name, gPropTable[i].name, 648 sizeof(gPropTable[i].name))) { 649 *data = gPropTable[i].data; 650 *nelements = gPropTable[i].nelements; 651 return DDI_SUCCESS; 652 } 653 } 654 return DDI_FAILURE; 655} 656 657int 658ddi_prop_free(void *buf) 659{ 660 _FREE(buf, M_TEMP); 661 return DDI_SUCCESS; 662} 663 664int 665ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); } 666 667int 668ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type, 669 minor_t minor_num, const char *node_type, int flag) 670{ 671#pragma unused(spec_type,node_type,flag) 672 dev_t dev = makedev( ddi_driver_major(dip), minor_num ); 673 674 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) 675 return DDI_FAILURE; 676 else 677 return DDI_SUCCESS; 678} 679 680void 681ddi_remove_minor_node(dev_info_t *dip, char *name) 682{ 683#pragma unused(dip,name) 684/* XXX called from dtrace_detach, so NOTREACHED for now. */ 685} 686 687major_t 688getemajor( dev_t d ) 689{ 690 return (major_t) major(d); 691} 692 693minor_t 694getminor ( dev_t d ) 695{ 696 return (minor_t) minor(d); 697} 698 699dev_t 700makedevice(major_t major, minor_t minor) 701{ 702 return makedev( major, minor ); 703} 704 705int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue) 706{ 707#pragma unused(dev, dip, flags, name) 708 709 return defvalue; 710} 711 712/* 713 * Kernel Debug Interface 714 */ 715int 716kdi_dtrace_set(kdi_dtrace_set_t ignore) 717{ 718#pragma unused(ignore) 719 return 0; /* Success */ 720} 721 722extern void Debugger(const char*); 723 724void 725debug_enter(char *c) { Debugger(c); } 726 727/* 728 * kmem 729 */ 730 731void * 732dt_kmem_alloc(size_t size, int kmflag) 733{ 734#pragma unused(kmflag) 735 736/* 737 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). 738 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. 739 */ 740#if defined(DTRACE_MEMORY_ZONES) 741 return dtrace_alloc(size); 742#else 743 return kalloc(size); 744#endif 745} 746 747void * 748dt_kmem_zalloc(size_t size, int kmflag) 749{ 750#pragma unused(kmflag) 751 752/* 753 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). 754 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. 755 */ 756#if defined(DTRACE_MEMORY_ZONES) 757 void* buf = dtrace_alloc(size); 758#else 759 void* buf = kalloc(size); 760#endif 761 762 if(!buf) 763 return NULL; 764 765 bzero(buf, size); 766 767 return buf; 768} 769 770void 771dt_kmem_free(void *buf, size_t size) 772{ 773#pragma unused(size) 774 /* 775 * DTrace relies on this, its doing a lot of NULL frees. 776 * A null free causes the debug builds to panic. 777 */ 778 if (buf == NULL) return; 779 780 ASSERT(size > 0); 781 782#if defined(DTRACE_MEMORY_ZONES) 783 dtrace_free(buf, size); 784#else 785 kfree(buf, size); 786#endif 787} 788 789 790 791/* 792 * aligned kmem allocator 793 * align should be a power of two 794 */ 795 796void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag) 797{ 798 void* buf; 799 intptr_t p; 800 void** buf_backup; 801 802 buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag); 803 804 if(!buf) 805 return NULL; 806 807 p = (intptr_t)buf; 808 p += sizeof(void*); /* now we have enough room to store the backup */ 809 p = P2ROUNDUP(p, align); /* and now we're aligned */ 810 811 buf_backup = (void**)(p - sizeof(void*)); 812 *buf_backup = buf; /* back up the address we need to free */ 813 814 return (void*)p; 815} 816 817void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag) 818{ 819 void* buf; 820 821 buf = dt_kmem_alloc_aligned(size, align, kmflag); 822 823 if(!buf) 824 return NULL; 825 826 bzero(buf, size); 827 828 return buf; 829} 830 831void dt_kmem_free_aligned(void* buf, size_t size) 832{ 833#pragma unused(size) 834 intptr_t p; 835 void** buf_backup; 836 837 p = (intptr_t)buf; 838 p -= sizeof(void*); 839 buf_backup = (void**)(p); 840 841 dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup)); 842} 843 844/* 845 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and 846 * doesn't specify constructor, destructor, or reclaim methods. 847 * At present, it always zeroes the block it obtains from kmem_cache_alloc(). 848 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE. 849 */ 850kmem_cache_t * 851kmem_cache_create( 852 const char *name, /* descriptive name for this cache */ 853 size_t bufsize, /* size of the objects it manages */ 854 size_t align, /* required object alignment */ 855 int (*constructor)(void *, void *, int), /* object constructor */ 856 void (*destructor)(void *, void *), /* object destructor */ 857 void (*reclaim)(void *), /* memory reclaim callback */ 858 void *private, /* pass-thru arg for constr/destr/reclaim */ 859 vmem_t *vmp, /* vmem source for slab allocation */ 860 int cflags) /* cache creation flags */ 861{ 862#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags) 863 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */ 864} 865 866void * 867kmem_cache_alloc(kmem_cache_t *cp, int kmflag) 868{ 869#pragma unused(kmflag) 870 size_t bufsize = (size_t)cp; 871 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK); 872} 873 874void 875kmem_cache_free(kmem_cache_t *cp, void *buf) 876{ 877#pragma unused(cp) 878 _FREE(buf, M_TEMP); 879} 880 881void 882kmem_cache_destroy(kmem_cache_t *cp) 883{ 884#pragma unused(cp) 885} 886 887/* 888 * taskq 889 */ 890extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */ 891 892static void 893_taskq_apply( task_func_t func, thread_call_param_t arg ) 894{ 895 func( (void *)arg ); 896} 897 898taskq_t * 899taskq_create(const char *name, int nthreads, pri_t pri, int minalloc, 900 int maxalloc, uint_t flags) 901{ 902#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags) 903 904 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL ); 905} 906 907taskqid_t 908taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) 909{ 910#pragma unused(flags) 911 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func ); 912 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg ); 913 return (taskqid_t) tq /* for lack of anything better */; 914} 915 916void 917taskq_destroy(taskq_t *tq) 918{ 919 thread_call_cancel( (thread_call_t) tq ); 920 thread_call_free( (thread_call_t) tq ); 921} 922 923pri_t maxclsyspri; 924 925/* 926 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids 927 */ 928typedef unsigned int u_daddr_t; 929#include "blist.h" 930 931/* By passing around blist *handles*, the underlying blist can be resized as needed. */ 932struct blist_hdl { 933 blist_t blist; 934}; 935 936vmem_t * 937vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5, 938 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag) 939{ 940#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag) 941 blist_t bl; 942 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK); 943 944 ASSERT(quantum == 1); 945 ASSERT(NULL == ignore5); 946 ASSERT(NULL == ignore6); 947 ASSERT(NULL == source); 948 ASSERT(0 == qcache_max); 949 ASSERT(vmflag & VMC_IDENTIFIER); 950 951 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */ 952 953 p->blist = bl = blist_create( size ); 954 blist_free(bl, 0, size); 955 if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */ 956 957 return (vmem_t *)p; 958} 959 960void * 961vmem_alloc(vmem_t *vmp, size_t size, int vmflag) 962{ 963#pragma unused(vmflag) 964 struct blist_hdl *q = (struct blist_hdl *)vmp; 965 blist_t bl = q->blist; 966 daddr_t p; 967 968 p = blist_alloc(bl, (daddr_t)size); 969 970 if ((daddr_t)-1 == p) { 971 blist_resize(&bl, (bl->bl_blocks) << 1, 1); 972 q->blist = bl; 973 p = blist_alloc(bl, (daddr_t)size); 974 if ((daddr_t)-1 == p) 975 panic("vmem_alloc: failure after blist_resize!"); 976 } 977 978 return (void *)(uintptr_t)p; 979} 980 981void 982vmem_free(vmem_t *vmp, void *vaddr, size_t size) 983{ 984 struct blist_hdl *p = (struct blist_hdl *)vmp; 985 986 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size ); 987} 988 989void 990vmem_destroy(vmem_t *vmp) 991{ 992 struct blist_hdl *p = (struct blist_hdl *)vmp; 993 994 blist_destroy( p->blist ); 995 _FREE( p, sizeof(struct blist_hdl) ); 996} 997 998/* 999 * Timing 1000 */ 1001 1002/* 1003 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at 1004 * January 1, 1970. Because it can be called from probe context, it must take no locks. 1005 */ 1006 1007hrtime_t 1008dtrace_gethrestime(void) 1009{ 1010 clock_sec_t secs; 1011 clock_nsec_t nanosecs; 1012 uint64_t secs64, ns64; 1013 1014 clock_get_calendar_nanotime_nowait(&secs, &nanosecs); 1015 secs64 = (uint64_t)secs; 1016 ns64 = (uint64_t)nanosecs; 1017 1018 ns64 = ns64 + (secs64 * 1000000000LL); 1019 return ns64; 1020} 1021 1022/* 1023 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin. 1024 * Hence its primary use is to specify intervals. 1025 */ 1026 1027hrtime_t 1028dtrace_abs_to_nano(uint64_t elapsed) 1029{ 1030 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 }; 1031 1032 /* 1033 * If this is the first time we've run, get the timebase. 1034 * We can use denom == 0 to indicate that sTimebaseInfo is 1035 * uninitialised because it makes no sense to have a zero 1036 * denominator in a fraction. 1037 */ 1038 1039 if ( sTimebaseInfo.denom == 0 ) { 1040 (void) clock_timebase_info(&sTimebaseInfo); 1041 } 1042 1043 /* 1044 * Convert to nanoseconds. 1045 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom; 1046 * 1047 * Provided the final result is representable in 64 bits the following maneuver will 1048 * deliver that result without intermediate overflow. 1049 */ 1050 if (sTimebaseInfo.denom == sTimebaseInfo.numer) 1051 return elapsed; 1052 else if (sTimebaseInfo.denom == 1) 1053 return elapsed * (uint64_t)sTimebaseInfo.numer; 1054 else { 1055 /* Decompose elapsed = eta32 * 2^32 + eps32: */ 1056 uint64_t eta32 = elapsed >> 32; 1057 uint64_t eps32 = elapsed & 0x00000000ffffffffLL; 1058 1059 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom; 1060 1061 /* Form product of elapsed64 (decomposed) and numer: */ 1062 uint64_t mu64 = numer * eta32; 1063 uint64_t lambda64 = numer * eps32; 1064 1065 /* Divide the constituents by denom: */ 1066 uint64_t q32 = mu64/denom; 1067 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */ 1068 1069 return (q32 << 32) + ((r32 << 32) + lambda64)/denom; 1070 } 1071} 1072 1073hrtime_t 1074dtrace_gethrtime(void) 1075{ 1076 static uint64_t start = 0; 1077 1078 if (start == 0) 1079 start = mach_absolute_time(); 1080 1081 return dtrace_abs_to_nano(mach_absolute_time() - start); 1082} 1083 1084/* 1085 * Atomicity and synchronization 1086 */ 1087uint32_t 1088dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new) 1089{ 1090 if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) 1091 return cmp; 1092 else 1093 return ~cmp; /* Must return something *other* than cmp */ 1094} 1095 1096void * 1097dtrace_casptr(void *target, void *cmp, void *new) 1098{ 1099 if (OSCompareAndSwapPtr( cmp, new, (void**)target )) 1100 return cmp; 1101 else 1102 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */ 1103} 1104 1105/* 1106 * Interrupt manipulation 1107 */ 1108dtrace_icookie_t 1109dtrace_interrupt_disable(void) 1110{ 1111 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE); 1112} 1113 1114void 1115dtrace_interrupt_enable(dtrace_icookie_t reenable) 1116{ 1117 (void)ml_set_interrupts_enabled((boolean_t)reenable); 1118} 1119 1120/* 1121 * MP coordination 1122 */ 1123static void 1124dtrace_sync_func(void) {} 1125 1126/* 1127 * dtrace_sync() is not called from probe context. 1128 */ 1129void 1130dtrace_sync(void) 1131{ 1132 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL); 1133} 1134 1135/* 1136 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context. 1137 */ 1138 1139extern kern_return_t dtrace_copyio_preflight(addr64_t); 1140extern kern_return_t dtrace_copyio_postflight(addr64_t); 1141 1142static int 1143dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size) 1144{ 1145#pragma unused(kaddr) 1146 1147 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */ 1148 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */ 1149 1150 ASSERT(kaddr + size >= kaddr); 1151 1152 if ( uaddr + size < uaddr || /* Avoid address wrap. */ 1153 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */ 1154 { 1155 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1156 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1157 return (0); 1158 } 1159 return (1); 1160} 1161 1162void 1163dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) 1164{ 1165#pragma unused(flags) 1166 1167 if (dtrace_copycheck( src, dst, len )) { 1168 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) { 1169 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1170 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src; 1171 } 1172 dtrace_copyio_postflight(src); 1173 } 1174} 1175 1176void 1177dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) 1178{ 1179#pragma unused(flags) 1180 1181 size_t actual; 1182 1183 if (dtrace_copycheck( src, dst, len )) { 1184 /* copyin as many as 'len' bytes. */ 1185 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual); 1186 1187 /* 1188 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was 1189 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on. 1190 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left 1191 * to the caller. 1192 */ 1193 if (error && error != ENAMETOOLONG) { 1194 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1195 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src; 1196 } 1197 dtrace_copyio_postflight(src); 1198 } 1199} 1200 1201void 1202dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) 1203{ 1204#pragma unused(flags) 1205 1206 if (dtrace_copycheck( dst, src, len )) { 1207 if (copyout((const void *)src, dst, (vm_size_t)len)) { 1208 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1209 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst; 1210 } 1211 dtrace_copyio_postflight(dst); 1212 } 1213} 1214 1215void 1216dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) 1217{ 1218#pragma unused(flags) 1219 1220 size_t actual; 1221 1222 if (dtrace_copycheck( dst, src, len )) { 1223 1224 /* 1225 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was 1226 * not encountered. We raise CPU_DTRACE_BADADDR in that case. 1227 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left 1228 * to the caller. 1229 */ 1230 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) { 1231 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1232 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst; 1233 } 1234 dtrace_copyio_postflight(dst); 1235 } 1236} 1237 1238uint8_t 1239dtrace_fuword8(user_addr_t uaddr) 1240{ 1241 uint8_t ret = 0; 1242 1243 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 1244 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { 1245 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { 1246 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1247 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1248 } 1249 dtrace_copyio_postflight(uaddr); 1250 } 1251 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 1252 1253 return(ret); 1254} 1255 1256uint16_t 1257dtrace_fuword16(user_addr_t uaddr) 1258{ 1259 uint16_t ret = 0; 1260 1261 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 1262 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { 1263 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { 1264 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1265 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1266 } 1267 dtrace_copyio_postflight(uaddr); 1268 } 1269 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 1270 1271 return(ret); 1272} 1273 1274uint32_t 1275dtrace_fuword32(user_addr_t uaddr) 1276{ 1277 uint32_t ret = 0; 1278 1279 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 1280 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { 1281 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { 1282 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1283 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1284 } 1285 dtrace_copyio_postflight(uaddr); 1286 } 1287 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 1288 1289 return(ret); 1290} 1291 1292uint64_t 1293dtrace_fuword64(user_addr_t uaddr) 1294{ 1295 uint64_t ret = 0; 1296 1297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 1298 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) { 1299 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) { 1300 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1301 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1302 } 1303 dtrace_copyio_postflight(uaddr); 1304 } 1305 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 1306 1307 return(ret); 1308} 1309 1310/* 1311 * Emulation of Solaris fuword / suword 1312 * Called from the fasttrap provider, so the use of copyin/out requires fewer safegaurds. 1313 */ 1314 1315int 1316fuword8(user_addr_t uaddr, uint8_t *value) 1317{ 1318 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) { 1319 return -1; 1320 } 1321 1322 return 0; 1323} 1324 1325int 1326fuword16(user_addr_t uaddr, uint16_t *value) 1327{ 1328 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) { 1329 return -1; 1330 } 1331 1332 return 0; 1333} 1334 1335int 1336fuword32(user_addr_t uaddr, uint32_t *value) 1337{ 1338 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) { 1339 return -1; 1340 } 1341 1342 return 0; 1343} 1344 1345int 1346fuword64(user_addr_t uaddr, uint64_t *value) 1347{ 1348 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) { 1349 return -1; 1350 } 1351 1352 return 0; 1353} 1354 1355void 1356fuword8_noerr(user_addr_t uaddr, uint8_t *value) 1357{ 1358 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) { 1359 *value = 0; 1360 } 1361} 1362 1363void 1364fuword16_noerr(user_addr_t uaddr, uint16_t *value) 1365{ 1366 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) { 1367 *value = 0; 1368 } 1369} 1370 1371void 1372fuword32_noerr(user_addr_t uaddr, uint32_t *value) 1373{ 1374 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) { 1375 *value = 0; 1376 } 1377} 1378 1379void 1380fuword64_noerr(user_addr_t uaddr, uint64_t *value) 1381{ 1382 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) { 1383 *value = 0; 1384 } 1385} 1386 1387int 1388suword64(user_addr_t addr, uint64_t value) 1389{ 1390 if (copyout((const void *)&value, addr, sizeof(value)) != 0) { 1391 return -1; 1392 } 1393 1394 return 0; 1395} 1396 1397int 1398suword32(user_addr_t addr, uint32_t value) 1399{ 1400 if (copyout((const void *)&value, addr, sizeof(value)) != 0) { 1401 return -1; 1402 } 1403 1404 return 0; 1405} 1406 1407int 1408suword16(user_addr_t addr, uint16_t value) 1409{ 1410 if (copyout((const void *)&value, addr, sizeof(value)) != 0) { 1411 return -1; 1412 } 1413 1414 return 0; 1415} 1416 1417int 1418suword8(user_addr_t addr, uint8_t value) 1419{ 1420 if (copyout((const void *)&value, addr, sizeof(value)) != 0) { 1421 return -1; 1422 } 1423 1424 return 0; 1425} 1426 1427 1428/* 1429 * Miscellaneous 1430 */ 1431extern boolean_t dtrace_tally_fault(user_addr_t); 1432 1433boolean_t 1434dtrace_tally_fault(user_addr_t uaddr) 1435{ 1436 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1437 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 1438 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE ); 1439} 1440 1441#define TOTTY 0x02 1442extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */ 1443 1444int 1445vuprintf(const char *format, va_list ap) 1446{ 1447 return prf(format, ap, TOTTY, NULL); 1448} 1449 1450/* Not called from probe context */ 1451void cmn_err( int level, const char *format, ... ) 1452{ 1453#pragma unused(level) 1454 va_list alist; 1455 1456 va_start(alist, format); 1457 vuprintf(format, alist); 1458 va_end(alist); 1459 uprintf("\n"); 1460} 1461 1462/* 1463 * History: 1464 * 2002-01-24 gvdl Initial implementation of strstr 1465 */ 1466 1467__private_extern__ const char * 1468strstr(const char *in, const char *str) 1469{ 1470 char c; 1471 size_t len; 1472 1473 c = *str++; 1474 if (!c) 1475 return (const char *) in; // Trivial empty string case 1476 1477 len = strlen(str); 1478 do { 1479 char sc; 1480 1481 do { 1482 sc = *in++; 1483 if (!sc) 1484 return (char *) 0; 1485 } while (sc != c); 1486 } while (strncmp(in, str, len) != 0); 1487 1488 return (const char *) (in - 1); 1489} 1490 1491/* 1492 * Runtime and ABI 1493 */ 1494uintptr_t 1495dtrace_caller(int ignore) 1496{ 1497#pragma unused(ignore) 1498 return -1; /* Just as in Solaris dtrace_asm.s */ 1499} 1500 1501int 1502dtrace_getstackdepth(int aframes) 1503{ 1504 struct frame *fp = (struct frame *)__builtin_frame_address(0); 1505 struct frame *nextfp, *minfp, *stacktop; 1506 int depth = 0; 1507 int on_intr; 1508 1509 if ((on_intr = CPU_ON_INTR(CPU)) != 0) 1510 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top(); 1511 else 1512 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size); 1513 1514 minfp = fp; 1515 1516 aframes++; 1517 1518 for (;;) { 1519 depth++; 1520 1521 nextfp = *(struct frame **)fp; 1522 1523 if (nextfp <= minfp || nextfp >= stacktop) { 1524 if (on_intr) { 1525 /* 1526 * Hop from interrupt stack to thread stack. 1527 */ 1528 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread()); 1529 1530 minfp = (struct frame *)kstack_base; 1531 stacktop = (struct frame *)(kstack_base + kernel_stack_size); 1532 1533 on_intr = 0; 1534 continue; 1535 } 1536 break; 1537 } 1538 1539 fp = nextfp; 1540 minfp = fp; 1541 } 1542 1543 if (depth <= aframes) 1544 return (0); 1545 1546 return (depth - aframes); 1547} 1548 1549/* 1550 * Unconsidered 1551 */ 1552void 1553dtrace_vtime_enable(void) {} 1554 1555void 1556dtrace_vtime_disable(void) {} 1557 1558#else /* else ! CONFIG_DTRACE */ 1559 1560#include <sys/types.h> 1561#include <mach/vm_types.h> 1562#include <mach/kmod.h> 1563 1564/* 1565 * This exists to prevent build errors when dtrace is unconfigured. 1566 */ 1567 1568kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t); 1569 1570kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) { 1571#pragma unused(arg1, arg2, arg3) 1572 1573 return KERN_FAILURE; 1574} 1575 1576#endif /* CONFIG_DTRACE */ 1577