fasttrap.c revision 299003
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * Portions Copyright 2010 The FreeBSD Foundation 22 * 23 * $FreeBSD: stable/10/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 299003 2016-05-03 20:08:05Z markj $ 24 */ 25 26/* 27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 28 * Use is subject to license terms. 29 */ 30 31/* 32 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 
33 */ 34 35#include <sys/atomic.h> 36#include <sys/errno.h> 37#include <sys/stat.h> 38#include <sys/modctl.h> 39#include <sys/conf.h> 40#include <sys/systm.h> 41#ifdef illumos 42#include <sys/ddi.h> 43#endif 44#include <sys/sunddi.h> 45#include <sys/cpuvar.h> 46#include <sys/kmem.h> 47#ifdef illumos 48#include <sys/strsubr.h> 49#endif 50#include <sys/fasttrap.h> 51#include <sys/fasttrap_impl.h> 52#include <sys/fasttrap_isa.h> 53#include <sys/dtrace.h> 54#include <sys/dtrace_impl.h> 55#include <sys/sysmacros.h> 56#include <sys/proc.h> 57#include <sys/policy.h> 58#ifdef illumos 59#include <util/qsort.h> 60#endif 61#include <sys/mutex.h> 62#include <sys/kernel.h> 63#ifndef illumos 64#include <sys/dtrace_bsd.h> 65#include <sys/eventhandler.h> 66#include <sys/rmlock.h> 67#include <sys/sysctl.h> 68#include <sys/u8_textprep.h> 69#include <sys/user.h> 70 71#include <vm/vm.h> 72#include <vm/pmap.h> 73#include <vm/vm_map.h> 74#include <vm/vm_param.h> 75 76#include <cddl/dev/dtrace/dtrace_cddl.h> 77#endif 78 79/* 80 * User-Land Trap-Based Tracing 81 * ---------------------------- 82 * 83 * The fasttrap provider allows DTrace consumers to instrument any user-level 84 * instruction to gather data; this includes probes with semantic 85 * signifigance like entry and return as well as simple offsets into the 86 * function. While the specific techniques used are very ISA specific, the 87 * methodology is generalizable to any architecture. 88 * 89 * 90 * The General Methodology 91 * ----------------------- 92 * 93 * With the primary goal of tracing every user-land instruction and the 94 * limitation that we can't trust user space so don't want to rely on much 95 * information there, we begin by replacing the instructions we want to trace 96 * with trap instructions. Each instruction we overwrite is saved into a hash 97 * table keyed by process ID and pc address. 
When we enter the kernel due to 98 * this trap instruction, we need the effects of the replaced instruction to 99 * appear to have occurred before we proceed with the user thread's 100 * execution. 101 * 102 * Each user level thread is represented by a ulwp_t structure which is 103 * always easily accessible through a register. The most basic way to produce 104 * the effects of the instruction we replaced is to copy that instruction out 105 * to a bit of scratch space reserved in the user thread's ulwp_t structure 106 * (a sort of kernel-private thread local storage), set the PC to that 107 * scratch space and single step. When we reenter the kernel after single 108 * stepping the instruction we must then adjust the PC to point to what would 109 * normally be the next instruction. Of course, special care must be taken 110 * for branches and jumps, but these represent such a small fraction of any 111 * instruction set that writing the code to emulate these in the kernel is 112 * not too difficult. 113 * 114 * Return probes may require several tracepoints to trace every return site, 115 * and, conversely, each tracepoint may activate several probes (the entry 116 * and offset 0 probes, for example). To solve this muliplexing problem, 117 * tracepoints contain lists of probes to activate and probes contain lists 118 * of tracepoints to enable. If a probe is activated, it adds its ID to 119 * existing tracepoints or creates new ones as necessary. 120 * 121 * Most probes are activated _before_ the instruction is executed, but return 122 * probes are activated _after_ the effects of the last instruction of the 123 * function are visible. Return probes must be fired _after_ we have 124 * single-stepped the instruction whereas all other probes are fired 125 * beforehand. 126 * 127 * 128 * Lock Ordering 129 * ------------- 130 * 131 * The lock ordering below -- both internally and with respect to the DTrace 132 * framework -- is a little tricky and bears some explanation. 
Each provider 133 * has a lock (ftp_mtx) that protects its members including reference counts 134 * for enabled probes (ftp_rcount), consumers actively creating probes 135 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider 136 * from being freed. A provider is looked up by taking the bucket lock for the 137 * provider hash table, and is returned with its lock held. The provider lock 138 * may be taken in functions invoked by the DTrace framework, but may not be 139 * held while calling functions in the DTrace framework. 140 * 141 * To ensure consistency over multiple calls to the DTrace framework, the 142 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may 143 * not be taken when holding the provider lock as that would create a cyclic 144 * lock ordering. In situations where one would naturally take the provider 145 * lock and then the creation lock, we instead up a reference count to prevent 146 * the provider from disappearing, drop the provider lock, and acquire the 147 * creation lock. 148 * 149 * Briefly: 150 * bucket lock before provider lock 151 * DTrace before provider lock 152 * creation lock before DTrace 153 * never hold the provider lock and creation lock simultaneously 154 */ 155 156static d_open_t fasttrap_open; 157static d_ioctl_t fasttrap_ioctl; 158 159static struct cdevsw fasttrap_cdevsw = { 160 .d_version = D_VERSION, 161 .d_open = fasttrap_open, 162 .d_ioctl = fasttrap_ioctl, 163 .d_name = "fasttrap", 164}; 165static struct cdev *fasttrap_cdev; 166static dtrace_meta_provider_id_t fasttrap_meta_id; 167 168static struct proc *fasttrap_cleanup_proc; 169static struct mtx fasttrap_cleanup_mtx; 170static uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv; 171 172/* 173 * Generation count on modifications to the global tracepoint lookup table. 
174 */ 175static volatile uint64_t fasttrap_mod_gen; 176 177/* 178 * When the fasttrap provider is loaded, fasttrap_max is set to either 179 * FASTTRAP_MAX_DEFAULT, or the value for fasttrap-max-probes in the 180 * fasttrap.conf file (Illumos), or the value provied in the loader.conf (FreeBSD). 181 * Each time a probe is created, fasttrap_total is incremented by the number 182 * of tracepoints that may be associated with that probe; fasttrap_total is capped 183 * at fasttrap_max. 184 */ 185#define FASTTRAP_MAX_DEFAULT 250000 186static uint32_t fasttrap_max = FASTTRAP_MAX_DEFAULT; 187static uint32_t fasttrap_total; 188 189/* 190 * Copyright (c) 2011, Joyent, Inc. All rights reserved. 191 */ 192 193#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000 194#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100 195#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100 196 197#define FASTTRAP_PID_NAME "pid" 198 199fasttrap_hash_t fasttrap_tpoints; 200static fasttrap_hash_t fasttrap_provs; 201static fasttrap_hash_t fasttrap_procs; 202 203static uint64_t fasttrap_pid_count; /* pid ref count */ 204static kmutex_t fasttrap_count_mtx; /* lock on ref count */ 205 206#define FASTTRAP_ENABLE_FAIL 1 207#define FASTTRAP_ENABLE_PARTIAL 2 208 209static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t); 210static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t); 211 212static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *, 213 const dtrace_pattr_t *); 214static void fasttrap_provider_retire(pid_t, const char *, int); 215static void fasttrap_provider_free(fasttrap_provider_t *); 216 217static fasttrap_proc_t *fasttrap_proc_lookup(pid_t); 218static void fasttrap_proc_release(fasttrap_proc_t *); 219 220#ifndef illumos 221static void fasttrap_thread_dtor(void *, struct thread *); 222#endif 223 224#define FASTTRAP_PROVS_INDEX(pid, name) \ 225 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask) 226 227#define FASTTRAP_PROCS_INDEX(pid) ((pid) & 
fasttrap_procs.fth_mask)

#ifndef illumos
struct rmlock fasttrap_tp_lock;
static eventhandler_tag fasttrap_thread_dtor_tag;
#endif

static unsigned long tpoints_hash_size = FASTTRAP_TPOINTS_DEFAULT_SIZE;

#ifdef __FreeBSD__
SYSCTL_DECL(_kern_dtrace);
SYSCTL_NODE(_kern_dtrace, OID_AUTO, fasttrap, CTLFLAG_RD, 0, "DTrace fasttrap parameters");
SYSCTL_UINT(_kern_dtrace_fasttrap, OID_AUTO, max_probes, CTLFLAG_RWTUN, &fasttrap_max,
    FASTTRAP_MAX_DEFAULT, "Maximum number of fasttrap probes");
SYSCTL_ULONG(_kern_dtrace_fasttrap, OID_AUTO, tpoints_hash_size, CTLFLAG_RDTUN, &tpoints_hash_size,
    FASTTRAP_TPOINTS_DEFAULT_SIZE, "Size of the tracepoint hash table");
#endif

/*
 * Return the 1-based position of the highest set bit in i, or 0 if i == 0.
 * Used to round hash table sizes up to a power of two.
 */
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

/*
 * ELF-style shift-and-XOR string hash; used to index the provider hash
 * table (see FASTTRAP_PROVS_INDEX).
 */
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * Deliver SIGTRAP (code TRAP_DTRACE) to thread t in process p, recording the
 * faulting pc.  Called when a tracepoint cannot be cleanly removed and the
 * process is left in an unrecoverable state.
 */
void
fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
{
#ifdef illumos
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#else
	ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);

	ksiginfo_init(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_code = TRAP_DTRACE;
	ksi->ksi_addr = (caddr_t)pc;
	PROC_LOCK(p);
	(void) tdsendsignal(p, t, SIGTRAP, ksi);
	PROC_UNLOCK(p);
#endif
}

#ifndef illumos
/*
 * Obtain a chunk of scratch space in the address space of the target process.
 * Returns the thread's existing chunk if it has one, otherwise takes one from
 * the process' free list (mapping a fresh block if the list is empty).
 * Returns NULL only if a new block cannot be mapped.
 */
fasttrap_scrspace_t *
fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
{
	fasttrap_scrblock_t *scrblk;
	fasttrap_scrspace_t *scrspc;
	struct proc *p;
	vm_offset_t addr;
	int error, i;

	scrspc = NULL;
	if (td->t_dtrace_sscr != NULL) {
		/* If the thread already has scratch space, we're done. */
		scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
		return (scrspc);
	}

	p = td->td_proc;

	mutex_enter(&fprc->ftpc_mtx);
	if (LIST_EMPTY(&fprc->ftpc_fscr)) {
		/*
		 * No scratch space is available, so we'll map a new scratch
		 * space block into the traced process' address space.
		 */
		addr = 0;
		error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
		    FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error != KERN_SUCCESS)
			goto done;

		scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
		scrblk->ftsb_addr = addr;
		LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);

		/*
		 * Carve the block up into chunks and put them on the free list.
		 */
		for (i = 0;
		    i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
			scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
			scrspc->ftss_addr = addr +
			    i * FASTTRAP_SCRSPACE_SIZE;
			LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
			    ftss_next);
		}
	}

	/*
	 * Take the first scratch chunk off the free list, put it on the
	 * allocated list, and return its address.
	 */
	scrspc = LIST_FIRST(&fprc->ftpc_fscr);
	LIST_REMOVE(scrspc, ftss_next);
	LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);

	/*
	 * This scratch space is reserved for use by td until the thread exits.
	 */
	td->t_dtrace_sscr = scrspc;

done:
	mutex_exit(&fprc->ftpc_mtx);

	return (scrspc);
}

/*
 * Return any allocated per-thread scratch space chunks back to the process'
 * free list.  Installed as a thread-destructor eventhandler; a thread with no
 * scratch space or no fasttrap process handle is ignored.
 */
static void
fasttrap_thread_dtor(void *arg __unused, struct thread *td)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc;
	fasttrap_scrspace_t *scrspc;
	pid_t pid;

	if (td->t_dtrace_sscr == NULL)
		return;

	pid = td->td_proc->p_pid;
	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	fprc = NULL;

	/* Look up the fasttrap process handle for this process. */
	mutex_enter(&bucket->ftb_mtx);
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid) {
			/* Hand-over-hand: take the proc lock, drop the bucket lock. */
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			break;
		}
	}
	if (fprc == NULL) {
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
	LIST_REMOVE(scrspc, ftss_next);
	LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);

	mutex_exit(&fprc->ftpc_mtx);
}
#endif

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

#ifdef illumos
	CPU_FOREACH(i) {
		mutex_enter(&fasttrap_cpuc_pid_lock[i]);
		mutex_exit(&fasttrap_cpuc_pid_lock[i]);
	}
#else
	/* Waiting for the write lock flushes out all rmlock readers. */
	rm_wlock(&fasttrap_tp_lock);
	rm_wunlock(&fasttrap_tp_lock);
#endif
}

/*
 * This function performs asynchronous cleanup of fasttrap providers.
 * The Solaris implementation of this mechanism uses a timeout that's
 * activated in fasttrap_pid_cleanup(), but this doesn't work in FreeBSD:
 * one may sleep while holding the DTrace mutexes, but it is unsafe to sleep
 * in a callout handler.  Thus we use a dedicated process to perform the
 * cleanup when requested.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *data)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	int i, later = 0, rval;

	mtx_lock(&fasttrap_cleanup_mtx);
	while (!fasttrap_cleanup_drain || later > 0) {
		fasttrap_cleanup_work = 0;
		mtx_unlock(&fasttrap_cleanup_mtx);

		later = 0;

		/*
		 * Iterate over all the providers trying to remove the marked
		 * ones.  If a provider is marked but not retired, we just
		 * have to take a crack at removing it -- it's no big deal if
		 * we can't.
		 */
		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
			bucket = &fasttrap_provs.fth_table[i];
			mutex_enter(&bucket->ftb_mtx);
			fpp = (fasttrap_provider_t **)&bucket->ftb_data;

			while ((fp = *fpp) != NULL) {
				if (!fp->ftp_marked) {
					fpp = &fp->ftp_next;
					continue;
				}

				mutex_enter(&fp->ftp_mtx);

				/*
				 * If this provider has consumers actively
				 * creating probes (ftp_ccount) or is a USDT
				 * provider (ftp_mcount), we can't unregister
				 * or even condense.
				 */
				if (fp->ftp_ccount != 0 ||
				    fp->ftp_mcount != 0) {
					mutex_exit(&fp->ftp_mtx);
					fp->ftp_marked = 0;
					continue;
				}

				if (!fp->ftp_retired || fp->ftp_rcount != 0)
					fp->ftp_marked = 0;

				mutex_exit(&fp->ftp_mtx);

				/*
				 * If we successfully unregister this
				 * provider we can remove it from the hash
				 * chain and free the memory.  If our attempt
				 * to unregister fails and this is a retired
				 * provider, increment our flag to try again
				 * pretty soon.
				 * If we've consumed more than half of our
				 * total permitted number of probes call
				 * dtrace_condense() to try to clean out the
				 * unenabled probes.
				 */
				provid = fp->ftp_provid;
				if ((rval = dtrace_unregister(provid)) != 0) {
					if (fasttrap_total > fasttrap_max / 2)
						(void) dtrace_condense(provid);

					if (rval == EAGAIN)
						fp->ftp_marked = 1;

					later += fp->ftp_marked;
					fpp = &fp->ftp_next;
				} else {
					*fpp = fp->ftp_next;
					fasttrap_provider_free(fp);
				}
			}
			mutex_exit(&bucket->ftb_mtx);
		}
		mtx_lock(&fasttrap_cleanup_mtx);

		/*
		 * If we were unable to retire a provider, try again after a
		 * second.  This situation can occur in certain circumstances
		 * where providers cannot be unregistered even though they have
		 * no probes enabled because of an execution of dtrace -l or
		 * something similar.
		 */
		if (later > 0 || fasttrap_cleanup_work ||
		    fasttrap_cleanup_drain) {
			mtx_unlock(&fasttrap_cleanup_mtx);
			pause("ftclean", hz);
			mtx_lock(&fasttrap_cleanup_mtx);
		} else
			mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx,
			    0, "ftcl", 0);
	}

	/*
	 * Wake up the thread in fasttrap_unload() now that we're done.
	 */
	wakeup(&fasttrap_cleanup_drain);
	mtx_unlock(&fasttrap_cleanup_mtx);

	kthread_exit();
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{

	mtx_lock(&fasttrap_cleanup_mtx);
	if (!fasttrap_cleanup_work) {
		fasttrap_cleanup_work = 1;
		wakeup(&fasttrap_cleanup_cv);
	}
	mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork().  The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
#ifndef illumos
	fasttrap_scrblock_t *scrblk;
	fasttrap_proc_t *fprc = NULL;
#endif
	pid_t ppid = p->p_pid;
	int i;

#ifdef illumos
	ASSERT(curproc == p);
	ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
	PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
#ifdef illumos
	ASSERT(p->p_dtrace_count > 0);
#else
	if (p->p_dtrace_helpers) {
		/*
		 * dtrace_helpers_duplicate() allocates memory.
		 */
		_PHOLD(cp);
		PROC_UNLOCK(p);
		PROC_UNLOCK(cp);
		dtrace_helpers_duplicate(p, cp);
		PROC_LOCK(cp);
		PROC_LOCK(p);
		_PRELE(cp);
	}
	/*
	 * This check is purposely here instead of in kern_fork.c because,
	 * for legal reasons, we cannot include the dtrace_cddl.h header
	 * inside kern_fork.c and insert if-clause there.
	 */
	if (p->p_dtrace_count == 0)
		return;
#endif
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints.  It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection.  In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
#ifdef illumos
	mtx_lock_spin(&cp->p_slock);
	sprlock_proc(cp);
	mtx_unlock_spin(&cp->p_slock);
#else
	/*
	 * fasttrap_tracepoint_remove() expects the child process to be
	 * unlocked and the VM then expects curproc to be unlocked.
	 */
	_PHOLD(cp);
	PROC_UNLOCK(cp);
	PROC_UNLOCK(p);
#endif

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				int ret = fasttrap_tracepoint_remove(cp, tp);
				ASSERT(ret == 0);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
#ifndef illumos
				fprc = tp->ftt_proc;
#endif
			}
		}
		mutex_exit(&bucket->ftb_mtx);

#ifndef illumos
		/*
		 * Unmap any scratch space inherited from the parent's address
		 * space.
		 */
		if (fprc != NULL) {
			mutex_enter(&fprc->ftpc_mtx);
			LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
				vm_map_remove(&cp->p_vmspace->vm_map,
				    scrblk->ftsb_addr,
				    scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
			}
			mutex_exit(&fprc->ftpc_mtx);
		}
#endif
	}

#ifdef illumos
	mutex_enter(&cp->p_lock);
	sprunlock(cp);
#else
	PROC_LOCK(p);
	PROC_LOCK(cp);
	_PRELE(cp);
#endif
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
#ifndef illumos
	struct thread *td;
#endif

#ifdef illumos
	ASSERT(p == curproc);
#else
	PROC_LOCK_ASSERT(p, MA_OWNED);
	_PHOLD(p);
	/*
	 * Since struct threads may be recycled, we cannot rely on t_dtrace_sscr
	 * fields to be zeroed by kdtrace_thread_ctor.  Thus we must zero it
	 * ourselves when a process exits.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		td->t_dtrace_sscr = NULL;
	PROC_UNLOCK(p);
#endif

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#ifndef illumos
	if (p->p_dtrace_helpers)
		dtrace_helpers_destroy(p);
	PROC_LOCK(p);
	_PRELE(p);
#endif
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
{
	/*
	 * There are no "default" pid probes.
	 */
}

/*
 * Enable tracepoint `index` of `probe` in process p: either join an existing
 * tracepoint at the same (pid, pc) or install a freshly initialized one.
 * Returns 0, FASTTRAP_ENABLE_FAIL, or FASTTRAP_ENABLE_PARTIAL.
 */
static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

#ifdef illumos
	ASSERT(!(p->p_flag & SVFORK));
#endif

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes.
	 * This may be our second time through this path in which case we'll
	 * have constructed the tracepoint we'd like to install.  If we can't
	 * find a match, and have an allocated tracepoint ready to go, enable
	 * that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */

		/*
		 * This can't be the first interested probe.  We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			/* Publish the id before linking it at the list head. */
			id->fti_next = tp->ftt_ids;
			membar_producer();
			tp->ftt_ids = id;
			membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			membar_producer();
			tp->ftt_retids = id;
			membar_producer();
			break;

		default:
			ASSERT(0);
		}

		mutex_exit(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return (0);
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		membar_producer();
		bucket->ftb_data = new_tp;
		membar_producer();
		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
#ifdef illumos
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
		p->p_dtrace_count++;

		return (rc);
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

/*
 * Disable tracepoint `index` of `probe`: unlink our id from the tracepoint,
 * and remove/free the tracepoint itself when no other probes reference it.
 */
static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp = NULL;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		ASSERT(0);
	}

	/* Walk the list until idp points at the link referencing our probe. */
	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text.  We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
#ifdef illumos
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	mutex_enter(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	membar_producer();

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}

/*
 * Install the fasttrap probe hooks into the DTrace framework on the first
 * enabling; subsequent calls just bump the reference count.
 */
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	mutex_enter(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	mutex_exit(&fasttrap_count_mtx);
}

/*
 * Drop a reference on the fasttrap callbacks; on the 1 -> 0 transition the
 * callback pointers are cleared (on illumos, after write-locking every other
 * online CPU's cpu_ft_lock so no CPU is mid-call through the pointers).
 */
static void
fasttrap_disable_callbacks(void)
{
#ifdef illumos
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
#ifdef illumos
		cpu_t *cur, *cpu = CPU;

		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}
#endif
		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;
#ifdef illumos
		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_exit(&cur->cpu_ft_lock);
		}
#endif
	}
	mutex_exit(&fasttrap_count_mtx);
}

/*
 * DTrace dtps_enable() entry point for the pid/USDT providers: patch every
 * tracepoint of the probe into the target process's text.
 */
/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	proc_t *p = NULL;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
#ifdef illumos
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	mutex_enter(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	mutex_exit(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return;

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
#ifdef illumos
	if ((p = sprlock(probe->ftp_pid)) == NULL) {
		if ((curproc->p_flag & SFORKING) == 0)
			return;

		mutex_enter(&pidlock);
		p = prfind(probe->ftp_pid);

		/*
		 * Confirm that curproc is indeed forking the process in which
		 * we're trying to enable probes.
		 */
		ASSERT(p != NULL);
		ASSERT(p->p_parent == curproc);
		ASSERT(p->p_stat == SIDL);

		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		sprlock_proc(p);
	}

	ASSERT(!(p->p_flag & SVFORK));
	mutex_exit(&p->p_lock);
#else
	/*
	 * NOTE(review): unlike fasttrap_pid_disable() below, this path does
	 * not check P_WEXIT on the process returned by pfind() — confirm
	 * whether an exiting process can be returned here.
	 */
	if ((p = pfind(probe->ftp_pid)) == NULL)
		return;
#endif

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
#ifdef __FreeBSD__
	/*
	 * pfind() returns a locked process.
	 */
	_PHOLD(p);
	PROC_UNLOCK(p);
#endif
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

#ifdef illumos
			mutex_enter(&p->p_lock);
			sprunlock(p);
#else
			PRELE(p);
#endif

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return;
		}
	}
#ifdef illumos
	mutex_enter(&p->p_lock);
	sprunlock(p);
#else
	PRELE(p);
#endif

	probe->ftp_enabled = 1;
}

/*
 * DTrace dtps_disable() entry point: remove this probe's tracepoints from
 * the target process (if it still exists) and drop the provider reference
 * taken by fasttrap_pid_enable().
 */
/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	mutex_enter(&provider->ftp_mtx);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = pfind(probe->ftp_pid)) != NULL) {
#ifdef __FreeBSD__
		/*
		 * An exiting process is treated as already gone: drop the
		 * proc lock and fall back to provider-lock exclusion.
		 */
		if (p->p_flag & P_WEXIT) {
			PROC_UNLOCK(p);
			p = NULL;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
		}
#endif
	}

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	}

	if (whack)
		fasttrap_pid_cleanup();

#ifdef __FreeBSD__
	if (p != NULL)
		PRELE(p);
#endif
	/* A probe that never fully enabled has no callback reference to drop. */
	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

#ifdef illumos
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif
	fasttrap_disable_callbacks();
}

/*
 * DTrace dtps_getargdesc() entry point: report the native (and, for USDT,
 * translated) type string for argument dtargd_ndx of this probe.
 */
/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	/* Remap the argument index if this probe has an argument map. */
	ndx = (probe->ftp_argmap != NULL) ?
1398 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx; 1399 1400 str = probe->ftp_ntypes; 1401 for (i = 0; i < ndx; i++) { 1402 str += strlen(str) + 1; 1403 } 1404 1405 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native)); 1406 (void) strcpy(desc->dtargd_native, str); 1407 1408 if (probe->ftp_xtypes == NULL) 1409 return; 1410 1411 str = probe->ftp_xtypes; 1412 for (i = 0; i < desc->dtargd_ndx; i++) { 1413 str += strlen(str) + 1; 1414 } 1415 1416 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate)); 1417 (void) strcpy(desc->dtargd_xlate, str); 1418} 1419 1420/*ARGSUSED*/ 1421static void 1422fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg) 1423{ 1424 fasttrap_probe_t *probe = parg; 1425 int i; 1426 size_t size; 1427 1428 ASSERT(probe != NULL); 1429 ASSERT(!probe->ftp_enabled); 1430 ASSERT(fasttrap_total >= probe->ftp_ntps); 1431 1432 atomic_add_32(&fasttrap_total, -probe->ftp_ntps); 1433 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]); 1434 1435 if (probe->ftp_gen + 1 >= fasttrap_mod_gen) 1436 fasttrap_mod_barrier(probe->ftp_gen); 1437 1438 for (i = 0; i < probe->ftp_ntps; i++) { 1439 kmem_free(probe->ftp_tps[i].fit_tp, 1440 sizeof (fasttrap_tracepoint_t)); 1441 } 1442 1443 kmem_free(probe, size); 1444} 1445 1446 1447static const dtrace_pattr_t pid_attr = { 1448{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1449{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1450{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1451{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1452{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1453}; 1454 1455static dtrace_pops_t pid_pops = { 1456 fasttrap_pid_provide, 1457 NULL, 1458 fasttrap_pid_enable, 1459 fasttrap_pid_disable, 1460 NULL, 1461 NULL, 1462 fasttrap_pid_getargdesc, 1463 fasttrap_pid_getarg, 1464 NULL, 1465 fasttrap_pid_destroy 1466}; 1467 1468static 
dtrace_pops_t usdt_pops = { 1469 fasttrap_pid_provide, 1470 NULL, 1471 fasttrap_pid_enable, 1472 fasttrap_pid_disable, 1473 NULL, 1474 NULL, 1475 fasttrap_pid_getargdesc, 1476 fasttrap_usdt_getarg, 1477 NULL, 1478 fasttrap_pid_destroy 1479}; 1480 1481static fasttrap_proc_t * 1482fasttrap_proc_lookup(pid_t pid) 1483{ 1484 fasttrap_bucket_t *bucket; 1485 fasttrap_proc_t *fprc, *new_fprc; 1486 1487 1488 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; 1489 mutex_enter(&bucket->ftb_mtx); 1490 1491 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { 1492 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { 1493 mutex_enter(&fprc->ftpc_mtx); 1494 mutex_exit(&bucket->ftb_mtx); 1495 fprc->ftpc_rcount++; 1496 atomic_inc_64(&fprc->ftpc_acount); 1497 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); 1498 mutex_exit(&fprc->ftpc_mtx); 1499 1500 return (fprc); 1501 } 1502 } 1503 1504 /* 1505 * Drop the bucket lock so we don't try to perform a sleeping 1506 * allocation under it. 1507 */ 1508 mutex_exit(&bucket->ftb_mtx); 1509 1510 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP); 1511 new_fprc->ftpc_pid = pid; 1512 new_fprc->ftpc_rcount = 1; 1513 new_fprc->ftpc_acount = 1; 1514#ifndef illumos 1515 mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT, 1516 NULL); 1517#endif 1518 1519 mutex_enter(&bucket->ftb_mtx); 1520 1521 /* 1522 * Take another lap through the list to make sure a proc hasn't 1523 * been created for this pid while we weren't under the bucket lock. 
1524 */ 1525 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { 1526 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { 1527 mutex_enter(&fprc->ftpc_mtx); 1528 mutex_exit(&bucket->ftb_mtx); 1529 fprc->ftpc_rcount++; 1530 atomic_inc_64(&fprc->ftpc_acount); 1531 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); 1532 mutex_exit(&fprc->ftpc_mtx); 1533 1534 kmem_free(new_fprc, sizeof (fasttrap_proc_t)); 1535 1536 return (fprc); 1537 } 1538 } 1539 1540 new_fprc->ftpc_next = bucket->ftb_data; 1541 bucket->ftb_data = new_fprc; 1542 1543 mutex_exit(&bucket->ftb_mtx); 1544 1545 return (new_fprc); 1546} 1547 1548static void 1549fasttrap_proc_release(fasttrap_proc_t *proc) 1550{ 1551 fasttrap_bucket_t *bucket; 1552 fasttrap_proc_t *fprc, **fprcp; 1553 pid_t pid = proc->ftpc_pid; 1554#ifndef illumos 1555 fasttrap_scrblock_t *scrblk, *scrblktmp; 1556 fasttrap_scrspace_t *scrspc, *scrspctmp; 1557 struct proc *p; 1558 struct thread *td; 1559#endif 1560 1561 mutex_enter(&proc->ftpc_mtx); 1562 1563 ASSERT(proc->ftpc_rcount != 0); 1564 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount); 1565 1566 if (--proc->ftpc_rcount != 0) { 1567 mutex_exit(&proc->ftpc_mtx); 1568 return; 1569 } 1570 1571#ifndef illumos 1572 /* 1573 * Free all structures used to manage per-thread scratch space. 
1574 */ 1575 LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next, 1576 scrblktmp) { 1577 LIST_REMOVE(scrblk, ftsb_next); 1578 free(scrblk, M_SOLARIS); 1579 } 1580 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) { 1581 LIST_REMOVE(scrspc, ftss_next); 1582 free(scrspc, M_SOLARIS); 1583 } 1584 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) { 1585 LIST_REMOVE(scrspc, ftss_next); 1586 free(scrspc, M_SOLARIS); 1587 } 1588 1589 if ((p = pfind(pid)) != NULL) { 1590 FOREACH_THREAD_IN_PROC(p, td) 1591 td->t_dtrace_sscr = NULL; 1592 PROC_UNLOCK(p); 1593 } 1594#endif 1595 1596 mutex_exit(&proc->ftpc_mtx); 1597 1598 /* 1599 * There should definitely be no live providers associated with this 1600 * process at this point. 1601 */ 1602 ASSERT(proc->ftpc_acount == 0); 1603 1604 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; 1605 mutex_enter(&bucket->ftb_mtx); 1606 1607 fprcp = (fasttrap_proc_t **)&bucket->ftb_data; 1608 while ((fprc = *fprcp) != NULL) { 1609 if (fprc == proc) 1610 break; 1611 1612 fprcp = &fprc->ftpc_next; 1613 } 1614 1615 /* 1616 * Something strange has happened if we can't find the proc. 1617 */ 1618 ASSERT(fprc != NULL); 1619 1620 *fprcp = fprc->ftpc_next; 1621 1622 mutex_exit(&bucket->ftb_mtx); 1623 1624 kmem_free(fprc, sizeof (fasttrap_proc_t)); 1625} 1626 1627/* 1628 * Lookup a fasttrap-managed provider based on its name and associated pid. 1629 * If the pattr argument is non-NULL, this function instantiates the provider 1630 * if it doesn't exist otherwise it returns NULL. The provider is returned 1631 * with its lock held. 
1632 */ 1633static fasttrap_provider_t * 1634fasttrap_provider_lookup(pid_t pid, const char *name, 1635 const dtrace_pattr_t *pattr) 1636{ 1637 fasttrap_provider_t *fp, *new_fp = NULL; 1638 fasttrap_bucket_t *bucket; 1639 char provname[DTRACE_PROVNAMELEN]; 1640 proc_t *p; 1641 cred_t *cred; 1642 1643 ASSERT(strlen(name) < sizeof (fp->ftp_name)); 1644 ASSERT(pattr != NULL); 1645 1646 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; 1647 mutex_enter(&bucket->ftb_mtx); 1648 1649 /* 1650 * Take a lap through the list and return the match if we find it. 1651 */ 1652 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1653 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1654 !fp->ftp_retired) { 1655 mutex_enter(&fp->ftp_mtx); 1656 mutex_exit(&bucket->ftb_mtx); 1657 return (fp); 1658 } 1659 } 1660 1661 /* 1662 * Drop the bucket lock so we don't try to perform a sleeping 1663 * allocation under it. 1664 */ 1665 mutex_exit(&bucket->ftb_mtx); 1666 1667 /* 1668 * Make sure the process exists, isn't a child created as the result 1669 * of a vfork(2), and isn't a zombie (but may be in fork). 1670 */ 1671 if ((p = pfind(pid)) == NULL) 1672 return (NULL); 1673 1674 /* 1675 * Increment p_dtrace_probes so that the process knows to inform us 1676 * when it exits or execs. fasttrap_provider_free() decrements this 1677 * when we're done with this provider. 1678 */ 1679 p->p_dtrace_probes++; 1680 1681 /* 1682 * Grab the credentials for this process so we have 1683 * something to pass to dtrace_register(). 
1684 */ 1685 PROC_LOCK_ASSERT(p, MA_OWNED); 1686 crhold(p->p_ucred); 1687 cred = p->p_ucred; 1688 PROC_UNLOCK(p); 1689 1690 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP); 1691 new_fp->ftp_pid = pid; 1692 new_fp->ftp_proc = fasttrap_proc_lookup(pid); 1693#ifndef illumos 1694 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL); 1695 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL); 1696#endif 1697 1698 ASSERT(new_fp->ftp_proc != NULL); 1699 1700 mutex_enter(&bucket->ftb_mtx); 1701 1702 /* 1703 * Take another lap through the list to make sure a provider hasn't 1704 * been created for this pid while we weren't under the bucket lock. 1705 */ 1706 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1707 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1708 !fp->ftp_retired) { 1709 mutex_enter(&fp->ftp_mtx); 1710 mutex_exit(&bucket->ftb_mtx); 1711 fasttrap_provider_free(new_fp); 1712 crfree(cred); 1713 return (fp); 1714 } 1715 } 1716 1717 (void) strcpy(new_fp->ftp_name, name); 1718 1719 /* 1720 * Fail and return NULL if either the provider name is too long 1721 * or we fail to register this new provider with the DTrace 1722 * framework. Note that this is the only place we ever construct 1723 * the full provider name -- we keep it in pieces in the provider 1724 * structure. 1725 */ 1726 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >= 1727 sizeof (provname) || 1728 dtrace_register(provname, pattr, 1729 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred, 1730 pattr == &pid_attr ? 
	    &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		mutex_exit(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		crfree(cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	/* Return the new provider with its lock held, per the contract. */
	mutex_enter(&new_fp->ftp_mtx);
	mutex_exit(&bucket->ftb_mtx);

	crfree(cred);
	return (new_fp);
}

/*
 * Free a provider that has no remaining enablings, creators, or meta-provider
 * references, releasing its hold on the associated fasttrap_proc_t and
 * dropping the target process's p_dtrace_probes count.
 */
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_dec_64(&provider->ftp_proc->ftpc_acount);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

#ifndef illumos
	mutex_destroy(&provider->ftp_mtx);
	mutex_destroy(&provider->ftp_cmtx);
#endif
	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = pfind(pid)) == NULL) {
		return;
	}

	p->p_dtrace_probes--;
#ifndef illumos
	PROC_UNLOCK(p);
#endif
}

/*
 * Retire the named provider for this pid: mark it retired/marked for the
 * cleanup thread and invalidate it with the DTrace framework. When mprov is
 * set, only the meta-provider reference is dropped until the count hits zero.
 */
static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	mutex_enter(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		mutex_exit(&fp->ftp_mtx);
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on
	 * the proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
1842 */ 1843 atomic_dec_64(&fp->ftp_proc->ftpc_acount); 1844 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount); 1845 1846 fp->ftp_retired = 1; 1847 fp->ftp_marked = 1; 1848 provid = fp->ftp_provid; 1849 mutex_exit(&fp->ftp_mtx); 1850 1851 /* 1852 * We don't have to worry about invalidating the same provider twice 1853 * since fasttrap_provider_lookup() will ignore provider that have 1854 * been marked as retired. 1855 */ 1856 dtrace_invalidate(provid); 1857 1858 mutex_exit(&bucket->ftb_mtx); 1859 1860 fasttrap_pid_cleanup(); 1861} 1862 1863static int 1864fasttrap_uint32_cmp(const void *ap, const void *bp) 1865{ 1866 return (*(const uint32_t *)ap - *(const uint32_t *)bp); 1867} 1868 1869static int 1870fasttrap_uint64_cmp(const void *ap, const void *bp) 1871{ 1872 return (*(const uint64_t *)ap - *(const uint64_t *)bp); 1873} 1874 1875static int 1876fasttrap_add_probe(fasttrap_probe_spec_t *pdata) 1877{ 1878 fasttrap_provider_t *provider; 1879 fasttrap_probe_t *pp; 1880 fasttrap_tracepoint_t *tp; 1881 char *name; 1882 int i, aframes = 0, whack; 1883 1884 /* 1885 * There needs to be at least one desired trace point. 1886 */ 1887 if (pdata->ftps_noffs == 0) 1888 return (EINVAL); 1889 1890 switch (pdata->ftps_type) { 1891 case DTFTP_ENTRY: 1892 name = "entry"; 1893 aframes = FASTTRAP_ENTRY_AFRAMES; 1894 break; 1895 case DTFTP_RETURN: 1896 name = "return"; 1897 aframes = FASTTRAP_RETURN_AFRAMES; 1898 break; 1899 case DTFTP_OFFSETS: 1900 name = NULL; 1901 break; 1902 default: 1903 return (EINVAL); 1904 } 1905 1906 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, 1907 FASTTRAP_PID_NAME, &pid_attr)) == NULL) 1908 return (ESRCH); 1909 1910 /* 1911 * Increment this reference count to indicate that a consumer is 1912 * actively adding a new probe associated with this provider. This 1913 * prevents the provider from being deleted -- we'll need to check 1914 * for pending deletions when we drop this reference count. 
1915 */ 1916 provider->ftp_ccount++; 1917 mutex_exit(&provider->ftp_mtx); 1918 1919 /* 1920 * Grab the creation lock to ensure consistency between calls to 1921 * dtrace_probe_lookup() and dtrace_probe_create() in the face of 1922 * other threads creating probes. We must drop the provider lock 1923 * before taking this lock to avoid a three-way deadlock with the 1924 * DTrace framework. 1925 */ 1926 mutex_enter(&provider->ftp_cmtx); 1927 1928 if (name == NULL) { 1929 for (i = 0; i < pdata->ftps_noffs; i++) { 1930 char name_str[17]; 1931 1932 (void) sprintf(name_str, "%llx", 1933 (unsigned long long)pdata->ftps_offs[i]); 1934 1935 if (dtrace_probe_lookup(provider->ftp_provid, 1936 pdata->ftps_mod, pdata->ftps_func, name_str) != 0) 1937 continue; 1938 1939 atomic_inc_32(&fasttrap_total); 1940 1941 if (fasttrap_total > fasttrap_max) { 1942 atomic_dec_32(&fasttrap_total); 1943 goto no_mem; 1944 } 1945 1946 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP); 1947 1948 pp->ftp_prov = provider; 1949 pp->ftp_faddr = pdata->ftps_pc; 1950 pp->ftp_fsize = pdata->ftps_size; 1951 pp->ftp_pid = pdata->ftps_pid; 1952 pp->ftp_ntps = 1; 1953 1954 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1955 KM_SLEEP); 1956 1957 tp->ftt_proc = provider->ftp_proc; 1958 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1959 tp->ftt_pid = pdata->ftps_pid; 1960 1961 pp->ftp_tps[0].fit_tp = tp; 1962 pp->ftp_tps[0].fit_id.fti_probe = pp; 1963 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type; 1964 1965 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1966 pdata->ftps_mod, pdata->ftps_func, name_str, 1967 FASTTRAP_OFFSET_AFRAMES, pp); 1968 } 1969 1970 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod, 1971 pdata->ftps_func, name) == 0) { 1972 atomic_add_32(&fasttrap_total, pdata->ftps_noffs); 1973 1974 if (fasttrap_total > fasttrap_max) { 1975 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1976 goto no_mem; 1977 } 1978 1979 /* 1980 * Make sure all tracepoint 
program counter values are unique. 1981 * We later assume that each probe has exactly one tracepoint 1982 * for a given pc. 1983 */ 1984 qsort(pdata->ftps_offs, pdata->ftps_noffs, 1985 sizeof (uint64_t), fasttrap_uint64_cmp); 1986 for (i = 1; i < pdata->ftps_noffs; i++) { 1987 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1]) 1988 continue; 1989 1990 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1991 goto no_mem; 1992 } 1993 1994 ASSERT(pdata->ftps_noffs > 0); 1995 pp = kmem_zalloc(offsetof(fasttrap_probe_t, 1996 ftp_tps[pdata->ftps_noffs]), KM_SLEEP); 1997 1998 pp->ftp_prov = provider; 1999 pp->ftp_faddr = pdata->ftps_pc; 2000 pp->ftp_fsize = pdata->ftps_size; 2001 pp->ftp_pid = pdata->ftps_pid; 2002 pp->ftp_ntps = pdata->ftps_noffs; 2003 2004 for (i = 0; i < pdata->ftps_noffs; i++) { 2005 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 2006 KM_SLEEP); 2007 2008 tp->ftt_proc = provider->ftp_proc; 2009 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 2010 tp->ftt_pid = pdata->ftps_pid; 2011 2012 pp->ftp_tps[i].fit_tp = tp; 2013 pp->ftp_tps[i].fit_id.fti_probe = pp; 2014 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type; 2015 } 2016 2017 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 2018 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp); 2019 } 2020 2021 mutex_exit(&provider->ftp_cmtx); 2022 2023 /* 2024 * We know that the provider is still valid since we incremented the 2025 * creation reference count. If someone tried to clean up this provider 2026 * while we were using it (e.g. because the process called exec(2) or 2027 * exit(2)), take note of that and try to clean it up now. 2028 */ 2029 mutex_enter(&provider->ftp_mtx); 2030 provider->ftp_ccount--; 2031 whack = provider->ftp_retired; 2032 mutex_exit(&provider->ftp_mtx); 2033 2034 if (whack) 2035 fasttrap_pid_cleanup(); 2036 2037 return (0); 2038 2039no_mem: 2040 /* 2041 * If we've exhausted the allowable resources, we'll try to remove 2042 * this provider to free some up. 
This is to cover the case where 2043 * the user has accidentally created many more probes than was 2044 * intended (e.g. pid123:::). 2045 */ 2046 mutex_exit(&provider->ftp_cmtx); 2047 mutex_enter(&provider->ftp_mtx); 2048 provider->ftp_ccount--; 2049 provider->ftp_marked = 1; 2050 mutex_exit(&provider->ftp_mtx); 2051 2052 fasttrap_pid_cleanup(); 2053 2054 return (ENOMEM); 2055} 2056 2057/*ARGSUSED*/ 2058static void * 2059fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) 2060{ 2061 fasttrap_provider_t *provider; 2062 2063 /* 2064 * A 32-bit unsigned integer (like a pid for example) can be 2065 * expressed in 10 or fewer decimal digits. Make sure that we'll 2066 * have enough space for the provider name. 2067 */ 2068 if (strlen(dhpv->dthpv_provname) + 10 >= 2069 sizeof (provider->ftp_name)) { 2070 printf("failed to instantiate provider %s: " 2071 "name too long to accomodate pid", dhpv->dthpv_provname); 2072 return (NULL); 2073 } 2074 2075 /* 2076 * Don't let folks spoof the true pid provider. 2077 */ 2078 if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) { 2079 printf("failed to instantiate provider %s: " 2080 "%s is an invalid name", dhpv->dthpv_provname, 2081 FASTTRAP_PID_NAME); 2082 return (NULL); 2083 } 2084 2085 /* 2086 * The highest stability class that fasttrap supports is ISA; cap 2087 * the stability of the new provider accordingly. 
2088 */ 2089 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA) 2090 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA; 2091 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA) 2092 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA; 2093 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA) 2094 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA; 2095 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA) 2096 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA; 2097 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA) 2098 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA; 2099 2100 if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname, 2101 &dhpv->dthpv_pattr)) == NULL) { 2102 printf("failed to instantiate provider %s for " 2103 "process %u", dhpv->dthpv_provname, (uint_t)pid); 2104 return (NULL); 2105 } 2106 2107 /* 2108 * Up the meta provider count so this provider isn't removed until 2109 * the meta provider has been told to remove it. 2110 */ 2111 provider->ftp_mcount++; 2112 2113 mutex_exit(&provider->ftp_mtx); 2114 2115 return (provider); 2116} 2117 2118/*ARGSUSED*/ 2119static void 2120fasttrap_meta_create_probe(void *arg, void *parg, 2121 dtrace_helper_probedesc_t *dhpb) 2122{ 2123 fasttrap_provider_t *provider = parg; 2124 fasttrap_probe_t *pp; 2125 fasttrap_tracepoint_t *tp; 2126 int i, j; 2127 uint32_t ntps; 2128 2129 /* 2130 * Since the meta provider count is non-zero we don't have to worry 2131 * about this provider disappearing. 2132 */ 2133 ASSERT(provider->ftp_mcount > 0); 2134 2135 /* 2136 * The offsets must be unique. 
2137 */ 2138 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t), 2139 fasttrap_uint32_cmp); 2140 for (i = 1; i < dhpb->dthpb_noffs; i++) { 2141 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <= 2142 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1]) 2143 return; 2144 } 2145 2146 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t), 2147 fasttrap_uint32_cmp); 2148 for (i = 1; i < dhpb->dthpb_nenoffs; i++) { 2149 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <= 2150 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1]) 2151 return; 2152 } 2153 2154 /* 2155 * Grab the creation lock to ensure consistency between calls to 2156 * dtrace_probe_lookup() and dtrace_probe_create() in the face of 2157 * other threads creating probes. 2158 */ 2159 mutex_enter(&provider->ftp_cmtx); 2160 2161 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod, 2162 dhpb->dthpb_func, dhpb->dthpb_name) != 0) { 2163 mutex_exit(&provider->ftp_cmtx); 2164 return; 2165 } 2166 2167 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs; 2168 ASSERT(ntps > 0); 2169 2170 atomic_add_32(&fasttrap_total, ntps); 2171 2172 if (fasttrap_total > fasttrap_max) { 2173 atomic_add_32(&fasttrap_total, -ntps); 2174 mutex_exit(&provider->ftp_cmtx); 2175 return; 2176 } 2177 2178 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP); 2179 2180 pp->ftp_prov = provider; 2181 pp->ftp_pid = provider->ftp_pid; 2182 pp->ftp_ntps = ntps; 2183 pp->ftp_nargs = dhpb->dthpb_xargc; 2184 pp->ftp_xtypes = dhpb->dthpb_xtypes; 2185 pp->ftp_ntypes = dhpb->dthpb_ntypes; 2186 2187 /* 2188 * First create a tracepoint for each actual point of interest. 
2189 */ 2190 for (i = 0; i < dhpb->dthpb_noffs; i++) { 2191 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP); 2192 2193 tp->ftt_proc = provider->ftp_proc; 2194 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i]; 2195 tp->ftt_pid = provider->ftp_pid; 2196 2197 pp->ftp_tps[i].fit_tp = tp; 2198 pp->ftp_tps[i].fit_id.fti_probe = pp; 2199#ifdef __sparc 2200 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS; 2201#else 2202 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS; 2203#endif 2204 } 2205 2206 /* 2207 * Then create a tracepoint for each is-enabled point. 2208 */ 2209 for (j = 0; i < ntps; i++, j++) { 2210 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP); 2211 2212 tp->ftt_proc = provider->ftp_proc; 2213 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j]; 2214 tp->ftt_pid = provider->ftp_pid; 2215 2216 pp->ftp_tps[i].fit_tp = tp; 2217 pp->ftp_tps[i].fit_id.fti_probe = pp; 2218 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED; 2219 } 2220 2221 /* 2222 * If the arguments are shuffled around we set the argument remapping 2223 * table. Later, when the probe fires, we only remap the arguments 2224 * if the table is non-NULL. 2225 */ 2226 for (i = 0; i < dhpb->dthpb_xargc; i++) { 2227 if (dhpb->dthpb_args[i] != i) { 2228 pp->ftp_argmap = dhpb->dthpb_args; 2229 break; 2230 } 2231 } 2232 2233 /* 2234 * The probe is fully constructed -- register it with DTrace. 2235 */ 2236 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod, 2237 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp); 2238 2239 mutex_exit(&provider->ftp_cmtx); 2240} 2241 2242/*ARGSUSED*/ 2243static void 2244fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) 2245{ 2246 /* 2247 * Clean up the USDT provider. There may be active consumers of the 2248 * provider busy adding probes, no damage will actually befall the 2249 * provider until that count has dropped to zero. This just puts 2250 * the provider on death row. 
 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}

/*
 * Meta-provider operations vector handed to dtrace_meta_register().
 */
static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};

/*ARGSUSED*/
static int
fasttrap_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	/* Nothing to set up on open; all work happens in the ioctl path. */
	return (0);
}

/*
 * ioctl handler for the fasttrap control device.  Two commands:
 *
 *   FASTTRAPIOC_MAKEPROBE: copy in a user-supplied probe specification
 *	and hand it to fasttrap_add_probe() to create pid probes.
 *
 *   FASTTRAPIOC_GETINSTR: look up the saved original instruction for a
 *	given pid/pc tracepoint and copy it back out to the caller.
 *
 * Returns 0 on success or an errno value.
 */
/*ARGSUSED*/
static int
fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
    struct thread *td)
{
#ifdef notyet
	struct kinfo_proc kp;
	const cred_t *cr = td->td_ucred;
#endif
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg;
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret, err;

		/* Fetch just the offset count first to size the copyin. */
		if (copyin(&uprobe->ftps_noffs, &noffs,
		    sizeof (uprobe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * NOTE(review): 'noffs' is fully user-controlled and the
		 * multiplication below is unchecked; a huge noffs could
		 * wrap 'size' before the 1MB sanity check below -- verify
		 * that noffs is bounded before trusting this arithmetic.
		 */
		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);

		if (size > 1024 * 1024)
			return (ENOMEM);

		probe = kmem_alloc(size, KM_SLEEP);

		/*
		 * Re-check ftps_noffs after the full copyin so a racing
		 * writer can't change the element count out from under the
		 * size we computed above.
		 */
		if (copyin(uprobe, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 *
		 * NOTE(review): strlen() assumes ftps_func/ftps_mod are
		 * NUL-terminated; nothing visible here forces termination
		 * of the copied-in buffers -- confirm before relying on it.
		 */
		if (u8_validate(probe->ftps_func, strlen(probe->ftps_func),
		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
			ret = EINVAL;
			goto err;
		}

		if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod),
		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
			ret = EINVAL;
			goto err;
		}

#ifdef notyet
		/*
		 * Permission check (compiled out for now): an unprivileged
		 * caller may only instrument a process it can both read
		 * and write.
		 */
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

#ifdef illumos
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			if (p == NULL || kp.ki_stat == SIDL) {
#ifdef illumos
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#ifdef illumos
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD | VWRITE)) != 0) {
#ifdef illumos
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */
#ifdef illumos
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		ret = fasttrap_add_probe(probe);
err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;
#ifdef illumos
		int ret;
#endif

#ifdef illumos
		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);
#endif

#ifdef notyet
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

#ifdef illumos
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			if (p == NULL || kp.ki_stat == SIDL) {
#ifdef illumos
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#ifdef illumos
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD)) != 0) {
#ifdef illumos
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */

#ifdef illumos
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		/*
		 * Walk the hash chain for this pid/pc looking for a live
		 * tracepoint; ftpc_acount != 0 means the traced process
		 * still counts as active.
		 */
		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		/* Snapshot the saved instruction while holding the bucket lock. */
		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}

/*
 * Module load routine: create the control device, initialize the global
 * locks and the tracepoints/providers/procs hash tables, start the
 * cleanup kproc, and install the fork/exec/exit hooks.
 */
static int
fasttrap_load(void)
{
	ulong_t nent;
	int i, ret;

	/* Create the /dev/dtrace/fasttrap entry.
 */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");

	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
	    NULL);

#ifdef illumos
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#endif
	fasttrap_total = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#ifdef illumos
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = tpoints_hash_size;
#endif

	/* Reject absurd tunable values: zero or more than 2^24 buckets. */
	if (nent == 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	tpoints_hash_size = nent;

	/*
	 * Round the bucket count up to a power of two so fth_mask can be
	 * used to reduce hash values to an index.
	 */
	if (ISP2(nent))
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#ifndef illumos
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if (ISP2(nent))
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#ifndef illumos
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * Start the cleanup thread; on failure, unwind what has been set
	 * up so far.
	 *
	 * NOTE(review): this error path frees fasttrap_provs.fth_table
	 * but not fasttrap_tpoints.fth_table, which was allocated above --
	 * verify whether that table is leaked here.
	 */
	ret = kproc_create(fasttrap_pid_cleanup_cb, NULL,
	    &fasttrap_cleanup_proc, 0, 0, "ftcleanup");
	if (ret != 0) {
		destroy_dev(fasttrap_cdev);
#ifndef illumos
		for (i = 0; i < fasttrap_provs.fth_nent; i++)
			mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
		for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
			mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
#endif
		kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent *
		    sizeof (fasttrap_bucket_t));
		mtx_destroy(&fasttrap_cleanup_mtx);
		mutex_destroy(&fasttrap_count_mtx);
		return (ret);
	}


	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if (ISP2(nent))
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#ifndef illumos
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    "processes bucket mtx", MUTEX_DEFAULT, NULL);

	rm_init(&fasttrap_tp_lock, "fasttrap tracepoint");

	/*
	 * This event handler must run before kdtrace_thread_dtor() since it
	 * accesses the thread's struct kdtrace_thread.
	 */
	fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
#endif

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork = &fasttrap_fork;
	dtrace_fasttrap_exit = &fasttrap_exec_exit;
	dtrace_fasttrap_exec = &fasttrap_exec_exit;

	/* Register as a meta-provider so USDT helpers can create providers. */
	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (0);
}

/*
 * Module unload routine: the inverse of fasttrap_load().  Returns -1
 * (and re-registers as a meta-provider) if any fasttrap-managed
 * provider cannot yet be torn down.
 */
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Iterate over all of our providers. If there's still a process
	 * that corresponds to that pid, fail to detach.
 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			/*
			 * Unregister succeeded: unlink and free the provider.
			 * Otherwise remember the failure and skip past it so
			 * the remaining providers are still attempted.
			 */
			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	/*
	 * At least one provider couldn't be unregistered: resume duty as
	 * a meta-provider and report failure to the caller.
	 */
	if (fail) {
		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

	/*
	 * Stop new processes from entering these hooks now, before the
	 * fasttrap_cleanup thread runs. That way all processes will hopefully
	 * be out of these hooks before we free fasttrap_provs.fth_table
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_drain = 1;
	/* Wait for the cleanup thread to finish up and signal us. */
	wakeup(&fasttrap_cleanup_cv);
	mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld",
	    0);
	fasttrap_cleanup_proc = NULL;
	mtx_destroy(&fasttrap_cleanup_mtx);

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

#ifndef illumos
	EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);

	/* Tear down the per-bucket locks of all three hash tables. */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx);
#endif
	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

#ifndef illumos
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
	rm_destroy(&fasttrap_tp_lock);
#endif

	return (0);
}

/*
 * Module event handler.  All real setup and teardown is performed by the
 * SYSINIT/SYSUNINIT hooks below, so every recognized event is a no-op.
 */
/* ARGSUSED */
static int
fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);