dtrace.c revision 269520
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * $FreeBSD: stable/10/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 269520 2014-08-04 15:36:22Z markj $ 22 */ 23 24/* 25 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2012 by Delphix. All rights reserved. 28 */ 29 30/* 31 * DTrace - Dynamic Tracing for Solaris 32 * 33 * This is the implementation of the Solaris Dynamic Tracing framework 34 * (DTrace). The user-visible interface to DTrace is described at length in 35 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 36 * library, the in-kernel DTrace framework, and the DTrace providers are 37 * described in the block comments in the <sys/dtrace.h> header file. The 38 * internal architecture of DTrace is described in the block comments in the 39 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 40 * implementation very much assume mastery of all of these sources; if one has 41 * an unanswered question about the implementation, one should consult them 42 * first. 43 * 44 * The functions here are ordered roughly as follows: 45 * 46 * - Probe context functions 47 * - Probe hashing functions 48 * - Non-probe context utility functions 49 * - Matching functions 50 * - Provider-to-Framework API functions 51 * - Probe management functions 52 * - DIF object functions 53 * - Format functions 54 * - Predicate functions 55 * - ECB functions 56 * - Buffer functions 57 * - Enabling functions 58 * - DOF functions 59 * - Anonymous enabling functions 60 * - Consumer state functions 61 * - Helper functions 62 * - Hook functions 63 * - Driver cookbook functions 64 * 65 * Each group of functions begins with a block comment labelled the "DTrace 66 * [Group] Functions", allowing one to find each block by searching forward 67 * on capital-f functions. 
68 */ 69#include <sys/errno.h> 70#if !defined(sun) 71#include <sys/time.h> 72#endif 73#include <sys/stat.h> 74#include <sys/modctl.h> 75#include <sys/conf.h> 76#include <sys/systm.h> 77#if defined(sun) 78#include <sys/ddi.h> 79#include <sys/sunddi.h> 80#endif 81#include <sys/cpuvar.h> 82#include <sys/kmem.h> 83#if defined(sun) 84#include <sys/strsubr.h> 85#endif 86#include <sys/sysmacros.h> 87#include <sys/dtrace_impl.h> 88#include <sys/atomic.h> 89#include <sys/cmn_err.h> 90#if defined(sun) 91#include <sys/mutex_impl.h> 92#include <sys/rwlock_impl.h> 93#endif 94#include <sys/ctf_api.h> 95#if defined(sun) 96#include <sys/panic.h> 97#include <sys/priv_impl.h> 98#endif 99#include <sys/policy.h> 100#if defined(sun) 101#include <sys/cred_impl.h> 102#include <sys/procfs_isa.h> 103#endif 104#include <sys/taskq.h> 105#if defined(sun) 106#include <sys/mkdev.h> 107#include <sys/kdi.h> 108#endif 109#include <sys/zone.h> 110#include <sys/socket.h> 111#include <netinet/in.h> 112#include "strtolctype.h" 113 114/* FreeBSD includes: */ 115#if !defined(sun) 116#include <sys/callout.h> 117#include <sys/ctype.h> 118#include <sys/eventhandler.h> 119#include <sys/limits.h> 120#include <sys/kdb.h> 121#include <sys/kernel.h> 122#include <sys/malloc.h> 123#include <sys/sysctl.h> 124#include <sys/lock.h> 125#include <sys/mutex.h> 126#include <sys/rwlock.h> 127#include <sys/sx.h> 128#include <sys/dtrace_bsd.h> 129#include <netinet/in.h> 130#include "dtrace_cddl.h" 131#include "dtrace_debug.c" 132#endif 133 134/* 135 * DTrace Tunable Variables 136 * 137 * The following variables may be tuned by adding a line to /etc/system that 138 * includes both the name of the DTrace module ("dtrace") and the name of the 139 * variable. For example: 140 * 141 * set dtrace:dtrace_destructive_disallow = 1 142 * 143 * In general, the only variables that one should be tuning this way are those 144 * that affect system-wide DTrace behavior, and for which the default behavior 145 * is undesirable. Most of these variables are tunable on a per-consumer 146 * basis using DTrace options, and need not be tuned on a system-wide basis. 147 * When tuning these variables, avoid pathological values; while some attempt 148 * is made to verify the integrity of these variables, they are not considered 149 * part of the supported interface to DTrace, and they are therefore not 150 * checked comprehensively. Further, these variables should not be tuned 151 * dynamically via "mdb -kw" or other means; they should only be tuned via 152 * /etc/system. 
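 *
 * (This file is also built for FreeBSD, which has no /etc/system.  There the
 * same variables are expected to be reachable as loader tunables or sysctls
 * under the kern.dtrace and debug.dtrace trees declared later in this file;
 * the exact node name depends on how each individual variable is registered,
 * so a purely illustrative example would be:
 *
 *	sysctl kern.dtrace.<tunable>=1
 *
 * rather than an /etc/system line.)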
153 */ 154int dtrace_destructive_disallow = 0; 155dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 156size_t dtrace_difo_maxsize = (256 * 1024); 157dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024); 158size_t dtrace_global_maxsize = (16 * 1024); 159size_t dtrace_actions_max = (16 * 1024); 160size_t dtrace_retain_max = 1024; 161dtrace_optval_t dtrace_helper_actions_max = 128; 162dtrace_optval_t dtrace_helper_providers_max = 32; 163dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 164size_t dtrace_strsize_default = 256; 165dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 166dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 167dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 168dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 169dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 170dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 171dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 172dtrace_optval_t dtrace_nspec_default = 1; 173dtrace_optval_t dtrace_specsize_default = 32 * 1024; 174dtrace_optval_t dtrace_stackframes_default = 20; 175dtrace_optval_t dtrace_ustackframes_default = 20; 176dtrace_optval_t dtrace_jstackframes_default = 50; 177dtrace_optval_t dtrace_jstackstrsize_default = 512; 178int dtrace_msgdsize_max = 128; 179hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 180hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 181int dtrace_devdepth_max = 32; 182int dtrace_err_verbose; 183hrtime_t dtrace_deadman_interval = NANOSEC; 184hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 185hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 186hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; 187#if !defined(sun) 188int dtrace_memstr_max = 4096; 189#endif 190 191/* 192 * DTrace External Variables 193 * 194 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 195 * available to DTrace consumers via the backtick (`) syntax. One of these, 196 * dtrace_zero, is made deliberately so: it is provided as a source of 197 * well-known, zero-filled memory. While this variable is not documented, 198 * it is used by some translators as an implementation detail. 199 */ 200const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 201 202/* 203 * DTrace Internal Variables 204 */ 205#if defined(sun) 206static dev_info_t *dtrace_devi; /* device info */ 207#endif 208#if defined(sun) 209static vmem_t *dtrace_arena; /* probe ID arena */ 210static vmem_t *dtrace_minor; /* minor number arena */ 211#else 212static taskq_t *dtrace_taskq; /* task queue */ 213static struct unrhdr *dtrace_arena; /* Probe ID number. 
*/ 214#endif 215static dtrace_probe_t **dtrace_probes; /* array of all probes */ 216static int dtrace_nprobes; /* number of probes */ 217static dtrace_provider_t *dtrace_provider; /* provider list */ 218static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 219static int dtrace_opens; /* number of opens */ 220static int dtrace_helpers; /* number of helpers */ 221static int dtrace_getf; /* number of unpriv getf()s */ 222#if defined(sun) 223static void *dtrace_softstate; /* softstate pointer */ 224#endif 225static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 226static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 227static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 228static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 229static int dtrace_toxranges; /* number of toxic ranges */ 230static int dtrace_toxranges_max; /* size of toxic range array */ 231static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 232static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 233static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 234static kthread_t *dtrace_panicked; /* panicking thread */ 235static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 236static dtrace_genid_t dtrace_probegen; /* current probe generation */ 237static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 238static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 239static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 240static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 241static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 242#if !defined(sun) 243static struct mtx dtrace_unr_mtx; 244MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF); 245int dtrace_in_probe; /* non-zero if executing a probe */ 246#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 247uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */ 248#endif 249static eventhandler_tag dtrace_kld_load_tag; 250static eventhandler_tag dtrace_kld_unload_try_tag; 251#endif 252 253/* 254 * DTrace Locking 255 * DTrace is protected by three (relatively coarse-grained) locks: 256 * 257 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 258 * including enabling state, probes, ECBs, consumer state, helper state, 259 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 260 * probe context is lock-free -- synchronization is handled via the 261 * dtrace_sync() cross call mechanism. 262 * 263 * (2) dtrace_provider_lock is required when manipulating provider state, or 264 * when provider state must be held constant. 265 * 266 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 267 * when meta provider state must be held constant. 268 * 269 * The lock ordering between these three locks is dtrace_meta_lock before 270 * dtrace_provider_lock before dtrace_lock. (In particular, there are 271 * several places where dtrace_provider_lock is held by the framework as it 272 * calls into the providers -- which then call back into the framework, 273 * grabbing dtrace_lock.) 274 * 275 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 276 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 277 * role as a coarse-grained lock; it is acquired before both of these locks. 
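 *
 * (Taken together with the mod_lock and dtrace_meta_lock constraints
 * described next, the overall acquisition order works out to:
 *
 *	dtrace_meta_lock -> cpu_lock -> dtrace_provider_lock ->
 *	    mod_lock -> dtrace_lock
 *
 * for any path that needs several of these locks.)
 *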
278 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 279 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 280 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 281 * acquired _between_ dtrace_provider_lock and dtrace_lock. 282 */ 283static kmutex_t dtrace_lock; /* probe state lock */ 284static kmutex_t dtrace_provider_lock; /* provider state lock */ 285static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 286 287#if !defined(sun) 288/* XXX FreeBSD hacks. */ 289#define cr_suid cr_svuid 290#define cr_sgid cr_svgid 291#define ipaddr_t in_addr_t 292#define mod_modname pathname 293#define vuprintf vprintf 294#define ttoproc(_a) ((_a)->td_proc) 295#define crgetzoneid(_a) 0 296#define NCPU MAXCPU 297#define SNOCD 0 298#define CPU_ON_INTR(_a) 0 299 300#define PRIV_EFFECTIVE (1 << 0) 301#define PRIV_DTRACE_KERNEL (1 << 1) 302#define PRIV_DTRACE_PROC (1 << 2) 303#define PRIV_DTRACE_USER (1 << 3) 304#define PRIV_PROC_OWNER (1 << 4) 305#define PRIV_PROC_ZONE (1 << 5) 306#define PRIV_ALL ~0 307 308SYSCTL_DECL(_debug_dtrace); 309SYSCTL_DECL(_kern_dtrace); 310#endif 311 312#if defined(sun) 313#define curcpu CPU->cpu_id 314#endif 315 316 317/* 318 * DTrace Provider Variables 319 * 320 * These are the variables relating to DTrace as a provider (that is, the 321 * provider of the BEGIN, END, and ERROR probes). 322 */ 323static dtrace_pattr_t dtrace_provider_attr = { 324{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 325{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 326{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 327{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 328{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 329}; 330 331static void 332dtrace_nullop(void) 333{} 334 335static dtrace_pops_t dtrace_provider_ops = { 336 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop, 337 (void (*)(void *, modctl_t *))dtrace_nullop, 338 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 339 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 340 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 341 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 342 NULL, 343 NULL, 344 NULL, 345 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop 346}; 347 348static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 349static dtrace_id_t dtrace_probeid_end; /* special END probe */ 350dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 351 352/* 353 * DTrace Helper Tracing Variables 354 */ 355uint32_t dtrace_helptrace_next = 0; 356uint32_t dtrace_helptrace_nlocals; 357char *dtrace_helptrace_buffer; 358int dtrace_helptrace_bufsize = 512 * 1024; 359 360#ifdef DEBUG 361int dtrace_helptrace_enabled = 1; 362#else 363int dtrace_helptrace_enabled = 0; 364#endif 365 366/* 367 * DTrace Error Hashing 368 * 369 * On DEBUG kernels, DTrace will track the errors that has seen in a hash 370 * table. This is very useful for checking coverage of tests that are 371 * expected to induce DIF or DOF processing errors, and may be useful for 372 * debugging problems in the DIF code generator or in DOF generation . The 373 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 
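 *
 * For example, on a DEBUG kernel one would expect something like:
 *
 *	# mdb -k
 *	> ::dtrace_errhash
 *
 * to list each distinct error seen along with how many times it has been
 * induced.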
374 */ 375#ifdef DEBUG 376static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 377static const char *dtrace_errlast; 378static kthread_t *dtrace_errthread; 379static kmutex_t dtrace_errlock; 380#endif 381 382/* 383 * DTrace Macros and Constants 384 * 385 * These are various macros that are useful in various spots in the 386 * implementation, along with a few random constants that have no meaning 387 * outside of the implementation. There is no real structure to this cpp 388 * mishmash -- but is there ever? 389 */ 390#define DTRACE_HASHSTR(hash, probe) \ 391 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 392 393#define DTRACE_HASHNEXT(hash, probe) \ 394 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 395 396#define DTRACE_HASHPREV(hash, probe) \ 397 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 398 399#define DTRACE_HASHEQ(hash, lhs, rhs) \ 400 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 401 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 402 403#define DTRACE_AGGHASHSIZE_SLEW 17 404 405#define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 406 407/* 408 * The key for a thread-local variable consists of the lower 61 bits of the 409 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 410 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 411 * equal to a variable identifier. This is necessary (but not sufficient) to 412 * assure that global associative arrays never collide with thread-local 413 * variables. To guarantee that they cannot collide, we must also define the 414 * order for keying dynamic variables. That order is: 415 * 416 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 417 * 418 * Because the variable-key and the tls-key are in orthogonal spaces, there is 419 * no way for a global variable key signature to match a thread-local key 420 * signature. 421 */ 422#if defined(sun) 423#define DTRACE_TLS_THRKEY(where) { \ 424 uint_t intr = 0; \ 425 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 426 for (; actv; actv >>= 1) \ 427 intr++; \ 428 ASSERT(intr < (1 << 3)); \ 429 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 430 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 431} 432#else 433#define DTRACE_TLS_THRKEY(where) { \ 434 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \ 435 uint_t intr = 0; \ 436 uint_t actv = _c->cpu_intr_actv; \ 437 for (; actv; actv >>= 1) \ 438 intr++; \ 439 ASSERT(intr < (1 << 3)); \ 440 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \ 441 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 442} 443#endif 444 445#define DT_BSWAP_8(x) ((x) & 0xff) 446#define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 447#define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 448#define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 449 450#define DT_MASK_LO 0x00000000FFFFFFFFULL 451 452#define DTRACE_STORE(type, tomax, offset, what) \ 453 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 454 455#ifndef __x86 456#define DTRACE_ALIGNCHECK(addr, size, flags) \ 457 if (addr & (size - 1)) { \ 458 *flags |= CPU_DTRACE_BADALIGN; \ 459 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 460 return (0); \ 461 } 462#else 463#define DTRACE_ALIGNCHECK(addr, size, flags) 464#endif 465 466/* 467 * Test whether a range of memory starting at testaddr of size testsz falls 468 * within the range of memory described by addr, sz. 
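 * (For reference, the obvious formulation -- "testaddr >= baseaddr &&
 * testaddr + testsz <= baseaddr + basesz" -- would be unsafe, because either
 * addition can wrap around the top of the address space and make a bogus
 * range appear valid; the comparisons below are rearranged to avoid that.)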
We take care to avoid 469 * problems with overflow and underflow of the unsigned quantities, and 470 * disallow all negative sizes. Ranges of size 0 are allowed. 471 */ 472#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 473 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \ 474 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \ 475 (testaddr) + (testsz) >= (testaddr)) 476 477/* 478 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 479 * alloc_sz on the righthand side of the comparison in order to avoid overflow 480 * or underflow in the comparison with it. This is simpler than the INRANGE 481 * check above, because we know that the dtms_scratch_ptr is valid in the 482 * range. Allocations of size zero are allowed. 483 */ 484#define DTRACE_INSCRATCH(mstate, alloc_sz) \ 485 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 486 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 487 488#define DTRACE_LOADFUNC(bits) \ 489/*CSTYLED*/ \ 490uint##bits##_t \ 491dtrace_load##bits(uintptr_t addr) \ 492{ \ 493 size_t size = bits / NBBY; \ 494 /*CSTYLED*/ \ 495 uint##bits##_t rval; \ 496 int i; \ 497 volatile uint16_t *flags = (volatile uint16_t *) \ 498 &cpu_core[curcpu].cpuc_dtrace_flags; \ 499 \ 500 DTRACE_ALIGNCHECK(addr, size, flags); \ 501 \ 502 for (i = 0; i < dtrace_toxranges; i++) { \ 503 if (addr >= dtrace_toxrange[i].dtt_limit) \ 504 continue; \ 505 \ 506 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 507 continue; \ 508 \ 509 /* \ 510 * This address falls within a toxic region; return 0. \ 511 */ \ 512 *flags |= CPU_DTRACE_BADADDR; \ 513 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 514 return (0); \ 515 } \ 516 \ 517 *flags |= CPU_DTRACE_NOFAULT; \ 518 /*CSTYLED*/ \ 519 rval = *((volatile uint##bits##_t *)addr); \ 520 *flags &= ~CPU_DTRACE_NOFAULT; \ 521 \ 522 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 523} 524 525#ifdef _LP64 526#define dtrace_loadptr dtrace_load64 527#else 528#define dtrace_loadptr dtrace_load32 529#endif 530 531#define DTRACE_DYNHASH_FREE 0 532#define DTRACE_DYNHASH_SINK 1 533#define DTRACE_DYNHASH_VALID 2 534 535#define DTRACE_MATCH_NEXT 0 536#define DTRACE_MATCH_DONE 1 537#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 538#define DTRACE_STATE_ALIGN 64 539 540#define DTRACE_FLAGS2FLT(flags) \ 541 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 542 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 543 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 544 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 545 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 546 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 547 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 548 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 549 ((flags) & CPU_DTRACE_BADSTACK) ? 
DTRACEFLT_BADSTACK : \
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context. Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.
If these counters 635 * are incremented concurrently on different CPUs, scalability will be 636 * adversely affected -- but we don't expect them to be white-hot in a 637 * correctly constructed enabling... 638 */ 639 uint32_t oval, nval; 640 641 do { 642 oval = *counter; 643 644 if ((nval = oval + 1) == 0) { 645 /* 646 * If the counter would wrap, set it to 1 -- assuring 647 * that the counter is never zero when we have seen 648 * errors. (The counter must be 32-bits because we 649 * aren't guaranteed a 64-bit compare&swap operation.) 650 * To save this code both the infamy of being fingered 651 * by a priggish news story and the indignity of being 652 * the target of a neo-puritan witch trial, we're 653 * carefully avoiding any colorful description of the 654 * likelihood of this condition -- but suffice it to 655 * say that it is only slightly more likely than the 656 * overflow of predicate cache IDs, as discussed in 657 * dtrace_predicate_create(). 658 */ 659 nval = 1; 660 } 661 } while (dtrace_cas32(counter, oval, nval) != oval); 662} 663 664/* 665 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 666 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 667 */ 668DTRACE_LOADFUNC(8) 669DTRACE_LOADFUNC(16) 670DTRACE_LOADFUNC(32) 671DTRACE_LOADFUNC(64) 672 673static int 674dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 675{ 676 if (dest < mstate->dtms_scratch_base) 677 return (0); 678 679 if (dest + size < dest) 680 return (0); 681 682 if (dest + size > mstate->dtms_scratch_ptr) 683 return (0); 684 685 return (1); 686} 687 688static int 689dtrace_canstore_statvar(uint64_t addr, size_t sz, 690 dtrace_statvar_t **svars, int nsvars) 691{ 692 int i; 693 694 for (i = 0; i < nsvars; i++) { 695 dtrace_statvar_t *svar = svars[i]; 696 697 if (svar == NULL || svar->dtsv_size == 0) 698 continue; 699 700 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 701 return (1); 702 } 703 704 return (0); 705} 706 707/* 708 * Check to see if the address is within a memory region to which a store may 709 * be issued. This includes the DTrace scratch areas, and any DTrace variable 710 * region. The caller of dtrace_canstore() is responsible for performing any 711 * alignment checks that are needed before stores are actually executed. 712 */ 713static int 714dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 715 dtrace_vstate_t *vstate) 716{ 717 /* 718 * First, check to see if the address is in scratch space... 719 */ 720 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 721 mstate->dtms_scratch_size)) 722 return (1); 723 724 /* 725 * Now check to see if it's a dynamic variable. This check will pick 726 * up both thread-local variables and any global dynamically-allocated 727 * variables. 728 */ 729 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base, 730 vstate->dtvs_dynvars.dtds_size)) { 731 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 732 uintptr_t base = (uintptr_t)dstate->dtds_base + 733 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 734 uintptr_t chunkoffs; 735 736 /* 737 * Before we assume that we can store here, we need to make 738 * sure that it isn't in our metadata -- storing to our 739 * dynamic variable metadata would corrupt our state. 
For 740 * the range to not include any dynamic variable metadata, 741 * it must: 742 * 743 * (1) Start above the hash table that is at the base of 744 * the dynamic variable space 745 * 746 * (2) Have a starting chunk offset that is beyond the 747 * dtrace_dynvar_t that is at the base of every chunk 748 * 749 * (3) Not span a chunk boundary 750 * 751 */ 752 if (addr < base) 753 return (0); 754 755 chunkoffs = (addr - base) % dstate->dtds_chunksize; 756 757 if (chunkoffs < sizeof (dtrace_dynvar_t)) 758 return (0); 759 760 if (chunkoffs + sz > dstate->dtds_chunksize) 761 return (0); 762 763 return (1); 764 } 765 766 /* 767 * Finally, check the static local and global variables. These checks 768 * take the longest, so we perform them last. 769 */ 770 if (dtrace_canstore_statvar(addr, sz, 771 vstate->dtvs_locals, vstate->dtvs_nlocals)) 772 return (1); 773 774 if (dtrace_canstore_statvar(addr, sz, 775 vstate->dtvs_globals, vstate->dtvs_nglobals)) 776 return (1); 777 778 return (0); 779} 780 781 782/* 783 * Convenience routine to check to see if the address is within a memory 784 * region in which a load may be issued given the user's privilege level; 785 * if not, it sets the appropriate error flags and loads 'addr' into the 786 * illegal value slot. 787 * 788 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 789 * appropriate memory access protection. 790 */ 791static int 792dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 793 dtrace_vstate_t *vstate) 794{ 795 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 796 file_t *fp; 797 798 /* 799 * If we hold the privilege to read from kernel memory, then 800 * everything is readable. 801 */ 802 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 803 return (1); 804 805 /* 806 * You can obviously read that which you can store. 807 */ 808 if (dtrace_canstore(addr, sz, mstate, vstate)) 809 return (1); 810 811 /* 812 * We're allowed to read from our own string table. 813 */ 814 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab, 815 mstate->dtms_difo->dtdo_strlen)) 816 return (1); 817 818 if (vstate->dtvs_state != NULL && 819 dtrace_priv_proc(vstate->dtvs_state)) { 820 proc_t *p; 821 822 /* 823 * When we have privileges to the current process, there are 824 * several context-related kernel structures that are safe to 825 * read, even absent the privilege to read from kernel memory. 826 * These reads are safe because these structures contain only 827 * state that (1) we're permitted to read, (2) is harmless or 828 * (3) contains pointers to additional kernel state that we're 829 * not permitted to read (and as such, do not present an 830 * opportunity for privilege escalation). Finally (and 831 * critically), because of the nature of their relation with 832 * the current thread context, the memory associated with these 833 * structures cannot change over the duration of probe context, 834 * and it is therefore impossible for this memory to be 835 * deallocated and reallocated as something else while it's 836 * being operated upon. 
837 */ 838 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) 839 return (1); 840 841 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr, 842 sz, curthread->t_procp, sizeof (proc_t))) { 843 return (1); 844 } 845 846 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz, 847 curthread->t_cred, sizeof (cred_t))) { 848 return (1); 849 } 850 851#if defined(sun) 852 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz, 853 &(p->p_pidp->pid_id), sizeof (pid_t))) { 854 return (1); 855 } 856 857 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz, 858 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) { 859 return (1); 860 } 861#endif 862 } 863 864 if ((fp = mstate->dtms_getf) != NULL) { 865 uintptr_t psz = sizeof (void *); 866 vnode_t *vp; 867 vnodeops_t *op; 868 869 /* 870 * When getf() returns a file_t, the enabling is implicitly 871 * granted the (transient) right to read the returned file_t 872 * as well as the v_path and v_op->vnop_name of the underlying 873 * vnode. These accesses are allowed after a successful 874 * getf() because the members that they refer to cannot change 875 * once set -- and the barrier logic in the kernel's closef() 876 * path assures that the file_t and its referenced vode_t 877 * cannot themselves be stale (that is, it impossible for 878 * either dtms_getf itself or its f_vnode member to reference 879 * freed memory). 880 */ 881 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) 882 return (1); 883 884 if ((vp = fp->f_vnode) != NULL) { 885#if defined(sun) 886 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) 887 return (1); 888 if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz, 889 vp->v_path, strlen(vp->v_path) + 1)) { 890 return (1); 891 } 892#endif 893 894 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) 895 return (1); 896 897#if defined(sun) 898 if ((op = vp->v_op) != NULL && 899 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) { 900 return (1); 901 } 902 903 if (op != NULL && op->vnop_name != NULL && 904 DTRACE_INRANGE(addr, sz, op->vnop_name, 905 strlen(op->vnop_name) + 1)) { 906 return (1); 907 } 908#endif 909 } 910 } 911 912 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 913 *illval = addr; 914 return (0); 915} 916 917/* 918 * Convenience routine to check to see if a given string is within a memory 919 * region in which a load may be issued given the user's privilege level; 920 * this exists so that we don't need to issue unnecessary dtrace_strlen() 921 * calls in the event that the user has all privileges. 922 */ 923static int 924dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 925 dtrace_vstate_t *vstate) 926{ 927 size_t strsz; 928 929 /* 930 * If we hold the privilege to read from kernel memory, then 931 * everything is readable. 932 */ 933 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 934 return (1); 935 936 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 937 if (dtrace_canload(addr, strsz, mstate, vstate)) 938 return (1); 939 940 return (0); 941} 942 943/* 944 * Convenience routine to check to see if a given variable is within a memory 945 * region in which a load may be issued given the user's privilege level. 946 */ 947static int 948dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 949 dtrace_vstate_t *vstate) 950{ 951 size_t sz; 952 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 953 954 /* 955 * If we hold the privilege to read from kernel memory, then 956 * everything is readable. 
957 */ 958 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 959 return (1); 960 961 if (type->dtdt_kind == DIF_TYPE_STRING) 962 sz = dtrace_strlen(src, 963 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 964 else 965 sz = type->dtdt_size; 966 967 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 968} 969 970/* 971 * Convert a string to a signed integer using safe loads. 972 * 973 * NOTE: This function uses various macros from strtolctype.h to manipulate 974 * digit values, etc -- these have all been checked to ensure they make 975 * no additional function calls. 976 */ 977static int64_t 978dtrace_strtoll(char *input, int base, size_t limit) 979{ 980 uintptr_t pos = (uintptr_t)input; 981 int64_t val = 0; 982 int x; 983 boolean_t neg = B_FALSE; 984 char c, cc, ccc; 985 uintptr_t end = pos + limit; 986 987 /* 988 * Consume any whitespace preceding digits. 989 */ 990 while ((c = dtrace_load8(pos)) == ' ' || c == '\t') 991 pos++; 992 993 /* 994 * Handle an explicit sign if one is present. 995 */ 996 if (c == '-' || c == '+') { 997 if (c == '-') 998 neg = B_TRUE; 999 c = dtrace_load8(++pos); 1000 } 1001 1002 /* 1003 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it 1004 * if present. 1005 */ 1006 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' || 1007 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) { 1008 pos += 2; 1009 c = ccc; 1010 } 1011 1012 /* 1013 * Read in contiguous digits until the first non-digit character. 1014 */ 1015 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base; 1016 c = dtrace_load8(++pos)) 1017 val = val * base + x; 1018 1019 return (neg ? -val : val); 1020} 1021 1022/* 1023 * Compare two strings using safe loads. 1024 */ 1025static int 1026dtrace_strncmp(char *s1, char *s2, size_t limit) 1027{ 1028 uint8_t c1, c2; 1029 volatile uint16_t *flags; 1030 1031 if (s1 == s2 || limit == 0) 1032 return (0); 1033 1034 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1035 1036 do { 1037 if (s1 == NULL) { 1038 c1 = '\0'; 1039 } else { 1040 c1 = dtrace_load8((uintptr_t)s1++); 1041 } 1042 1043 if (s2 == NULL) { 1044 c2 = '\0'; 1045 } else { 1046 c2 = dtrace_load8((uintptr_t)s2++); 1047 } 1048 1049 if (c1 != c2) 1050 return (c1 - c2); 1051 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 1052 1053 return (0); 1054} 1055 1056/* 1057 * Compute strlen(s) for a string using safe memory accesses. The additional 1058 * len parameter is used to specify a maximum length to ensure completion. 1059 */ 1060static size_t 1061dtrace_strlen(const char *s, size_t lim) 1062{ 1063 uint_t len; 1064 1065 for (len = 0; len != lim; len++) { 1066 if (dtrace_load8((uintptr_t)s++) == '\0') 1067 break; 1068 } 1069 1070 return (len); 1071} 1072 1073/* 1074 * Check if an address falls within a toxic region. 1075 */ 1076static int 1077dtrace_istoxic(uintptr_t kaddr, size_t size) 1078{ 1079 uintptr_t taddr, tsize; 1080 int i; 1081 1082 for (i = 0; i < dtrace_toxranges; i++) { 1083 taddr = dtrace_toxrange[i].dtt_base; 1084 tsize = dtrace_toxrange[i].dtt_limit - taddr; 1085 1086 if (kaddr - taddr < tsize) { 1087 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1088 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 1089 return (1); 1090 } 1091 1092 if (taddr - kaddr < size) { 1093 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1094 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 1095 return (1); 1096 } 1097 } 1098 1099 return (0); 1100} 1101 1102/* 1103 * Copy src to dst using safe memory accesses. 
The src is assumed to be unsafe 1104 * memory specified by the DIF program. The dst is assumed to be safe memory 1105 * that we can store to directly because it is managed by DTrace. As with 1106 * standard bcopy, overlapping copies are handled properly. 1107 */ 1108static void 1109dtrace_bcopy(const void *src, void *dst, size_t len) 1110{ 1111 if (len != 0) { 1112 uint8_t *s1 = dst; 1113 const uint8_t *s2 = src; 1114 1115 if (s1 <= s2) { 1116 do { 1117 *s1++ = dtrace_load8((uintptr_t)s2++); 1118 } while (--len != 0); 1119 } else { 1120 s2 += len; 1121 s1 += len; 1122 1123 do { 1124 *--s1 = dtrace_load8((uintptr_t)--s2); 1125 } while (--len != 0); 1126 } 1127 } 1128} 1129 1130/* 1131 * Copy src to dst using safe memory accesses, up to either the specified 1132 * length, or the point that a nul byte is encountered. The src is assumed to 1133 * be unsafe memory specified by the DIF program. The dst is assumed to be 1134 * safe memory that we can store to directly because it is managed by DTrace. 1135 * Unlike dtrace_bcopy(), overlapping regions are not handled. 1136 */ 1137static void 1138dtrace_strcpy(const void *src, void *dst, size_t len) 1139{ 1140 if (len != 0) { 1141 uint8_t *s1 = dst, c; 1142 const uint8_t *s2 = src; 1143 1144 do { 1145 *s1++ = c = dtrace_load8((uintptr_t)s2++); 1146 } while (--len != 0 && c != '\0'); 1147 } 1148} 1149 1150/* 1151 * Copy src to dst, deriving the size and type from the specified (BYREF) 1152 * variable type. The src is assumed to be unsafe memory specified by the DIF 1153 * program. The dst is assumed to be DTrace variable memory that is of the 1154 * specified type; we assume that we can store to directly. 1155 */ 1156static void 1157dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 1158{ 1159 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1160 1161 if (type->dtdt_kind == DIF_TYPE_STRING) { 1162 dtrace_strcpy(src, dst, type->dtdt_size); 1163 } else { 1164 dtrace_bcopy(src, dst, type->dtdt_size); 1165 } 1166} 1167 1168/* 1169 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1170 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1171 * safe memory that we can access directly because it is managed by DTrace. 1172 */ 1173static int 1174dtrace_bcmp(const void *s1, const void *s2, size_t len) 1175{ 1176 volatile uint16_t *flags; 1177 1178 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1179 1180 if (s1 == s2) 1181 return (0); 1182 1183 if (s1 == NULL || s2 == NULL) 1184 return (1); 1185 1186 if (s1 != s2 && len != 0) { 1187 const uint8_t *ps1 = s1; 1188 const uint8_t *ps2 = s2; 1189 1190 do { 1191 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1192 return (1); 1193 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1194 } 1195 return (0); 1196} 1197 1198/* 1199 * Zero the specified region using a simple byte-by-byte loop. Note that this 1200 * is for safe DTrace-managed memory only. 1201 */ 1202static void 1203dtrace_bzero(void *dst, size_t len) 1204{ 1205 uchar_t *cp; 1206 1207 for (cp = dst; len != 0; len--) 1208 *cp++ = 0; 1209} 1210 1211static void 1212dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1213{ 1214 uint64_t result[2]; 1215 1216 result[0] = addend1[0] + addend2[0]; 1217 result[1] = addend1[1] + addend2[1] + 1218 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1219 1220 sum[0] = result[0]; 1221 sum[1] = result[1]; 1222} 1223 1224/* 1225 * Shift the 128-bit value in a by b. If b is positive, shift left. 
1226 * If b is negative, shift right. 1227 */ 1228static void 1229dtrace_shift_128(uint64_t *a, int b) 1230{ 1231 uint64_t mask; 1232 1233 if (b == 0) 1234 return; 1235 1236 if (b < 0) { 1237 b = -b; 1238 if (b >= 64) { 1239 a[0] = a[1] >> (b - 64); 1240 a[1] = 0; 1241 } else { 1242 a[0] >>= b; 1243 mask = 1LL << (64 - b); 1244 mask -= 1; 1245 a[0] |= ((a[1] & mask) << (64 - b)); 1246 a[1] >>= b; 1247 } 1248 } else { 1249 if (b >= 64) { 1250 a[1] = a[0] << (b - 64); 1251 a[0] = 0; 1252 } else { 1253 a[1] <<= b; 1254 mask = a[0] >> (64 - b); 1255 a[1] |= mask; 1256 a[0] <<= b; 1257 } 1258 } 1259} 1260 1261/* 1262 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1263 * use native multiplication on those, and then re-combine into the 1264 * resulting 128-bit value. 1265 * 1266 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1267 * hi1 * hi2 << 64 + 1268 * hi1 * lo2 << 32 + 1269 * hi2 * lo1 << 32 + 1270 * lo1 * lo2 1271 */ 1272static void 1273dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1274{ 1275 uint64_t hi1, hi2, lo1, lo2; 1276 uint64_t tmp[2]; 1277 1278 hi1 = factor1 >> 32; 1279 hi2 = factor2 >> 32; 1280 1281 lo1 = factor1 & DT_MASK_LO; 1282 lo2 = factor2 & DT_MASK_LO; 1283 1284 product[0] = lo1 * lo2; 1285 product[1] = hi1 * hi2; 1286 1287 tmp[0] = hi1 * lo2; 1288 tmp[1] = 0; 1289 dtrace_shift_128(tmp, 32); 1290 dtrace_add_128(product, tmp, product); 1291 1292 tmp[0] = hi2 * lo1; 1293 tmp[1] = 0; 1294 dtrace_shift_128(tmp, 32); 1295 dtrace_add_128(product, tmp, product); 1296} 1297 1298/* 1299 * This privilege check should be used by actions and subroutines to 1300 * verify that the user credentials of the process that enabled the 1301 * invoking ECB match the target credentials 1302 */ 1303static int 1304dtrace_priv_proc_common_user(dtrace_state_t *state) 1305{ 1306 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1307 1308 /* 1309 * We should always have a non-NULL state cred here, since if cred 1310 * is null (anonymous tracing), we fast-path bypass this routine. 1311 */ 1312 ASSERT(s_cr != NULL); 1313 1314 if ((cr = CRED()) != NULL && 1315 s_cr->cr_uid == cr->cr_uid && 1316 s_cr->cr_uid == cr->cr_ruid && 1317 s_cr->cr_uid == cr->cr_suid && 1318 s_cr->cr_gid == cr->cr_gid && 1319 s_cr->cr_gid == cr->cr_rgid && 1320 s_cr->cr_gid == cr->cr_sgid) 1321 return (1); 1322 1323 return (0); 1324} 1325 1326/* 1327 * This privilege check should be used by actions and subroutines to 1328 * verify that the zone of the process that enabled the invoking ECB 1329 * matches the target credentials 1330 */ 1331static int 1332dtrace_priv_proc_common_zone(dtrace_state_t *state) 1333{ 1334#if defined(sun) 1335 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1336 1337 /* 1338 * We should always have a non-NULL state cred here, since if cred 1339 * is null (anonymous tracing), we fast-path bypass this routine. 1340 */ 1341 ASSERT(s_cr != NULL); 1342 1343 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone) 1344 return (1); 1345 1346 return (0); 1347#else 1348 return (1); 1349#endif 1350} 1351 1352/* 1353 * This privilege check should be used by actions and subroutines to 1354 * verify that the process has not setuid or changed credentials. 
1355 */ 1356static int 1357dtrace_priv_proc_common_nocd(void) 1358{ 1359 proc_t *proc; 1360 1361 if ((proc = ttoproc(curthread)) != NULL && 1362 !(proc->p_flag & SNOCD)) 1363 return (1); 1364 1365 return (0); 1366} 1367 1368static int 1369dtrace_priv_proc_destructive(dtrace_state_t *state) 1370{ 1371 int action = state->dts_cred.dcr_action; 1372 1373 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1374 dtrace_priv_proc_common_zone(state) == 0) 1375 goto bad; 1376 1377 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1378 dtrace_priv_proc_common_user(state) == 0) 1379 goto bad; 1380 1381 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1382 dtrace_priv_proc_common_nocd() == 0) 1383 goto bad; 1384 1385 return (1); 1386 1387bad: 1388 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1389 1390 return (0); 1391} 1392 1393static int 1394dtrace_priv_proc_control(dtrace_state_t *state) 1395{ 1396 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1397 return (1); 1398 1399 if (dtrace_priv_proc_common_zone(state) && 1400 dtrace_priv_proc_common_user(state) && 1401 dtrace_priv_proc_common_nocd()) 1402 return (1); 1403 1404 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1405 1406 return (0); 1407} 1408 1409static int 1410dtrace_priv_proc(dtrace_state_t *state) 1411{ 1412 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1413 return (1); 1414 1415 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1416 1417 return (0); 1418} 1419 1420static int 1421dtrace_priv_kernel(dtrace_state_t *state) 1422{ 1423 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1424 return (1); 1425 1426 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1427 1428 return (0); 1429} 1430 1431static int 1432dtrace_priv_kernel_destructive(dtrace_state_t *state) 1433{ 1434 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1435 return (1); 1436 1437 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1438 1439 return (0); 1440} 1441 1442/* 1443 * Determine if the dte_cond of the specified ECB allows for processing of 1444 * the current probe to continue. Note that this routine may allow continued 1445 * processing, but with access(es) stripped from the mstate's dtms_access 1446 * field. 1447 */ 1448static int 1449dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate, 1450 dtrace_ecb_t *ecb) 1451{ 1452 dtrace_probe_t *probe = ecb->dte_probe; 1453 dtrace_provider_t *prov = probe->dtpr_provider; 1454 dtrace_pops_t *pops = &prov->dtpv_pops; 1455 int mode = DTRACE_MODE_NOPRIV_DROP; 1456 1457 ASSERT(ecb->dte_cond); 1458 1459#if defined(sun) 1460 if (pops->dtps_mode != NULL) { 1461 mode = pops->dtps_mode(prov->dtpv_arg, 1462 probe->dtpr_id, probe->dtpr_arg); 1463 1464 ASSERT((mode & DTRACE_MODE_USER) || 1465 (mode & DTRACE_MODE_KERNEL)); 1466 ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) || 1467 (mode & DTRACE_MODE_NOPRIV_DROP)); 1468 } 1469 1470 /* 1471 * If the dte_cond bits indicate that this consumer is only allowed to 1472 * see user-mode firings of this probe, call the provider's dtps_mode() 1473 * entry point to check that the probe was fired while in a user 1474 * context. If that's not the case, use the policy specified by the 1475 * provider to determine if we drop the probe or merely restrict 1476 * operation. 
1477 */ 1478 if (ecb->dte_cond & DTRACE_COND_USERMODE) { 1479 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP); 1480 1481 if (!(mode & DTRACE_MODE_USER)) { 1482 if (mode & DTRACE_MODE_NOPRIV_DROP) 1483 return (0); 1484 1485 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; 1486 } 1487 } 1488#endif 1489 1490 /* 1491 * This is more subtle than it looks. We have to be absolutely certain 1492 * that CRED() isn't going to change out from under us so it's only 1493 * legit to examine that structure if we're in constrained situations. 1494 * Currently, the only times we'll this check is if a non-super-user 1495 * has enabled the profile or syscall providers -- providers that 1496 * allow visibility of all processes. For the profile case, the check 1497 * above will ensure that we're examining a user context. 1498 */ 1499 if (ecb->dte_cond & DTRACE_COND_OWNER) { 1500 cred_t *cr; 1501 cred_t *s_cr = state->dts_cred.dcr_cred; 1502 proc_t *proc; 1503 1504 ASSERT(s_cr != NULL); 1505 1506 if ((cr = CRED()) == NULL || 1507 s_cr->cr_uid != cr->cr_uid || 1508 s_cr->cr_uid != cr->cr_ruid || 1509 s_cr->cr_uid != cr->cr_suid || 1510 s_cr->cr_gid != cr->cr_gid || 1511 s_cr->cr_gid != cr->cr_rgid || 1512 s_cr->cr_gid != cr->cr_sgid || 1513 (proc = ttoproc(curthread)) == NULL || 1514 (proc->p_flag & SNOCD)) { 1515 if (mode & DTRACE_MODE_NOPRIV_DROP) 1516 return (0); 1517 1518#if defined(sun) 1519 mstate->dtms_access &= ~DTRACE_ACCESS_PROC; 1520#endif 1521 } 1522 } 1523 1524#if defined(sun) 1525 /* 1526 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not 1527 * in our zone, check to see if our mode policy is to restrict rather 1528 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC 1529 * and DTRACE_ACCESS_ARGS 1530 */ 1531 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 1532 cred_t *cr; 1533 cred_t *s_cr = state->dts_cred.dcr_cred; 1534 1535 ASSERT(s_cr != NULL); 1536 1537 if ((cr = CRED()) == NULL || 1538 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) { 1539 if (mode & DTRACE_MODE_NOPRIV_DROP) 1540 return (0); 1541 1542 mstate->dtms_access &= 1543 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS); 1544 } 1545 } 1546#endif 1547 1548 return (1); 1549} 1550 1551/* 1552 * Note: not called from probe context. This function is called 1553 * asynchronously (and at a regular interval) from outside of probe context to 1554 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1555 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1556 */ 1557void 1558dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1559{ 1560 dtrace_dynvar_t *dirty; 1561 dtrace_dstate_percpu_t *dcpu; 1562 dtrace_dynvar_t **rinsep; 1563 int i, j, work = 0; 1564 1565 for (i = 0; i < NCPU; i++) { 1566 dcpu = &dstate->dtds_percpu[i]; 1567 rinsep = &dcpu->dtdsc_rinsing; 1568 1569 /* 1570 * If the dirty list is NULL, there is no dirty work to do. 1571 */ 1572 if (dcpu->dtdsc_dirty == NULL) 1573 continue; 1574 1575 if (dcpu->dtdsc_rinsing != NULL) { 1576 /* 1577 * If the rinsing list is non-NULL, then it is because 1578 * this CPU was selected to accept another CPU's 1579 * dirty list -- and since that time, dirty buffers 1580 * have accumulated. This is a highly unlikely 1581 * condition, but we choose to ignore the dirty 1582 * buffers -- they'll be picked up a future cleanse. 
1583 */ 1584 continue; 1585 } 1586 1587 if (dcpu->dtdsc_clean != NULL) { 1588 /* 1589 * If the clean list is non-NULL, then we're in a 1590 * situation where a CPU has done deallocations (we 1591 * have a non-NULL dirty list) but no allocations (we 1592 * also have a non-NULL clean list). We can't simply 1593 * move the dirty list into the clean list on this 1594 * CPU, yet we also don't want to allow this condition 1595 * to persist, lest a short clean list prevent a 1596 * massive dirty list from being cleaned (which in 1597 * turn could lead to otherwise avoidable dynamic 1598 * drops). To deal with this, we look for some CPU 1599 * with a NULL clean list, NULL dirty list, and NULL 1600 * rinsing list -- and then we borrow this CPU to 1601 * rinse our dirty list. 1602 */ 1603 for (j = 0; j < NCPU; j++) { 1604 dtrace_dstate_percpu_t *rinser; 1605 1606 rinser = &dstate->dtds_percpu[j]; 1607 1608 if (rinser->dtdsc_rinsing != NULL) 1609 continue; 1610 1611 if (rinser->dtdsc_dirty != NULL) 1612 continue; 1613 1614 if (rinser->dtdsc_clean != NULL) 1615 continue; 1616 1617 rinsep = &rinser->dtdsc_rinsing; 1618 break; 1619 } 1620 1621 if (j == NCPU) { 1622 /* 1623 * We were unable to find another CPU that 1624 * could accept this dirty list -- we are 1625 * therefore unable to clean it now. 1626 */ 1627 dtrace_dynvar_failclean++; 1628 continue; 1629 } 1630 } 1631 1632 work = 1; 1633 1634 /* 1635 * Atomically move the dirty list aside. 1636 */ 1637 do { 1638 dirty = dcpu->dtdsc_dirty; 1639 1640 /* 1641 * Before we zap the dirty list, set the rinsing list. 1642 * (This allows for a potential assertion in 1643 * dtrace_dynvar(): if a free dynamic variable appears 1644 * on a hash chain, either the dirty list or the 1645 * rinsing list for some CPU must be non-NULL.) 1646 */ 1647 *rinsep = dirty; 1648 dtrace_membar_producer(); 1649 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1650 dirty, NULL) != dirty); 1651 } 1652 1653 if (!work) { 1654 /* 1655 * We have no work to do; we can simply return. 1656 */ 1657 return; 1658 } 1659 1660 dtrace_sync(); 1661 1662 for (i = 0; i < NCPU; i++) { 1663 dcpu = &dstate->dtds_percpu[i]; 1664 1665 if (dcpu->dtdsc_rinsing == NULL) 1666 continue; 1667 1668 /* 1669 * We are now guaranteed that no hash chain contains a pointer 1670 * into this dirty list; we can make it clean. 1671 */ 1672 ASSERT(dcpu->dtdsc_clean == NULL); 1673 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1674 dcpu->dtdsc_rinsing = NULL; 1675 } 1676 1677 /* 1678 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1679 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1680 * This prevents a race whereby a CPU incorrectly decides that 1681 * the state should be something other than DTRACE_DSTATE_CLEAN 1682 * after dtrace_dynvar_clean() has completed. 1683 */ 1684 dtrace_sync(); 1685 1686 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1687} 1688 1689/* 1690 * Depending on the value of the op parameter, this function looks-up, 1691 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1692 * allocation is requested, this function will return a pointer to a 1693 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1694 * variable can be allocated. If NULL is returned, the appropriate counter 1695 * will be incremented. 
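 *
 * As a rough illustration of the calling convention (a simplified,
 * hypothetical sketch patterned on the way thread-local variables are keyed;
 * see the DTRACE_TLS_THRKEY comment above -- the real callers live in the
 * DIF emulator and size the allocation from the variable's type):
 *
 *	dtrace_key_t key[2];
 *
 *	key[0].dttk_value = (uint64_t)id;	(the variable's identifier)
 *	key[0].dttk_size = 0;
 *	DTRACE_TLS_THRKEY(key[1].dttk_value);	(the per-thread key)
 *	key[1].dttk_size = 0;
 *
 *	dvar = dtrace_dynvar(dstate, 2, key, sizeof (uint64_t),
 *	    DTRACE_DYNVAR_ALLOC, mstate, vstate);
 *	if (dvar == NULL)
 *		return;	(a dynamic variable drop; the appropriate counter
 *			has already been incremented)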
1696 */ 1697dtrace_dynvar_t * 1698dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1699 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1700 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1701{ 1702 uint64_t hashval = DTRACE_DYNHASH_VALID; 1703 dtrace_dynhash_t *hash = dstate->dtds_hash; 1704 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1705 processorid_t me = curcpu, cpu = me; 1706 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1707 size_t bucket, ksize; 1708 size_t chunksize = dstate->dtds_chunksize; 1709 uintptr_t kdata, lock, nstate; 1710 uint_t i; 1711 1712 ASSERT(nkeys != 0); 1713 1714 /* 1715 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1716 * algorithm. For the by-value portions, we perform the algorithm in 1717 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1718 * bit, and seems to have only a minute effect on distribution. For 1719 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1720 * over each referenced byte. It's painful to do this, but it's much 1721 * better than pathological hash distribution. The efficacy of the 1722 * hashing algorithm (and a comparison with other algorithms) may be 1723 * found by running the ::dtrace_dynstat MDB dcmd. 1724 */ 1725 for (i = 0; i < nkeys; i++) { 1726 if (key[i].dttk_size == 0) { 1727 uint64_t val = key[i].dttk_value; 1728 1729 hashval += (val >> 48) & 0xffff; 1730 hashval += (hashval << 10); 1731 hashval ^= (hashval >> 6); 1732 1733 hashval += (val >> 32) & 0xffff; 1734 hashval += (hashval << 10); 1735 hashval ^= (hashval >> 6); 1736 1737 hashval += (val >> 16) & 0xffff; 1738 hashval += (hashval << 10); 1739 hashval ^= (hashval >> 6); 1740 1741 hashval += val & 0xffff; 1742 hashval += (hashval << 10); 1743 hashval ^= (hashval >> 6); 1744 } else { 1745 /* 1746 * This is incredibly painful, but it beats the hell 1747 * out of the alternative. 1748 */ 1749 uint64_t j, size = key[i].dttk_size; 1750 uintptr_t base = (uintptr_t)key[i].dttk_value; 1751 1752 if (!dtrace_canload(base, size, mstate, vstate)) 1753 break; 1754 1755 for (j = 0; j < size; j++) { 1756 hashval += dtrace_load8(base + j); 1757 hashval += (hashval << 10); 1758 hashval ^= (hashval >> 6); 1759 } 1760 } 1761 } 1762 1763 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1764 return (NULL); 1765 1766 hashval += (hashval << 3); 1767 hashval ^= (hashval >> 11); 1768 hashval += (hashval << 15); 1769 1770 /* 1771 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1772 * comes out to be one of our two sentinel hash values. If this 1773 * actually happens, we set the hashval to be a value known to be a 1774 * non-sentinel value. 1775 */ 1776 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1777 hashval = DTRACE_DYNHASH_VALID; 1778 1779 /* 1780 * Yes, it's painful to do a divide here. If the cycle count becomes 1781 * important here, tricks can be pulled to reduce it. (However, it's 1782 * critical that hash collisions be kept to an absolute minimum; 1783 * they're much more painful than a divide.) It's better to have a 1784 * solution that generates few collisions and still keeps things 1785 * relatively simple. 
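 * (One such trick would be to size the hash table to a power of two and
 * replace the divide below with a mask, e.g. "hashval &
 * (dstate->dtds_hashsize - 1)"; that only pays off if the low-order bits of
 * the hash are as well mixed as the rest, which the final add/xor/add
 * scramble above helps ensure.)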
1786 */ 1787 bucket = hashval % dstate->dtds_hashsize; 1788 1789 if (op == DTRACE_DYNVAR_DEALLOC) { 1790 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1791 1792 for (;;) { 1793 while ((lock = *lockp) & 1) 1794 continue; 1795 1796 if (dtrace_casptr((volatile void *)lockp, 1797 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1798 break; 1799 } 1800 1801 dtrace_membar_producer(); 1802 } 1803 1804top: 1805 prev = NULL; 1806 lock = hash[bucket].dtdh_lock; 1807 1808 dtrace_membar_consumer(); 1809 1810 start = hash[bucket].dtdh_chain; 1811 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1812 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1813 op != DTRACE_DYNVAR_DEALLOC)); 1814 1815 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1816 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1817 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1818 1819 if (dvar->dtdv_hashval != hashval) { 1820 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1821 /* 1822 * We've reached the sink, and therefore the 1823 * end of the hash chain; we can kick out of 1824 * the loop knowing that we have seen a valid 1825 * snapshot of state. 1826 */ 1827 ASSERT(dvar->dtdv_next == NULL); 1828 ASSERT(dvar == &dtrace_dynhash_sink); 1829 break; 1830 } 1831 1832 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1833 /* 1834 * We've gone off the rails: somewhere along 1835 * the line, one of the members of this hash 1836 * chain was deleted. Note that we could also 1837 * detect this by simply letting this loop run 1838 * to completion, as we would eventually hit 1839 * the end of the dirty list. However, we 1840 * want to avoid running the length of the 1841 * dirty list unnecessarily (it might be quite 1842 * long), so we catch this as early as 1843 * possible by detecting the hash marker. In 1844 * this case, we simply set dvar to NULL and 1845 * break; the conditional after the loop will 1846 * send us back to top. 1847 */ 1848 dvar = NULL; 1849 break; 1850 } 1851 1852 goto next; 1853 } 1854 1855 if (dtuple->dtt_nkeys != nkeys) 1856 goto next; 1857 1858 for (i = 0; i < nkeys; i++, dkey++) { 1859 if (dkey->dttk_size != key[i].dttk_size) 1860 goto next; /* size or type mismatch */ 1861 1862 if (dkey->dttk_size != 0) { 1863 if (dtrace_bcmp( 1864 (void *)(uintptr_t)key[i].dttk_value, 1865 (void *)(uintptr_t)dkey->dttk_value, 1866 dkey->dttk_size)) 1867 goto next; 1868 } else { 1869 if (dkey->dttk_value != key[i].dttk_value) 1870 goto next; 1871 } 1872 } 1873 1874 if (op != DTRACE_DYNVAR_DEALLOC) 1875 return (dvar); 1876 1877 ASSERT(dvar->dtdv_next == NULL || 1878 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1879 1880 if (prev != NULL) { 1881 ASSERT(hash[bucket].dtdh_chain != dvar); 1882 ASSERT(start != dvar); 1883 ASSERT(prev->dtdv_next == dvar); 1884 prev->dtdv_next = dvar->dtdv_next; 1885 } else { 1886 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1887 start, dvar->dtdv_next) != start) { 1888 /* 1889 * We have failed to atomically swing the 1890 * hash table head pointer, presumably because 1891 * of a conflicting allocation on another CPU. 1892 * We need to reread the hash chain and try 1893 * again. 1894 */ 1895 goto top; 1896 } 1897 } 1898 1899 dtrace_membar_producer(); 1900 1901 /* 1902 * Now set the hash value to indicate that it's free. 
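 *
 * (Racing lookups that observe this DTRACE_DYNHASH_FREE marker while
 * walking the chain know that they have seen a stale snapshot and
 * restart from "top" -- see the hash-marker check in the traversal
 * loop above.)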
1903 */ 1904 ASSERT(hash[bucket].dtdh_chain != dvar); 1905 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1906 1907 dtrace_membar_producer(); 1908 1909 /* 1910 * Set the next pointer to point at the dirty list, and 1911 * atomically swing the dirty pointer to the newly freed dvar. 1912 */ 1913 do { 1914 next = dcpu->dtdsc_dirty; 1915 dvar->dtdv_next = next; 1916 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1917 1918 /* 1919 * Finally, unlock this hash bucket. 1920 */ 1921 ASSERT(hash[bucket].dtdh_lock == lock); 1922 ASSERT(lock & 1); 1923 hash[bucket].dtdh_lock++; 1924 1925 return (NULL); 1926next: 1927 prev = dvar; 1928 continue; 1929 } 1930 1931 if (dvar == NULL) { 1932 /* 1933 * If dvar is NULL, it is because we went off the rails: 1934 * one of the elements that we traversed in the hash chain 1935 * was deleted while we were traversing it. In this case, 1936 * we assert that we aren't doing a dealloc (deallocs lock 1937 * the hash bucket to prevent themselves from racing with 1938 * one another), and retry the hash chain traversal. 1939 */ 1940 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1941 goto top; 1942 } 1943 1944 if (op != DTRACE_DYNVAR_ALLOC) { 1945 /* 1946 * If we are not to allocate a new variable, we want to 1947 * return NULL now. Before we return, check that the value 1948 * of the lock word hasn't changed. If it has, we may have 1949 * seen an inconsistent snapshot. 1950 */ 1951 if (op == DTRACE_DYNVAR_NOALLOC) { 1952 if (hash[bucket].dtdh_lock != lock) 1953 goto top; 1954 } else { 1955 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1956 ASSERT(hash[bucket].dtdh_lock == lock); 1957 ASSERT(lock & 1); 1958 hash[bucket].dtdh_lock++; 1959 } 1960 1961 return (NULL); 1962 } 1963 1964 /* 1965 * We need to allocate a new dynamic variable. The size we need is the 1966 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1967 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1968 * the size of any referred-to data (dsize). We then round the final 1969 * size up to the chunksize for allocation. 1970 */ 1971 for (ksize = 0, i = 0; i < nkeys; i++) 1972 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1973 1974 /* 1975 * This should be pretty much impossible, but could happen if, say, 1976 * strange DIF specified the tuple. Ideally, this should be an 1977 * assertion and not an error condition -- but that requires that the 1978 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1979 * bullet-proof. (That is, it must not be able to be fooled by 1980 * malicious DIF.) Given the lack of backwards branches in DIF, 1981 * solving this would presumably not amount to solving the Halting 1982 * Problem -- but it still seems awfully hard. 1983 */ 1984 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1985 ksize + dsize > chunksize) { 1986 dcpu->dtdsc_drops++; 1987 return (NULL); 1988 } 1989 1990 nstate = DTRACE_DSTATE_EMPTY; 1991 1992 do { 1993retry: 1994 free = dcpu->dtdsc_free; 1995 1996 if (free == NULL) { 1997 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1998 void *rval; 1999 2000 if (clean == NULL) { 2001 /* 2002 * We're out of dynamic variable space on 2003 * this CPU. Unless we have tried all CPUs, 2004 * we'll try to allocate from a different 2005 * CPU. 
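 *
 * (Concretely: "cpu" advances from the current CPU, wrapping at NCPU,
 * and the retry below re-runs the free/clean list checks against the
 * newly selected dtds_percpu[cpu]; only once we have wrapped all the
 * way back around to "me" do we record why the allocation failed and,
 * on the final lap, take the appropriate drop.)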
2006 */ 2007 switch (dstate->dtds_state) { 2008 case DTRACE_DSTATE_CLEAN: { 2009 void *sp = &dstate->dtds_state; 2010 2011 if (++cpu >= NCPU) 2012 cpu = 0; 2013 2014 if (dcpu->dtdsc_dirty != NULL && 2015 nstate == DTRACE_DSTATE_EMPTY) 2016 nstate = DTRACE_DSTATE_DIRTY; 2017 2018 if (dcpu->dtdsc_rinsing != NULL) 2019 nstate = DTRACE_DSTATE_RINSING; 2020 2021 dcpu = &dstate->dtds_percpu[cpu]; 2022 2023 if (cpu != me) 2024 goto retry; 2025 2026 (void) dtrace_cas32(sp, 2027 DTRACE_DSTATE_CLEAN, nstate); 2028 2029 /* 2030 * To increment the correct bean 2031 * counter, take another lap. 2032 */ 2033 goto retry; 2034 } 2035 2036 case DTRACE_DSTATE_DIRTY: 2037 dcpu->dtdsc_dirty_drops++; 2038 break; 2039 2040 case DTRACE_DSTATE_RINSING: 2041 dcpu->dtdsc_rinsing_drops++; 2042 break; 2043 2044 case DTRACE_DSTATE_EMPTY: 2045 dcpu->dtdsc_drops++; 2046 break; 2047 } 2048 2049 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2050 return (NULL); 2051 } 2052 2053 /* 2054 * The clean list appears to be non-empty. We want to 2055 * move the clean list to the free list; we start by 2056 * moving the clean pointer aside. 2057 */ 2058 if (dtrace_casptr(&dcpu->dtdsc_clean, 2059 clean, NULL) != clean) { 2060 /* 2061 * We are in one of two situations: 2062 * 2063 * (a) The clean list was switched to the 2064 * free list by another CPU. 2065 * 2066 * (b) The clean list was added to by the 2067 * cleansing cyclic. 2068 * 2069 * In either of these situations, we can 2070 * just reattempt the free list allocation. 2071 */ 2072 goto retry; 2073 } 2074 2075 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2076 2077 /* 2078 * Now we'll move the clean list to our free list. 2079 * It's impossible for this to fail: the only way 2080 * the free list can be updated is through this 2081 * code path, and only one CPU can own the clean list. 2082 * Thus, it would only be possible for this to fail if 2083 * this code were racing with dtrace_dynvar_clean(). 2084 * (That is, if dtrace_dynvar_clean() updated the clean 2085 * list, and we ended up racing to update the free 2086 * list.) This race is prevented by the dtrace_sync() 2087 * in dtrace_dynvar_clean() -- which flushes the 2088 * owners of the clean lists out before resetting 2089 * the clean lists. 2090 */ 2091 dcpu = &dstate->dtds_percpu[me]; 2092 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2093 ASSERT(rval == NULL); 2094 goto retry; 2095 } 2096 2097 dvar = free; 2098 new_free = dvar->dtdv_next; 2099 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2100 2101 /* 2102 * We have now allocated a new chunk. We copy the tuple keys into the 2103 * tuple array and copy any referenced key data into the data space 2104 * following the tuple array. As we do this, we relocate dttk_value 2105 * in the final tuple to point to the key data address in the chunk. 
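 *
 * Schematically, the resulting chunk looks like this (kdata points
 * just past the last dtrace_key_t, and dtdv_data points ksize bytes
 * beyond that):
 *
 *	+----------------------+-------------------+----------------+
 *	| dtrace_dynvar_t with | by-ref key data,  | variable data, |
 *	| nkeys dtrace_key_t's | 8-byte aligned    | dsize bytes    |
 *	+----------------------+-------------------+----------------+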
2106 */ 2107 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2108 dvar->dtdv_data = (void *)(kdata + ksize); 2109 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2110 2111 for (i = 0; i < nkeys; i++) { 2112 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2113 size_t kesize = key[i].dttk_size; 2114 2115 if (kesize != 0) { 2116 dtrace_bcopy( 2117 (const void *)(uintptr_t)key[i].dttk_value, 2118 (void *)kdata, kesize); 2119 dkey->dttk_value = kdata; 2120 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2121 } else { 2122 dkey->dttk_value = key[i].dttk_value; 2123 } 2124 2125 dkey->dttk_size = kesize; 2126 } 2127 2128 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2129 dvar->dtdv_hashval = hashval; 2130 dvar->dtdv_next = start; 2131 2132 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2133 return (dvar); 2134 2135 /* 2136 * The cas has failed. Either another CPU is adding an element to 2137 * this hash chain, or another CPU is deleting an element from this 2138 * hash chain. The simplest way to deal with both of these cases 2139 * (though not necessarily the most efficient) is to free our 2140 * allocated block and tail-call ourselves. Note that the free is 2141 * to the dirty list and _not_ to the free list. This is to prevent 2142 * races with allocators, above. 2143 */ 2144 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2145 2146 dtrace_membar_producer(); 2147 2148 do { 2149 free = dcpu->dtdsc_dirty; 2150 dvar->dtdv_next = free; 2151 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2152 2153 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 2154} 2155 2156/*ARGSUSED*/ 2157static void 2158dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2159{ 2160 if ((int64_t)nval < (int64_t)*oval) 2161 *oval = nval; 2162} 2163 2164/*ARGSUSED*/ 2165static void 2166dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2167{ 2168 if ((int64_t)nval > (int64_t)*oval) 2169 *oval = nval; 2170} 2171 2172static void 2173dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2174{ 2175 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2176 int64_t val = (int64_t)nval; 2177 2178 if (val < 0) { 2179 for (i = 0; i < zero; i++) { 2180 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2181 quanta[i] += incr; 2182 return; 2183 } 2184 } 2185 } else { 2186 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2187 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2188 quanta[i - 1] += incr; 2189 return; 2190 } 2191 } 2192 2193 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2194 return; 2195 } 2196 2197 ASSERT(0); 2198} 2199 2200static void 2201dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2202{ 2203 uint64_t arg = *lquanta++; 2204 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2205 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2206 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2207 int32_t val = (int32_t)nval, level; 2208 2209 ASSERT(step != 0); 2210 ASSERT(levels != 0); 2211 2212 if (val < base) { 2213 /* 2214 * This is an underflow. 2215 */ 2216 lquanta[0] += incr; 2217 return; 2218 } 2219 2220 level = (val - base) / step; 2221 2222 if (level < levels) { 2223 lquanta[level + 1] += incr; 2224 return; 2225 } 2226 2227 /* 2228 * This is an overflow. 
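 *
 * (For illustration: with base = 0, step = 10 and levels = 3,
 * negative values land in lquanta[0] (underflow), 0-9 in lquanta[1],
 * 10-19 in lquanta[2], 20-29 in lquanta[3], and anything greater than
 * or equal to 30 lands here, in lquanta[levels + 1] = lquanta[4].)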
2229 */ 2230 lquanta[levels + 1] += incr; 2231} 2232 2233static int 2234dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2235 uint16_t high, uint16_t nsteps, int64_t value) 2236{ 2237 int64_t this = 1, last, next; 2238 int base = 1, order; 2239 2240 ASSERT(factor <= nsteps); 2241 ASSERT(nsteps % factor == 0); 2242 2243 for (order = 0; order < low; order++) 2244 this *= factor; 2245 2246 /* 2247 * If our value is less than our factor taken to the power of the 2248 * low order of magnitude, it goes into the zeroth bucket. 2249 */ 2250 if (value < (last = this)) 2251 return (0); 2252 2253 for (this *= factor; order <= high; order++) { 2254 int nbuckets = this > nsteps ? nsteps : this; 2255 2256 if ((next = this * factor) < this) { 2257 /* 2258 * We should not generally get log/linear quantizations 2259 * with a high magnitude that allows 64-bits to 2260 * overflow, but we nonetheless protect against this 2261 * by explicitly checking for overflow, and clamping 2262 * our value accordingly. 2263 */ 2264 value = this - 1; 2265 } 2266 2267 if (value < this) { 2268 /* 2269 * If our value lies within this order of magnitude, 2270 * determine its position by taking the offset within 2271 * the order of magnitude, dividing by the bucket 2272 * width, and adding to our (accumulated) base. 2273 */ 2274 return (base + (value - last) / (this / nbuckets)); 2275 } 2276 2277 base += nbuckets - (nbuckets / factor); 2278 last = this; 2279 this = next; 2280 } 2281 2282 /* 2283 * Our value is greater than or equal to our factor taken to the 2284 * power of one plus the high magnitude -- return the top bucket. 2285 */ 2286 return (base); 2287} 2288 2289static void 2290dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2291{ 2292 uint64_t arg = *llquanta++; 2293 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2294 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2295 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2296 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2297 2298 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2299 low, high, nsteps, nval)] += incr; 2300} 2301 2302/*ARGSUSED*/ 2303static void 2304dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2305{ 2306 data[0]++; 2307 data[1] += nval; 2308} 2309 2310/*ARGSUSED*/ 2311static void 2312dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2313{ 2314 int64_t snval = (int64_t)nval; 2315 uint64_t tmp[2]; 2316 2317 data[0]++; 2318 data[1] += nval; 2319 2320 /* 2321 * What we want to say here is: 2322 * 2323 * data[2] += nval * nval; 2324 * 2325 * But given that nval is 64-bit, we could easily overflow, so 2326 * we do this as 128-bit arithmetic. 2327 */ 2328 if (snval < 0) 2329 snval = -snval; 2330 2331 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2332 dtrace_add_128(data + 2, tmp, data + 2); 2333} 2334 2335/*ARGSUSED*/ 2336static void 2337dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2338{ 2339 *oval = *oval + 1; 2340} 2341 2342/*ARGSUSED*/ 2343static void 2344dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2345{ 2346 *oval += nval; 2347} 2348 2349/* 2350 * Aggregate given the tuple in the principal data buffer, and the aggregating 2351 * action denoted by the specified dtrace_aggregation_t. The aggregation 2352 * buffer is specified as the buf parameter. This routine does not return 2353 * failure; if there is no space in the aggregation buffer, the data will be 2354 * dropped, and a corresponding counter incremented. 
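 *
 * (The aggregating action itself is applied through agg->dtag_aggregate,
 * presumably one of the helpers above -- e.g. dtrace_aggregate_count(),
 * dtrace_aggregate_sum() or dtrace_aggregate_quantize() -- each taking a
 * pointer to the stored value(s), the new expression value and the
 * increment argument.)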
2355 */ 2356static void 2357dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2358 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2359{ 2360 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2361 uint32_t i, ndx, size, fsize; 2362 uint32_t align = sizeof (uint64_t) - 1; 2363 dtrace_aggbuffer_t *agb; 2364 dtrace_aggkey_t *key; 2365 uint32_t hashval = 0, limit, isstr; 2366 caddr_t tomax, data, kdata; 2367 dtrace_actkind_t action; 2368 dtrace_action_t *act; 2369 uintptr_t offs; 2370 2371 if (buf == NULL) 2372 return; 2373 2374 if (!agg->dtag_hasarg) { 2375 /* 2376 * Currently, only quantize() and lquantize() take additional 2377 * arguments, and they have the same semantics: an increment 2378 * value that defaults to 1 when not present. If additional 2379 * aggregating actions take arguments, the setting of the 2380 * default argument value will presumably have to become more 2381 * sophisticated... 2382 */ 2383 arg = 1; 2384 } 2385 2386 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2387 size = rec->dtrd_offset - agg->dtag_base; 2388 fsize = size + rec->dtrd_size; 2389 2390 ASSERT(dbuf->dtb_tomax != NULL); 2391 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2392 2393 if ((tomax = buf->dtb_tomax) == NULL) { 2394 dtrace_buffer_drop(buf); 2395 return; 2396 } 2397 2398 /* 2399 * The metastructure is always at the bottom of the buffer. 2400 */ 2401 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2402 sizeof (dtrace_aggbuffer_t)); 2403 2404 if (buf->dtb_offset == 0) { 2405 /* 2406 * We just kludge up approximately 1/8th of the size to be 2407 * buckets. If this guess ends up being routinely 2408 * off-the-mark, we may need to dynamically readjust this 2409 * based on past performance. 2410 */ 2411 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2412 2413 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2414 (uintptr_t)tomax || hashsize == 0) { 2415 /* 2416 * We've been given a ludicrously small buffer; 2417 * increment our drop count and leave. 2418 */ 2419 dtrace_buffer_drop(buf); 2420 return; 2421 } 2422 2423 /* 2424 * And now, a pathetic attempt to try to get a an odd (or 2425 * perchance, a prime) hash size for better hash distribution. 2426 */ 2427 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2428 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2429 2430 agb->dtagb_hashsize = hashsize; 2431 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2432 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2433 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2434 2435 for (i = 0; i < agb->dtagb_hashsize; i++) 2436 agb->dtagb_hash[i] = NULL; 2437 } 2438 2439 ASSERT(agg->dtag_first != NULL); 2440 ASSERT(agg->dtag_first->dta_intuple); 2441 2442 /* 2443 * Calculate the hash value based on the key. Note that we _don't_ 2444 * include the aggid in the hashing (but we will store it as part of 2445 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2446 * algorithm: a simple, quick algorithm that has no known funnels, and 2447 * gets good distribution in practice. The efficacy of the hashing 2448 * algorithm (and a comparison with other algorithms) may be found by 2449 * running the ::dtrace_aggstat MDB dcmd. 
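 *
 * (Only the tuple records themselves are hashed below: for each
 * intuple action we walk that record's bytes, and for string records
 * we stop at the terminating NUL so that whatever trails the string
 * does not perturb the hash.)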
2450 */ 2451 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2452 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2453 limit = i + act->dta_rec.dtrd_size; 2454 ASSERT(limit <= size); 2455 isstr = DTRACEACT_ISSTRING(act); 2456 2457 for (; i < limit; i++) { 2458 hashval += data[i]; 2459 hashval += (hashval << 10); 2460 hashval ^= (hashval >> 6); 2461 2462 if (isstr && data[i] == '\0') 2463 break; 2464 } 2465 } 2466 2467 hashval += (hashval << 3); 2468 hashval ^= (hashval >> 11); 2469 hashval += (hashval << 15); 2470 2471 /* 2472 * Yes, the divide here is expensive -- but it's generally the least 2473 * of the performance issues given the amount of data that we iterate 2474 * over to compute hash values, compare data, etc. 2475 */ 2476 ndx = hashval % agb->dtagb_hashsize; 2477 2478 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2479 ASSERT((caddr_t)key >= tomax); 2480 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2481 2482 if (hashval != key->dtak_hashval || key->dtak_size != size) 2483 continue; 2484 2485 kdata = key->dtak_data; 2486 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2487 2488 for (act = agg->dtag_first; act->dta_intuple; 2489 act = act->dta_next) { 2490 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2491 limit = i + act->dta_rec.dtrd_size; 2492 ASSERT(limit <= size); 2493 isstr = DTRACEACT_ISSTRING(act); 2494 2495 for (; i < limit; i++) { 2496 if (kdata[i] != data[i]) 2497 goto next; 2498 2499 if (isstr && data[i] == '\0') 2500 break; 2501 } 2502 } 2503 2504 if (action != key->dtak_action) { 2505 /* 2506 * We are aggregating on the same value in the same 2507 * aggregation with two different aggregating actions. 2508 * (This should have been picked up in the compiler, 2509 * so we may be dealing with errant or devious DIF.) 2510 * This is an error condition; we indicate as much, 2511 * and return. 2512 */ 2513 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2514 return; 2515 } 2516 2517 /* 2518 * This is a hit: we need to apply the aggregator to 2519 * the value at this key. 2520 */ 2521 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2522 return; 2523next: 2524 continue; 2525 } 2526 2527 /* 2528 * We didn't find it. We need to allocate some zero-filled space, 2529 * link it into the hash table appropriately, and apply the aggregator 2530 * to the (zero-filled) value. 2531 */ 2532 offs = buf->dtb_offset; 2533 while (offs & (align - 1)) 2534 offs += sizeof (uint32_t); 2535 2536 /* 2537 * If we don't have enough room to both allocate a new key _and_ 2538 * its associated data, increment the drop count and return. 2539 */ 2540 if ((uintptr_t)tomax + offs + fsize > 2541 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2542 dtrace_buffer_drop(buf); 2543 return; 2544 } 2545 2546 /*CONSTCOND*/ 2547 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2548 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2549 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2550 2551 key->dtak_data = kdata = tomax + offs; 2552 buf->dtb_offset = offs + fsize; 2553 2554 /* 2555 * Now copy the data across. 2556 */ 2557 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2558 2559 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2560 kdata[i] = data[i]; 2561 2562 /* 2563 * Because strings are not zeroed out by default, we need to iterate 2564 * looking for actions that store strings, and we need to explicitly 2565 * pad these strings out with zeroes. 
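 *
 * (For example, if a 16-byte string record holds "ab", bytes 3
 * through 15 of that record are forced to '\0' below, since the
 * straight copy above brought over whatever happened to follow the
 * string in the principal buffer.)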
2566 */ 2567 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2568 int nul; 2569 2570 if (!DTRACEACT_ISSTRING(act)) 2571 continue; 2572 2573 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2574 limit = i + act->dta_rec.dtrd_size; 2575 ASSERT(limit <= size); 2576 2577 for (nul = 0; i < limit; i++) { 2578 if (nul) { 2579 kdata[i] = '\0'; 2580 continue; 2581 } 2582 2583 if (data[i] != '\0') 2584 continue; 2585 2586 nul = 1; 2587 } 2588 } 2589 2590 for (i = size; i < fsize; i++) 2591 kdata[i] = 0; 2592 2593 key->dtak_hashval = hashval; 2594 key->dtak_size = size; 2595 key->dtak_action = action; 2596 key->dtak_next = agb->dtagb_hash[ndx]; 2597 agb->dtagb_hash[ndx] = key; 2598 2599 /* 2600 * Finally, apply the aggregator. 2601 */ 2602 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2603 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2604} 2605 2606/* 2607 * Given consumer state, this routine finds a speculation in the INACTIVE 2608 * state and transitions it into the ACTIVE state. If there is no speculation 2609 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2610 * incremented -- it is up to the caller to take appropriate action. 2611 */ 2612static int 2613dtrace_speculation(dtrace_state_t *state) 2614{ 2615 int i = 0; 2616 dtrace_speculation_state_t current; 2617 uint32_t *stat = &state->dts_speculations_unavail, count; 2618 2619 while (i < state->dts_nspeculations) { 2620 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2621 2622 current = spec->dtsp_state; 2623 2624 if (current != DTRACESPEC_INACTIVE) { 2625 if (current == DTRACESPEC_COMMITTINGMANY || 2626 current == DTRACESPEC_COMMITTING || 2627 current == DTRACESPEC_DISCARDING) 2628 stat = &state->dts_speculations_busy; 2629 i++; 2630 continue; 2631 } 2632 2633 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2634 current, DTRACESPEC_ACTIVE) == current) 2635 return (i + 1); 2636 } 2637 2638 /* 2639 * We couldn't find a speculation. If we found as much as a single 2640 * busy speculation buffer, we'll attribute this failure as "busy" 2641 * instead of "unavail". 2642 */ 2643 do { 2644 count = *stat; 2645 } while (dtrace_cas32(stat, count, count + 1) != count); 2646 2647 return (0); 2648} 2649 2650/* 2651 * This routine commits an active speculation. If the specified speculation 2652 * is not in a valid state to perform a commit(), this routine will silently do 2653 * nothing. 
The state of the specified speculation is transitioned according 2654 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2655 */ 2656static void 2657dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2658 dtrace_specid_t which) 2659{ 2660 dtrace_speculation_t *spec; 2661 dtrace_buffer_t *src, *dest; 2662 uintptr_t daddr, saddr, dlimit, slimit; 2663 dtrace_speculation_state_t current, new = 0; 2664 intptr_t offs; 2665 uint64_t timestamp; 2666 2667 if (which == 0) 2668 return; 2669 2670 if (which > state->dts_nspeculations) { 2671 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2672 return; 2673 } 2674 2675 spec = &state->dts_speculations[which - 1]; 2676 src = &spec->dtsp_buffer[cpu]; 2677 dest = &state->dts_buffer[cpu]; 2678 2679 do { 2680 current = spec->dtsp_state; 2681 2682 if (current == DTRACESPEC_COMMITTINGMANY) 2683 break; 2684 2685 switch (current) { 2686 case DTRACESPEC_INACTIVE: 2687 case DTRACESPEC_DISCARDING: 2688 return; 2689 2690 case DTRACESPEC_COMMITTING: 2691 /* 2692 * This is only possible if we are (a) commit()'ing 2693 * without having done a prior speculate() on this CPU 2694 * and (b) racing with another commit() on a different 2695 * CPU. There's nothing to do -- we just assert that 2696 * our offset is 0. 2697 */ 2698 ASSERT(src->dtb_offset == 0); 2699 return; 2700 2701 case DTRACESPEC_ACTIVE: 2702 new = DTRACESPEC_COMMITTING; 2703 break; 2704 2705 case DTRACESPEC_ACTIVEONE: 2706 /* 2707 * This speculation is active on one CPU. If our 2708 * buffer offset is non-zero, we know that the one CPU 2709 * must be us. Otherwise, we are committing on a 2710 * different CPU from the speculate(), and we must 2711 * rely on being asynchronously cleaned. 2712 */ 2713 if (src->dtb_offset != 0) { 2714 new = DTRACESPEC_COMMITTING; 2715 break; 2716 } 2717 /*FALLTHROUGH*/ 2718 2719 case DTRACESPEC_ACTIVEMANY: 2720 new = DTRACESPEC_COMMITTINGMANY; 2721 break; 2722 2723 default: 2724 ASSERT(0); 2725 } 2726 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2727 current, new) != current); 2728 2729 /* 2730 * We have set the state to indicate that we are committing this 2731 * speculation. Now reserve the necessary space in the destination 2732 * buffer. 2733 */ 2734 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2735 sizeof (uint64_t), state, NULL)) < 0) { 2736 dtrace_buffer_drop(dest); 2737 goto out; 2738 } 2739 2740 /* 2741 * We have sufficient space to copy the speculative buffer into the 2742 * primary buffer. First, modify the speculative buffer, filling 2743 * in the timestamp of all entries with the current time. The data 2744 * must have the commit() time rather than the time it was traced, 2745 * so that all entries in the primary buffer are in timestamp order. 2746 */ 2747 timestamp = dtrace_gethrtime(); 2748 saddr = (uintptr_t)src->dtb_tomax; 2749 slimit = saddr + src->dtb_offset; 2750 while (saddr < slimit) { 2751 size_t size; 2752 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2753 2754 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2755 saddr += sizeof (dtrace_epid_t); 2756 continue; 2757 } 2758 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2759 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2760 2761 ASSERT3U(saddr + size, <=, slimit); 2762 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2763 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2764 2765 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2766 2767 saddr += size; 2768 } 2769 2770 /* 2771 * Copy the buffer across. 
(Note that this is a 2772 * highly subobtimal bcopy(); in the unlikely event that this becomes 2773 * a serious performance issue, a high-performance DTrace-specific 2774 * bcopy() should obviously be invented.) 2775 */ 2776 daddr = (uintptr_t)dest->dtb_tomax + offs; 2777 dlimit = daddr + src->dtb_offset; 2778 saddr = (uintptr_t)src->dtb_tomax; 2779 2780 /* 2781 * First, the aligned portion. 2782 */ 2783 while (dlimit - daddr >= sizeof (uint64_t)) { 2784 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2785 2786 daddr += sizeof (uint64_t); 2787 saddr += sizeof (uint64_t); 2788 } 2789 2790 /* 2791 * Now any left-over bit... 2792 */ 2793 while (dlimit - daddr) 2794 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2795 2796 /* 2797 * Finally, commit the reserved space in the destination buffer. 2798 */ 2799 dest->dtb_offset = offs + src->dtb_offset; 2800 2801out: 2802 /* 2803 * If we're lucky enough to be the only active CPU on this speculation 2804 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2805 */ 2806 if (current == DTRACESPEC_ACTIVE || 2807 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2808 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2809 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2810 2811 ASSERT(rval == DTRACESPEC_COMMITTING); 2812 } 2813 2814 src->dtb_offset = 0; 2815 src->dtb_xamot_drops += src->dtb_drops; 2816 src->dtb_drops = 0; 2817} 2818 2819/* 2820 * This routine discards an active speculation. If the specified speculation 2821 * is not in a valid state to perform a discard(), this routine will silently 2822 * do nothing. The state of the specified speculation is transitioned 2823 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2824 */ 2825static void 2826dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2827 dtrace_specid_t which) 2828{ 2829 dtrace_speculation_t *spec; 2830 dtrace_speculation_state_t current, new = 0; 2831 dtrace_buffer_t *buf; 2832 2833 if (which == 0) 2834 return; 2835 2836 if (which > state->dts_nspeculations) { 2837 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2838 return; 2839 } 2840 2841 spec = &state->dts_speculations[which - 1]; 2842 buf = &spec->dtsp_buffer[cpu]; 2843 2844 do { 2845 current = spec->dtsp_state; 2846 2847 switch (current) { 2848 case DTRACESPEC_INACTIVE: 2849 case DTRACESPEC_COMMITTINGMANY: 2850 case DTRACESPEC_COMMITTING: 2851 case DTRACESPEC_DISCARDING: 2852 return; 2853 2854 case DTRACESPEC_ACTIVE: 2855 case DTRACESPEC_ACTIVEMANY: 2856 new = DTRACESPEC_DISCARDING; 2857 break; 2858 2859 case DTRACESPEC_ACTIVEONE: 2860 if (buf->dtb_offset != 0) { 2861 new = DTRACESPEC_INACTIVE; 2862 } else { 2863 new = DTRACESPEC_DISCARDING; 2864 } 2865 break; 2866 2867 default: 2868 ASSERT(0); 2869 } 2870 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2871 current, new) != current); 2872 2873 buf->dtb_offset = 0; 2874 buf->dtb_drops = 0; 2875} 2876 2877/* 2878 * Note: not called from probe context. This function is called 2879 * asynchronously from cross call context to clean any speculations that are 2880 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2881 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2882 * speculation. 
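 *
 * (Per-CPU, the cleaning itself is simple: a DISCARDING speculation
 * merely has this CPU's buffer offset reset to zero, while a
 * COMMITTINGMANY speculation with outstanding data is pushed into the
 * principal buffer via dtrace_speculation_commit().)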
2883 */ 2884static void 2885dtrace_speculation_clean_here(dtrace_state_t *state) 2886{ 2887 dtrace_icookie_t cookie; 2888 processorid_t cpu = curcpu; 2889 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2890 dtrace_specid_t i; 2891 2892 cookie = dtrace_interrupt_disable(); 2893 2894 if (dest->dtb_tomax == NULL) { 2895 dtrace_interrupt_enable(cookie); 2896 return; 2897 } 2898 2899 for (i = 0; i < state->dts_nspeculations; i++) { 2900 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2901 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2902 2903 if (src->dtb_tomax == NULL) 2904 continue; 2905 2906 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2907 src->dtb_offset = 0; 2908 continue; 2909 } 2910 2911 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2912 continue; 2913 2914 if (src->dtb_offset == 0) 2915 continue; 2916 2917 dtrace_speculation_commit(state, cpu, i + 1); 2918 } 2919 2920 dtrace_interrupt_enable(cookie); 2921} 2922 2923/* 2924 * Note: not called from probe context. This function is called 2925 * asynchronously (and at a regular interval) to clean any speculations that 2926 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2927 * is work to be done, it cross calls all CPUs to perform that work; 2928 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2929 * INACTIVE state until they have been cleaned by all CPUs. 2930 */ 2931static void 2932dtrace_speculation_clean(dtrace_state_t *state) 2933{ 2934 int work = 0, rv; 2935 dtrace_specid_t i; 2936 2937 for (i = 0; i < state->dts_nspeculations; i++) { 2938 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2939 2940 ASSERT(!spec->dtsp_cleaning); 2941 2942 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2943 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2944 continue; 2945 2946 work++; 2947 spec->dtsp_cleaning = 1; 2948 } 2949 2950 if (!work) 2951 return; 2952 2953 dtrace_xcall(DTRACE_CPUALL, 2954 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2955 2956 /* 2957 * We now know that all CPUs have committed or discarded their 2958 * speculation buffers, as appropriate. We can now set the state 2959 * to inactive. 2960 */ 2961 for (i = 0; i < state->dts_nspeculations; i++) { 2962 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2963 dtrace_speculation_state_t current, new; 2964 2965 if (!spec->dtsp_cleaning) 2966 continue; 2967 2968 current = spec->dtsp_state; 2969 ASSERT(current == DTRACESPEC_DISCARDING || 2970 current == DTRACESPEC_COMMITTINGMANY); 2971 2972 new = DTRACESPEC_INACTIVE; 2973 2974 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2975 ASSERT(rv == current); 2976 spec->dtsp_cleaning = 0; 2977 } 2978} 2979 2980/* 2981 * Called as part of a speculate() to get the speculative buffer associated 2982 * with a given speculation. Returns NULL if the specified speculation is not 2983 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2984 * the active CPU is not the specified CPU -- the speculation will be 2985 * atomically transitioned into the ACTIVEMANY state. 
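 *
 * (Schematically: ACTIVE becomes ACTIVEONE on the first speculate(),
 * ACTIVEONE becomes ACTIVEMANY when a second CPU joins in, and
 * INACTIVE, COMMITTING, COMMITTINGMANY and DISCARDING all yield NULL.)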
2986 */ 2987static dtrace_buffer_t * 2988dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2989 dtrace_specid_t which) 2990{ 2991 dtrace_speculation_t *spec; 2992 dtrace_speculation_state_t current, new = 0; 2993 dtrace_buffer_t *buf; 2994 2995 if (which == 0) 2996 return (NULL); 2997 2998 if (which > state->dts_nspeculations) { 2999 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3000 return (NULL); 3001 } 3002 3003 spec = &state->dts_speculations[which - 1]; 3004 buf = &spec->dtsp_buffer[cpuid]; 3005 3006 do { 3007 current = spec->dtsp_state; 3008 3009 switch (current) { 3010 case DTRACESPEC_INACTIVE: 3011 case DTRACESPEC_COMMITTINGMANY: 3012 case DTRACESPEC_DISCARDING: 3013 return (NULL); 3014 3015 case DTRACESPEC_COMMITTING: 3016 ASSERT(buf->dtb_offset == 0); 3017 return (NULL); 3018 3019 case DTRACESPEC_ACTIVEONE: 3020 /* 3021 * This speculation is currently active on one CPU. 3022 * Check the offset in the buffer; if it's non-zero, 3023 * that CPU must be us (and we leave the state alone). 3024 * If it's zero, assume that we're starting on a new 3025 * CPU -- and change the state to indicate that the 3026 * speculation is active on more than one CPU. 3027 */ 3028 if (buf->dtb_offset != 0) 3029 return (buf); 3030 3031 new = DTRACESPEC_ACTIVEMANY; 3032 break; 3033 3034 case DTRACESPEC_ACTIVEMANY: 3035 return (buf); 3036 3037 case DTRACESPEC_ACTIVE: 3038 new = DTRACESPEC_ACTIVEONE; 3039 break; 3040 3041 default: 3042 ASSERT(0); 3043 } 3044 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3045 current, new) != current); 3046 3047 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3048 return (buf); 3049} 3050 3051/* 3052 * Return a string. In the event that the user lacks the privilege to access 3053 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3054 * don't fail access checking. 3055 * 3056 * dtrace_dif_variable() uses this routine as a helper for various 3057 * builtin values such as 'execname' and 'probefunc.' 3058 */ 3059uintptr_t 3060dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 3061 dtrace_mstate_t *mstate) 3062{ 3063 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3064 uintptr_t ret; 3065 size_t strsz; 3066 3067 /* 3068 * The easy case: this probe is allowed to read all of memory, so 3069 * we can just return this as a vanilla pointer. 3070 */ 3071 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 3072 return (addr); 3073 3074 /* 3075 * This is the tougher case: we copy the string in question from 3076 * kernel memory into scratch memory and return it that way: this 3077 * ensures that we won't trip up when access checking tests the 3078 * BYREF return value. 3079 */ 3080 strsz = dtrace_strlen((char *)addr, size) + 1; 3081 3082 if (mstate->dtms_scratch_ptr + strsz > 3083 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3084 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3085 return (0); 3086 } 3087 3088 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3089 strsz); 3090 ret = mstate->dtms_scratch_ptr; 3091 mstate->dtms_scratch_ptr += strsz; 3092 return (ret); 3093} 3094 3095/* 3096 * Return a string from a memoy address which is known to have one or 3097 * more concatenated, individually zero terminated, sub-strings. 3098 * In the event that the user lacks the privilege to access 3099 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3100 * don't fail access checking. 
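 *
 * (For example, an argument vector stored as "ls\0-la\0" comes back
 * from this routine as the single string "ls -la": the loop below
 * replaces each interior NUL with a space, leaving only the final
 * terminator in place.)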
3101 * 3102 * dtrace_dif_variable() uses this routine as a helper for various 3103 * builtin values such as 'execargs'. 3104 */ 3105static uintptr_t 3106dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 3107 dtrace_mstate_t *mstate) 3108{ 3109 char *p; 3110 size_t i; 3111 uintptr_t ret; 3112 3113 if (mstate->dtms_scratch_ptr + strsz > 3114 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3115 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3116 return (0); 3117 } 3118 3119 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3120 strsz); 3121 3122 /* Replace sub-string termination characters with a space. */ 3123 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 3124 p++, i++) 3125 if (*p == '\0') 3126 *p = ' '; 3127 3128 ret = mstate->dtms_scratch_ptr; 3129 mstate->dtms_scratch_ptr += strsz; 3130 return (ret); 3131} 3132 3133/* 3134 * This function implements the DIF emulator's variable lookups. The emulator 3135 * passes a reserved variable identifier and optional built-in array index. 3136 */ 3137static uint64_t 3138dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 3139 uint64_t ndx) 3140{ 3141 /* 3142 * If we're accessing one of the uncached arguments, we'll turn this 3143 * into a reference in the args array. 3144 */ 3145 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 3146 ndx = v - DIF_VAR_ARG0; 3147 v = DIF_VAR_ARGS; 3148 } 3149 3150 switch (v) { 3151 case DIF_VAR_ARGS: 3152 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 3153 if (ndx >= sizeof (mstate->dtms_arg) / 3154 sizeof (mstate->dtms_arg[0])) { 3155 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3156 dtrace_provider_t *pv; 3157 uint64_t val; 3158 3159 pv = mstate->dtms_probe->dtpr_provider; 3160 if (pv->dtpv_pops.dtps_getargval != NULL) 3161 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 3162 mstate->dtms_probe->dtpr_id, 3163 mstate->dtms_probe->dtpr_arg, ndx, aframes); 3164 else 3165 val = dtrace_getarg(ndx, aframes); 3166 3167 /* 3168 * This is regrettably required to keep the compiler 3169 * from tail-optimizing the call to dtrace_getarg(). 3170 * The condition always evaluates to true, but the 3171 * compiler has no way of figuring that out a priori. 3172 * (None of this would be necessary if the compiler 3173 * could be relied upon to _always_ tail-optimize 3174 * the call to dtrace_getarg() -- but it can't.) 
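 *
 * (Presumably the concern is that dtrace_getarg() walks the stack
 * using the artificial frame count computed above; a tail call would
 * elide this function's frame and throw that count off.)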
3175 */ 3176 if (mstate->dtms_probe != NULL) 3177 return (val); 3178 3179 ASSERT(0); 3180 } 3181 3182 return (mstate->dtms_arg[ndx]); 3183 3184#if defined(sun) 3185 case DIF_VAR_UREGS: { 3186 klwp_t *lwp; 3187 3188 if (!dtrace_priv_proc(state)) 3189 return (0); 3190 3191 if ((lwp = curthread->t_lwp) == NULL) { 3192 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3193 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 3194 return (0); 3195 } 3196 3197 return (dtrace_getreg(lwp->lwp_regs, ndx)); 3198 return (0); 3199 } 3200#else 3201 case DIF_VAR_UREGS: { 3202 struct trapframe *tframe; 3203 3204 if (!dtrace_priv_proc(state)) 3205 return (0); 3206 3207 if ((tframe = curthread->td_frame) == NULL) { 3208 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3209 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3210 return (0); 3211 } 3212 3213 return (dtrace_getreg(tframe, ndx)); 3214 } 3215#endif 3216 3217 case DIF_VAR_CURTHREAD: 3218 if (!dtrace_priv_proc(state)) 3219 return (0); 3220 return ((uint64_t)(uintptr_t)curthread); 3221 3222 case DIF_VAR_TIMESTAMP: 3223 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3224 mstate->dtms_timestamp = dtrace_gethrtime(); 3225 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3226 } 3227 return (mstate->dtms_timestamp); 3228 3229 case DIF_VAR_VTIMESTAMP: 3230 ASSERT(dtrace_vtime_references != 0); 3231 return (curthread->t_dtrace_vtime); 3232 3233 case DIF_VAR_WALLTIMESTAMP: 3234 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3235 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3236 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3237 } 3238 return (mstate->dtms_walltimestamp); 3239 3240#if defined(sun) 3241 case DIF_VAR_IPL: 3242 if (!dtrace_priv_kernel(state)) 3243 return (0); 3244 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3245 mstate->dtms_ipl = dtrace_getipl(); 3246 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3247 } 3248 return (mstate->dtms_ipl); 3249#endif 3250 3251 case DIF_VAR_EPID: 3252 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3253 return (mstate->dtms_epid); 3254 3255 case DIF_VAR_ID: 3256 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3257 return (mstate->dtms_probe->dtpr_id); 3258 3259 case DIF_VAR_STACKDEPTH: 3260 if (!dtrace_priv_kernel(state)) 3261 return (0); 3262 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3263 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3264 3265 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3266 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3267 } 3268 return (mstate->dtms_stackdepth); 3269 3270 case DIF_VAR_USTACKDEPTH: 3271 if (!dtrace_priv_proc(state)) 3272 return (0); 3273 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3274 /* 3275 * See comment in DIF_VAR_PID. 
3276 */ 3277 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3278 CPU_ON_INTR(CPU)) { 3279 mstate->dtms_ustackdepth = 0; 3280 } else { 3281 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3282 mstate->dtms_ustackdepth = 3283 dtrace_getustackdepth(); 3284 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3285 } 3286 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3287 } 3288 return (mstate->dtms_ustackdepth); 3289 3290 case DIF_VAR_CALLER: 3291 if (!dtrace_priv_kernel(state)) 3292 return (0); 3293 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3294 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3295 3296 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3297 /* 3298 * If this is an unanchored probe, we are 3299 * required to go through the slow path: 3300 * dtrace_caller() only guarantees correct 3301 * results for anchored probes. 3302 */ 3303 pc_t caller[2] = {0, 0}; 3304 3305 dtrace_getpcstack(caller, 2, aframes, 3306 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3307 mstate->dtms_caller = caller[1]; 3308 } else if ((mstate->dtms_caller = 3309 dtrace_caller(aframes)) == -1) { 3310 /* 3311 * We have failed to do this the quick way; 3312 * we must resort to the slower approach of 3313 * calling dtrace_getpcstack(). 3314 */ 3315 pc_t caller = 0; 3316 3317 dtrace_getpcstack(&caller, 1, aframes, NULL); 3318 mstate->dtms_caller = caller; 3319 } 3320 3321 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3322 } 3323 return (mstate->dtms_caller); 3324 3325 case DIF_VAR_UCALLER: 3326 if (!dtrace_priv_proc(state)) 3327 return (0); 3328 3329 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3330 uint64_t ustack[3]; 3331 3332 /* 3333 * dtrace_getupcstack() fills in the first uint64_t 3334 * with the current PID. The second uint64_t will 3335 * be the program counter at user-level. The third 3336 * uint64_t will contain the caller, which is what 3337 * we're after. 3338 */ 3339 ustack[2] = 0; 3340 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3341 dtrace_getupcstack(ustack, 3); 3342 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3343 mstate->dtms_ucaller = ustack[2]; 3344 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3345 } 3346 3347 return (mstate->dtms_ucaller); 3348 3349 case DIF_VAR_PROBEPROV: 3350 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3351 return (dtrace_dif_varstr( 3352 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3353 state, mstate)); 3354 3355 case DIF_VAR_PROBEMOD: 3356 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3357 return (dtrace_dif_varstr( 3358 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3359 state, mstate)); 3360 3361 case DIF_VAR_PROBEFUNC: 3362 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3363 return (dtrace_dif_varstr( 3364 (uintptr_t)mstate->dtms_probe->dtpr_func, 3365 state, mstate)); 3366 3367 case DIF_VAR_PROBENAME: 3368 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3369 return (dtrace_dif_varstr( 3370 (uintptr_t)mstate->dtms_probe->dtpr_name, 3371 state, mstate)); 3372 3373 case DIF_VAR_PID: 3374 if (!dtrace_priv_proc(state)) 3375 return (0); 3376 3377#if defined(sun) 3378 /* 3379 * Note that we are assuming that an unanchored probe is 3380 * always due to a high-level interrupt. (And we're assuming 3381 * that there is only a single high level interrupt.) 3382 */ 3383 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3384 return (pid0.pid_id); 3385 3386 /* 3387 * It is always safe to dereference one's own t_procp pointer: 3388 * it always points to a valid, allocated proc structure. 
3389 * Further, it is always safe to dereference the p_pidp member 3390 * of one's own proc structure. (These are truisms becuase 3391 * threads and processes don't clean up their own state -- 3392 * they leave that task to whomever reaps them.) 3393 */ 3394 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3395#else 3396 return ((uint64_t)curproc->p_pid); 3397#endif 3398 3399 case DIF_VAR_PPID: 3400 if (!dtrace_priv_proc(state)) 3401 return (0); 3402 3403#if defined(sun) 3404 /* 3405 * See comment in DIF_VAR_PID. 3406 */ 3407 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3408 return (pid0.pid_id); 3409 3410 /* 3411 * It is always safe to dereference one's own t_procp pointer: 3412 * it always points to a valid, allocated proc structure. 3413 * (This is true because threads don't clean up their own 3414 * state -- they leave that task to whomever reaps them.) 3415 */ 3416 return ((uint64_t)curthread->t_procp->p_ppid); 3417#else 3418 return ((uint64_t)curproc->p_pptr->p_pid); 3419#endif 3420 3421 case DIF_VAR_TID: 3422#if defined(sun) 3423 /* 3424 * See comment in DIF_VAR_PID. 3425 */ 3426 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3427 return (0); 3428#endif 3429 3430 return ((uint64_t)curthread->t_tid); 3431 3432 case DIF_VAR_EXECARGS: { 3433 struct pargs *p_args = curthread->td_proc->p_args; 3434 3435 if (p_args == NULL) 3436 return(0); 3437 3438 return (dtrace_dif_varstrz( 3439 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3440 } 3441 3442 case DIF_VAR_EXECNAME: 3443#if defined(sun) 3444 if (!dtrace_priv_proc(state)) 3445 return (0); 3446 3447 /* 3448 * See comment in DIF_VAR_PID. 3449 */ 3450 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3451 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3452 3453 /* 3454 * It is always safe to dereference one's own t_procp pointer: 3455 * it always points to a valid, allocated proc structure. 3456 * (This is true because threads don't clean up their own 3457 * state -- they leave that task to whomever reaps them.) 3458 */ 3459 return (dtrace_dif_varstr( 3460 (uintptr_t)curthread->t_procp->p_user.u_comm, 3461 state, mstate)); 3462#else 3463 return (dtrace_dif_varstr( 3464 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3465#endif 3466 3467 case DIF_VAR_ZONENAME: 3468#if defined(sun) 3469 if (!dtrace_priv_proc(state)) 3470 return (0); 3471 3472 /* 3473 * See comment in DIF_VAR_PID. 3474 */ 3475 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3476 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3477 3478 /* 3479 * It is always safe to dereference one's own t_procp pointer: 3480 * it always points to a valid, allocated proc structure. 3481 * (This is true because threads don't clean up their own 3482 * state -- they leave that task to whomever reaps them.) 3483 */ 3484 return (dtrace_dif_varstr( 3485 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3486 state, mstate)); 3487#else 3488 return (0); 3489#endif 3490 3491 case DIF_VAR_UID: 3492 if (!dtrace_priv_proc(state)) 3493 return (0); 3494 3495#if defined(sun) 3496 /* 3497 * See comment in DIF_VAR_PID. 3498 */ 3499 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3500 return ((uint64_t)p0.p_cred->cr_uid); 3501#endif 3502 3503 /* 3504 * It is always safe to dereference one's own t_procp pointer: 3505 * it always points to a valid, allocated proc structure. 3506 * (This is true because threads don't clean up their own 3507 * state -- they leave that task to whomever reaps them.) 
3508 * 3509 * Additionally, it is safe to dereference one's own process 3510 * credential, since this is never NULL after process birth. 3511 */ 3512 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3513 3514 case DIF_VAR_GID: 3515 if (!dtrace_priv_proc(state)) 3516 return (0); 3517 3518#if defined(sun) 3519 /* 3520 * See comment in DIF_VAR_PID. 3521 */ 3522 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3523 return ((uint64_t)p0.p_cred->cr_gid); 3524#endif 3525 3526 /* 3527 * It is always safe to dereference one's own t_procp pointer: 3528 * it always points to a valid, allocated proc structure. 3529 * (This is true because threads don't clean up their own 3530 * state -- they leave that task to whomever reaps them.) 3531 * 3532 * Additionally, it is safe to dereference one's own process 3533 * credential, since this is never NULL after process birth. 3534 */ 3535 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3536 3537 case DIF_VAR_ERRNO: { 3538#if defined(sun) 3539 klwp_t *lwp; 3540 if (!dtrace_priv_proc(state)) 3541 return (0); 3542 3543 /* 3544 * See comment in DIF_VAR_PID. 3545 */ 3546 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3547 return (0); 3548 3549 /* 3550 * It is always safe to dereference one's own t_lwp pointer in 3551 * the event that this pointer is non-NULL. (This is true 3552 * because threads and lwps don't clean up their own state -- 3553 * they leave that task to whomever reaps them.) 3554 */ 3555 if ((lwp = curthread->t_lwp) == NULL) 3556 return (0); 3557 3558 return ((uint64_t)lwp->lwp_errno); 3559#else 3560 return (curthread->td_errno); 3561#endif 3562 } 3563#if !defined(sun) 3564 case DIF_VAR_CPU: { 3565 return curcpu; 3566 } 3567#endif 3568 default: 3569 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3570 return (0); 3571 } 3572} 3573 3574 3575typedef enum dtrace_json_state { 3576 DTRACE_JSON_REST = 1, 3577 DTRACE_JSON_OBJECT, 3578 DTRACE_JSON_STRING, 3579 DTRACE_JSON_STRING_ESCAPE, 3580 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3581 DTRACE_JSON_COLON, 3582 DTRACE_JSON_COMMA, 3583 DTRACE_JSON_VALUE, 3584 DTRACE_JSON_IDENTIFIER, 3585 DTRACE_JSON_NUMBER, 3586 DTRACE_JSON_NUMBER_FRAC, 3587 DTRACE_JSON_NUMBER_EXP, 3588 DTRACE_JSON_COLLECT_OBJECT 3589} dtrace_json_state_t; 3590 3591/* 3592 * This function possesses just enough knowledge about JSON to extract a single 3593 * value from a JSON string and store it in the scratch buffer. It is able 3594 * to extract nested object values, and members of arrays by index. 3595 * 3596 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3597 * be looked up as we descend into the object tree. e.g. 3598 * 3599 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3600 * with nelems = 5. 3601 * 3602 * The run time of this function must be bounded above by strsize to limit the 3603 * amount of work done in probe context. As such, it is implemented as a 3604 * simple state machine, reading one character at a time using safe loads 3605 * until we find the requested element, hit a parsing error or run off the 3606 * end of the object or string. 3607 * 3608 * As there is no way for a subroutine to return an error without interrupting 3609 * clause execution, we simply return NULL in the event of a missing key or any 3610 * other error condition. Each NULL return in this function is commented with 3611 * the error condition it represents -- parsing or otherwise. 
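 *
 * For illustration (a purely hypothetical input), given the JSON string
 *
 *	{"foo": [{"bar": 42}]}
 *
 * an element selector list of "foo" NUL "0" NUL "bar" NUL with
 * nelems = 3 would descend into the array held by "foo", select its
 * first element, and return the number 42 as the string "42".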
3612 * 3613 * The set of states for the state machine closely matches the JSON 3614 * specification (http://json.org/). Briefly: 3615 * 3616 * DTRACE_JSON_REST: 3617 * Skip whitespace until we find either a top-level Object, moving 3618 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3619 * 3620 * DTRACE_JSON_OBJECT: 3621 * Locate the next key String in an Object. Sets a flag to denote 3622 * the next String as a key string and moves to DTRACE_JSON_STRING. 3623 * 3624 * DTRACE_JSON_COLON: 3625 * Skip whitespace until we find the colon that separates key Strings 3626 * from their values. Once found, move to DTRACE_JSON_VALUE. 3627 * 3628 * DTRACE_JSON_VALUE: 3629 * Detects the type of the next value (String, Number, Identifier, Object 3630 * or Array) and routes to the states that process that type. Here we also 3631 * deal with the element selector list if we are requested to traverse down 3632 * into the object tree. 3633 * 3634 * DTRACE_JSON_COMMA: 3635 * Skip whitespace until we find the comma that separates key-value pairs 3636 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3637 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3638 * states return to this state at the end of their value, unless otherwise 3639 * noted. 3640 * 3641 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3642 * Processes a Number literal from the JSON, including any exponent 3643 * component that may be present. Numbers are returned as strings, which 3644 * may be passed to strtoll() if an integer is required. 3645 * 3646 * DTRACE_JSON_IDENTIFIER: 3647 * Processes a "true", "false" or "null" literal in the JSON. 3648 * 3649 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3650 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3651 * Processes a String literal from the JSON, whether the String denotes 3652 * a key, a value or part of a larger Object. Handles all escape sequences 3653 * present in the specification, including four-digit unicode characters, 3654 * but merely includes the escape sequence without converting it to the 3655 * actual escaped character. If the String is flagged as a key, we 3656 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3657 * 3658 * DTRACE_JSON_COLLECT_OBJECT: 3659 * This state collects an entire Object (or Array), correctly handling 3660 * embedded strings. If the full element selector list matches this nested 3661 * object, we return the Object in full as a string. If not, we use this 3662 * state to skip to the next value at this level and continue processing. 3663 * 3664 * NOTE: This function uses various macros from strtolctype.h to manipulate 3665 * digit values, etc -- these have all been checked to ensure they make 3666 * no additional function calls. 
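 *
 * As a rough sketch of the flow for a trivial input: parsing {"a": 7}
 * with the single selector "a" proceeds REST -> OBJECT -> STRING (the
 * key "a", which matches) -> COLON -> VALUE -> NUMBER, at which point
 * the terminating '}' ends the number and the string "7" is returned
 * in dest.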
3667 */ 3668static char * 3669dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, 3670 char *dest) 3671{ 3672 dtrace_json_state_t state = DTRACE_JSON_REST; 3673 int64_t array_elem = INT64_MIN; 3674 int64_t array_pos = 0; 3675 uint8_t escape_unicount = 0; 3676 boolean_t string_is_key = B_FALSE; 3677 boolean_t collect_object = B_FALSE; 3678 boolean_t found_key = B_FALSE; 3679 boolean_t in_array = B_FALSE; 3680 uint32_t braces = 0, brackets = 0; 3681 char *elem = elemlist; 3682 char *dd = dest; 3683 uintptr_t cur; 3684 3685 for (cur = json; cur < json + size; cur++) { 3686 char cc = dtrace_load8(cur); 3687 if (cc == '\0') 3688 return (NULL); 3689 3690 switch (state) { 3691 case DTRACE_JSON_REST: 3692 if (isspace(cc)) 3693 break; 3694 3695 if (cc == '{') { 3696 state = DTRACE_JSON_OBJECT; 3697 break; 3698 } 3699 3700 if (cc == '[') { 3701 in_array = B_TRUE; 3702 array_pos = 0; 3703 array_elem = dtrace_strtoll(elem, 10, size); 3704 found_key = array_elem == 0 ? B_TRUE : B_FALSE; 3705 state = DTRACE_JSON_VALUE; 3706 break; 3707 } 3708 3709 /* 3710 * ERROR: expected to find a top-level object or array. 3711 */ 3712 return (NULL); 3713 case DTRACE_JSON_OBJECT: 3714 if (isspace(cc)) 3715 break; 3716 3717 if (cc == '"') { 3718 state = DTRACE_JSON_STRING; 3719 string_is_key = B_TRUE; 3720 break; 3721 } 3722 3723 /* 3724 * ERROR: either the object did not start with a key 3725 * string, or we've run off the end of the object 3726 * without finding the requested key. 3727 */ 3728 return (NULL); 3729 case DTRACE_JSON_STRING: 3730 if (cc == '\\') { 3731 *dd++ = '\\'; 3732 state = DTRACE_JSON_STRING_ESCAPE; 3733 break; 3734 } 3735 3736 if (cc == '"') { 3737 if (collect_object) { 3738 /* 3739 * We don't reset the dest here, as 3740 * the string is part of a larger 3741 * object being collected. 3742 */ 3743 *dd++ = cc; 3744 collect_object = B_FALSE; 3745 state = DTRACE_JSON_COLLECT_OBJECT; 3746 break; 3747 } 3748 *dd = '\0'; 3749 dd = dest; /* reset string buffer */ 3750 if (string_is_key) { 3751 if (dtrace_strncmp(dest, elem, 3752 size) == 0) 3753 found_key = B_TRUE; 3754 } else if (found_key) { 3755 if (nelems > 1) { 3756 /* 3757 * We expected an object, not 3758 * this string. 3759 */ 3760 return (NULL); 3761 } 3762 return (dest); 3763 } 3764 state = string_is_key ? DTRACE_JSON_COLON : 3765 DTRACE_JSON_COMMA; 3766 string_is_key = B_FALSE; 3767 break; 3768 } 3769 3770 *dd++ = cc; 3771 break; 3772 case DTRACE_JSON_STRING_ESCAPE: 3773 *dd++ = cc; 3774 if (cc == 'u') { 3775 escape_unicount = 0; 3776 state = DTRACE_JSON_STRING_ESCAPE_UNICODE; 3777 } else { 3778 state = DTRACE_JSON_STRING; 3779 } 3780 break; 3781 case DTRACE_JSON_STRING_ESCAPE_UNICODE: 3782 if (!isxdigit(cc)) { 3783 /* 3784 * ERROR: invalid unicode escape, expected 3785 * four valid hexidecimal digits. 3786 */ 3787 return (NULL); 3788 } 3789 3790 *dd++ = cc; 3791 if (++escape_unicount == 4) 3792 state = DTRACE_JSON_STRING; 3793 break; 3794 case DTRACE_JSON_COLON: 3795 if (isspace(cc)) 3796 break; 3797 3798 if (cc == ':') { 3799 state = DTRACE_JSON_VALUE; 3800 break; 3801 } 3802 3803 /* 3804 * ERROR: expected a colon. 
3805 */ 3806 return (NULL); 3807 case DTRACE_JSON_COMMA: 3808 if (isspace(cc)) 3809 break; 3810 3811 if (cc == ',') { 3812 if (in_array) { 3813 state = DTRACE_JSON_VALUE; 3814 if (++array_pos == array_elem) 3815 found_key = B_TRUE; 3816 } else { 3817 state = DTRACE_JSON_OBJECT; 3818 } 3819 break; 3820 } 3821 3822 /* 3823 * ERROR: either we hit an unexpected character, or 3824 * we reached the end of the object or array without 3825 * finding the requested key. 3826 */ 3827 return (NULL); 3828 case DTRACE_JSON_IDENTIFIER: 3829 if (islower(cc)) { 3830 *dd++ = cc; 3831 break; 3832 } 3833 3834 *dd = '\0'; 3835 dd = dest; /* reset string buffer */ 3836 3837 if (dtrace_strncmp(dest, "true", 5) == 0 || 3838 dtrace_strncmp(dest, "false", 6) == 0 || 3839 dtrace_strncmp(dest, "null", 5) == 0) { 3840 if (found_key) { 3841 if (nelems > 1) { 3842 /* 3843 * ERROR: We expected an object, 3844 * not this identifier. 3845 */ 3846 return (NULL); 3847 } 3848 return (dest); 3849 } else { 3850 cur--; 3851 state = DTRACE_JSON_COMMA; 3852 break; 3853 } 3854 } 3855 3856 /* 3857 * ERROR: we did not recognise the identifier as one 3858 * of those in the JSON specification. 3859 */ 3860 return (NULL); 3861 case DTRACE_JSON_NUMBER: 3862 if (cc == '.') { 3863 *dd++ = cc; 3864 state = DTRACE_JSON_NUMBER_FRAC; 3865 break; 3866 } 3867 3868 if (cc == 'x' || cc == 'X') { 3869 /* 3870 * ERROR: specification explicitly excludes 3871 * hexidecimal or octal numbers. 3872 */ 3873 return (NULL); 3874 } 3875 3876 /* FALLTHRU */ 3877 case DTRACE_JSON_NUMBER_FRAC: 3878 if (cc == 'e' || cc == 'E') { 3879 *dd++ = cc; 3880 state = DTRACE_JSON_NUMBER_EXP; 3881 break; 3882 } 3883 3884 if (cc == '+' || cc == '-') { 3885 /* 3886 * ERROR: expect sign as part of exponent only. 3887 */ 3888 return (NULL); 3889 } 3890 /* FALLTHRU */ 3891 case DTRACE_JSON_NUMBER_EXP: 3892 if (isdigit(cc) || cc == '+' || cc == '-') { 3893 *dd++ = cc; 3894 break; 3895 } 3896 3897 *dd = '\0'; 3898 dd = dest; /* reset string buffer */ 3899 if (found_key) { 3900 if (nelems > 1) { 3901 /* 3902 * ERROR: We expected an object, not 3903 * this number. 3904 */ 3905 return (NULL); 3906 } 3907 return (dest); 3908 } 3909 3910 cur--; 3911 state = DTRACE_JSON_COMMA; 3912 break; 3913 case DTRACE_JSON_VALUE: 3914 if (isspace(cc)) 3915 break; 3916 3917 if (cc == '{' || cc == '[') { 3918 if (nelems > 1 && found_key) { 3919 in_array = cc == '[' ? B_TRUE : B_FALSE; 3920 /* 3921 * If our element selector directs us 3922 * to descend into this nested object, 3923 * then move to the next selector 3924 * element in the list and restart the 3925 * state machine. 3926 */ 3927 while (*elem != '\0') 3928 elem++; 3929 elem++; /* skip the inter-element NUL */ 3930 nelems--; 3931 dd = dest; 3932 if (in_array) { 3933 state = DTRACE_JSON_VALUE; 3934 array_pos = 0; 3935 array_elem = dtrace_strtoll( 3936 elem, 10, size); 3937 found_key = array_elem == 0 ? 3938 B_TRUE : B_FALSE; 3939 } else { 3940 found_key = B_FALSE; 3941 state = DTRACE_JSON_OBJECT; 3942 } 3943 break; 3944 } 3945 3946 /* 3947 * Otherwise, we wish to either skip this 3948 * nested object or return it in full. 3949 */ 3950 if (cc == '[') 3951 brackets = 1; 3952 else 3953 braces = 1; 3954 *dd++ = cc; 3955 state = DTRACE_JSON_COLLECT_OBJECT; 3956 break; 3957 } 3958 3959 if (cc == '"') { 3960 state = DTRACE_JSON_STRING; 3961 break; 3962 } 3963 3964 if (islower(cc)) { 3965 /* 3966 * Here we deal with true, false and null. 
3967 */ 3968 *dd++ = cc; 3969 state = DTRACE_JSON_IDENTIFIER; 3970 break; 3971 } 3972 3973 if (cc == '-' || isdigit(cc)) { 3974 *dd++ = cc; 3975 state = DTRACE_JSON_NUMBER; 3976 break; 3977 } 3978 3979 /* 3980 * ERROR: unexpected character at start of value. 3981 */ 3982 return (NULL); 3983 case DTRACE_JSON_COLLECT_OBJECT: 3984 if (cc == '\0') 3985 /* 3986 * ERROR: unexpected end of input. 3987 */ 3988 return (NULL); 3989 3990 *dd++ = cc; 3991 if (cc == '"') { 3992 collect_object = B_TRUE; 3993 state = DTRACE_JSON_STRING; 3994 break; 3995 } 3996 3997 if (cc == ']') { 3998 if (brackets-- == 0) { 3999 /* 4000 * ERROR: unbalanced brackets. 4001 */ 4002 return (NULL); 4003 } 4004 } else if (cc == '}') { 4005 if (braces-- == 0) { 4006 /* 4007 * ERROR: unbalanced braces. 4008 */ 4009 return (NULL); 4010 } 4011 } else if (cc == '{') { 4012 braces++; 4013 } else if (cc == '[') { 4014 brackets++; 4015 } 4016 4017 if (brackets == 0 && braces == 0) { 4018 if (found_key) { 4019 *dd = '\0'; 4020 return (dest); 4021 } 4022 dd = dest; /* reset string buffer */ 4023 state = DTRACE_JSON_COMMA; 4024 } 4025 break; 4026 } 4027 } 4028 return (NULL); 4029} 4030 4031/* 4032 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 4033 * Notice that we don't bother validating the proper number of arguments or 4034 * their types in the tuple stack. This isn't needed because all argument 4035 * interpretation is safe because of our load safety -- the worst that can 4036 * happen is that a bogus program can obtain bogus results. 4037 */ 4038static void 4039dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4040 dtrace_key_t *tupregs, int nargs, 4041 dtrace_mstate_t *mstate, dtrace_state_t *state) 4042{ 4043 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4044 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4045 dtrace_vstate_t *vstate = &state->dts_vstate; 4046 4047#if defined(sun) 4048 union { 4049 mutex_impl_t mi; 4050 uint64_t mx; 4051 } m; 4052 4053 union { 4054 krwlock_t ri; 4055 uintptr_t rw; 4056 } r; 4057#else 4058 struct thread *lowner; 4059 union { 4060 struct lock_object *li; 4061 uintptr_t lx; 4062 } l; 4063#endif 4064 4065 switch (subr) { 4066 case DIF_SUBR_RAND: 4067 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 4068 break; 4069 4070#if defined(sun) 4071 case DIF_SUBR_MUTEX_OWNED: 4072 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4073 mstate, vstate)) { 4074 regs[rd] = 0; 4075 break; 4076 } 4077 4078 m.mx = dtrace_load64(tupregs[0].dttk_value); 4079 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4080 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4081 else 4082 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4083 break; 4084 4085 case DIF_SUBR_MUTEX_OWNER: 4086 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4087 mstate, vstate)) { 4088 regs[rd] = 0; 4089 break; 4090 } 4091 4092 m.mx = dtrace_load64(tupregs[0].dttk_value); 4093 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4094 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4095 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4096 else 4097 regs[rd] = 0; 4098 break; 4099 4100 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4101 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4102 mstate, vstate)) { 4103 regs[rd] = 0; 4104 break; 4105 } 4106 4107 m.mx = dtrace_load64(tupregs[0].dttk_value); 4108 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4109 break; 4110 4111 case DIF_SUBR_MUTEX_TYPE_SPIN: 4112 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4113 mstate, vstate)) { 4114 regs[rd] = 
0; 4115 break; 4116 } 4117 4118 m.mx = dtrace_load64(tupregs[0].dttk_value); 4119 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4120 break; 4121 4122 case DIF_SUBR_RW_READ_HELD: { 4123 uintptr_t tmp; 4124 4125 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4126 mstate, vstate)) { 4127 regs[rd] = 0; 4128 break; 4129 } 4130 4131 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4132 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4133 break; 4134 } 4135 4136 case DIF_SUBR_RW_WRITE_HELD: 4137 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4138 mstate, vstate)) { 4139 regs[rd] = 0; 4140 break; 4141 } 4142 4143 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4144 regs[rd] = _RW_WRITE_HELD(&r.ri); 4145 break; 4146 4147 case DIF_SUBR_RW_ISWRITER: 4148 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4149 mstate, vstate)) { 4150 regs[rd] = 0; 4151 break; 4152 } 4153 4154 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4155 regs[rd] = _RW_ISWRITER(&r.ri); 4156 break; 4157 4158#else 4159 case DIF_SUBR_MUTEX_OWNED: 4160 if (!dtrace_canload(tupregs[0].dttk_value, 4161 sizeof (struct lock_object), mstate, vstate)) { 4162 regs[rd] = 0; 4163 break; 4164 } 4165 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4166 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4167 break; 4168 4169 case DIF_SUBR_MUTEX_OWNER: 4170 if (!dtrace_canload(tupregs[0].dttk_value, 4171 sizeof (struct lock_object), mstate, vstate)) { 4172 regs[rd] = 0; 4173 break; 4174 } 4175 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4176 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4177 regs[rd] = (uintptr_t)lowner; 4178 break; 4179 4180 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4181 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4182 mstate, vstate)) { 4183 regs[rd] = 0; 4184 break; 4185 } 4186 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4187 /* XXX - should be only LC_SLEEPABLE? */ 4188 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 4189 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 4190 break; 4191 4192 case DIF_SUBR_MUTEX_TYPE_SPIN: 4193 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4194 mstate, vstate)) { 4195 regs[rd] = 0; 4196 break; 4197 } 4198 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4199 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4200 break; 4201 4202 case DIF_SUBR_RW_READ_HELD: 4203 case DIF_SUBR_SX_SHARED_HELD: 4204 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4205 mstate, vstate)) { 4206 regs[rd] = 0; 4207 break; 4208 } 4209 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4210 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4211 lowner == NULL; 4212 break; 4213 4214 case DIF_SUBR_RW_WRITE_HELD: 4215 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4216 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4217 mstate, vstate)) { 4218 regs[rd] = 0; 4219 break; 4220 } 4221 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4222 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4223 regs[rd] = (lowner == curthread); 4224 break; 4225 4226 case DIF_SUBR_RW_ISWRITER: 4227 case DIF_SUBR_SX_ISEXCLUSIVE: 4228 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4229 mstate, vstate)) { 4230 regs[rd] = 0; 4231 break; 4232 } 4233 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4234 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4235 lowner != NULL; 4236 break; 4237#endif /* ! 
defined(sun) */ 4238 4239 case DIF_SUBR_BCOPY: { 4240 /* 4241 * We need to be sure that the destination is in the scratch 4242 * region -- no other region is allowed. 4243 */ 4244 uintptr_t src = tupregs[0].dttk_value; 4245 uintptr_t dest = tupregs[1].dttk_value; 4246 size_t size = tupregs[2].dttk_value; 4247 4248 if (!dtrace_inscratch(dest, size, mstate)) { 4249 *flags |= CPU_DTRACE_BADADDR; 4250 *illval = regs[rd]; 4251 break; 4252 } 4253 4254 if (!dtrace_canload(src, size, mstate, vstate)) { 4255 regs[rd] = 0; 4256 break; 4257 } 4258 4259 dtrace_bcopy((void *)src, (void *)dest, size); 4260 break; 4261 } 4262 4263 case DIF_SUBR_ALLOCA: 4264 case DIF_SUBR_COPYIN: { 4265 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4266 uint64_t size = 4267 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4268 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4269 4270 /* 4271 * This action doesn't require any credential checks since 4272 * probes will not activate in user contexts to which the 4273 * enabling user does not have permissions. 4274 */ 4275 4276 /* 4277 * Rounding up the user allocation size could have overflowed 4278 * a large, bogus allocation (like -1ULL) to 0. 4279 */ 4280 if (scratch_size < size || 4281 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4282 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4283 regs[rd] = 0; 4284 break; 4285 } 4286 4287 if (subr == DIF_SUBR_COPYIN) { 4288 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4289 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4290 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4291 } 4292 4293 mstate->dtms_scratch_ptr += scratch_size; 4294 regs[rd] = dest; 4295 break; 4296 } 4297 4298 case DIF_SUBR_COPYINTO: { 4299 uint64_t size = tupregs[1].dttk_value; 4300 uintptr_t dest = tupregs[2].dttk_value; 4301 4302 /* 4303 * This action doesn't require any credential checks since 4304 * probes will not activate in user contexts to which the 4305 * enabling user does not have permissions. 4306 */ 4307 if (!dtrace_inscratch(dest, size, mstate)) { 4308 *flags |= CPU_DTRACE_BADADDR; 4309 *illval = regs[rd]; 4310 break; 4311 } 4312 4313 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4314 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4315 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4316 break; 4317 } 4318 4319 case DIF_SUBR_COPYINSTR: { 4320 uintptr_t dest = mstate->dtms_scratch_ptr; 4321 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4322 4323 if (nargs > 1 && tupregs[1].dttk_value < size) 4324 size = tupregs[1].dttk_value + 1; 4325 4326 /* 4327 * This action doesn't require any credential checks since 4328 * probes will not activate in user contexts to which the 4329 * enabling user does not have permissions. 
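 *
 * The copy below is bounded by the "strsize" option (or by an explicit
 * length argument, if one is supplied and is smaller), and the result
 * is unconditionally NUL-terminated at the end of the copied region.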
4330 */ 4331 if (!DTRACE_INSCRATCH(mstate, size)) { 4332 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4333 regs[rd] = 0; 4334 break; 4335 } 4336 4337 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4338 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 4339 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4340 4341 ((char *)dest)[size - 1] = '\0'; 4342 mstate->dtms_scratch_ptr += size; 4343 regs[rd] = dest; 4344 break; 4345 } 4346 4347#if defined(sun) 4348 case DIF_SUBR_MSGSIZE: 4349 case DIF_SUBR_MSGDSIZE: { 4350 uintptr_t baddr = tupregs[0].dttk_value, daddr; 4351 uintptr_t wptr, rptr; 4352 size_t count = 0; 4353 int cont = 0; 4354 4355 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 4356 4357 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 4358 vstate)) { 4359 regs[rd] = 0; 4360 break; 4361 } 4362 4363 wptr = dtrace_loadptr(baddr + 4364 offsetof(mblk_t, b_wptr)); 4365 4366 rptr = dtrace_loadptr(baddr + 4367 offsetof(mblk_t, b_rptr)); 4368 4369 if (wptr < rptr) { 4370 *flags |= CPU_DTRACE_BADADDR; 4371 *illval = tupregs[0].dttk_value; 4372 break; 4373 } 4374 4375 daddr = dtrace_loadptr(baddr + 4376 offsetof(mblk_t, b_datap)); 4377 4378 baddr = dtrace_loadptr(baddr + 4379 offsetof(mblk_t, b_cont)); 4380 4381 /* 4382 * We want to prevent against denial-of-service here, 4383 * so we're only going to search the list for 4384 * dtrace_msgdsize_max mblks. 4385 */ 4386 if (cont++ > dtrace_msgdsize_max) { 4387 *flags |= CPU_DTRACE_ILLOP; 4388 break; 4389 } 4390 4391 if (subr == DIF_SUBR_MSGDSIZE) { 4392 if (dtrace_load8(daddr + 4393 offsetof(dblk_t, db_type)) != M_DATA) 4394 continue; 4395 } 4396 4397 count += wptr - rptr; 4398 } 4399 4400 if (!(*flags & CPU_DTRACE_FAULT)) 4401 regs[rd] = count; 4402 4403 break; 4404 } 4405#endif 4406 4407 case DIF_SUBR_PROGENYOF: { 4408 pid_t pid = tupregs[0].dttk_value; 4409 proc_t *p; 4410 int rval = 0; 4411 4412 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4413 4414 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 4415#if defined(sun) 4416 if (p->p_pidp->pid_id == pid) { 4417#else 4418 if (p->p_pid == pid) { 4419#endif 4420 rval = 1; 4421 break; 4422 } 4423 } 4424 4425 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4426 4427 regs[rd] = rval; 4428 break; 4429 } 4430 4431 case DIF_SUBR_SPECULATION: 4432 regs[rd] = dtrace_speculation(state); 4433 break; 4434 4435 case DIF_SUBR_COPYOUT: { 4436 uintptr_t kaddr = tupregs[0].dttk_value; 4437 uintptr_t uaddr = tupregs[1].dttk_value; 4438 uint64_t size = tupregs[2].dttk_value; 4439 4440 if (!dtrace_destructive_disallow && 4441 dtrace_priv_proc_control(state) && 4442 !dtrace_istoxic(kaddr, size)) { 4443 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4444 dtrace_copyout(kaddr, uaddr, size, flags); 4445 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4446 } 4447 break; 4448 } 4449 4450 case DIF_SUBR_COPYOUTSTR: { 4451 uintptr_t kaddr = tupregs[0].dttk_value; 4452 uintptr_t uaddr = tupregs[1].dttk_value; 4453 uint64_t size = tupregs[2].dttk_value; 4454 4455 if (!dtrace_destructive_disallow && 4456 dtrace_priv_proc_control(state) && 4457 !dtrace_istoxic(kaddr, size)) { 4458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4459 dtrace_copyoutstr(kaddr, uaddr, size, flags); 4460 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4461 } 4462 break; 4463 } 4464 4465 case DIF_SUBR_STRLEN: { 4466 size_t sz; 4467 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 4468 sz = dtrace_strlen((char *)addr, 4469 state->dts_options[DTRACEOPT_STRSIZE]); 4470 4471 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 4472 regs[rd] = 0; 4473 break; 4474 } 4475 4476 
regs[rd] = sz; 4477 4478 break; 4479 } 4480 4481 case DIF_SUBR_STRCHR: 4482 case DIF_SUBR_STRRCHR: { 4483 /* 4484 * We're going to iterate over the string looking for the 4485 * specified character. We will iterate until we have reached 4486 * the string length or we have found the character. If this 4487 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4488 * of the specified character instead of the first. 4489 */ 4490 uintptr_t saddr = tupregs[0].dttk_value; 4491 uintptr_t addr = tupregs[0].dttk_value; 4492 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 4493 char c, target = (char)tupregs[1].dttk_value; 4494 4495 for (regs[rd] = 0; addr < limit; addr++) { 4496 if ((c = dtrace_load8(addr)) == target) { 4497 regs[rd] = addr; 4498 4499 if (subr == DIF_SUBR_STRCHR) 4500 break; 4501 } 4502 4503 if (c == '\0') 4504 break; 4505 } 4506 4507 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 4508 regs[rd] = 0; 4509 break; 4510 } 4511 4512 break; 4513 } 4514 4515 case DIF_SUBR_STRSTR: 4516 case DIF_SUBR_INDEX: 4517 case DIF_SUBR_RINDEX: { 4518 /* 4519 * We're going to iterate over the string looking for the 4520 * specified string. We will iterate until we have reached 4521 * the string length or we have found the string. (Yes, this 4522 * is done in the most naive way possible -- but considering 4523 * that the string we're searching for is likely to be 4524 * relatively short, the complexity of Rabin-Karp or similar 4525 * hardly seems merited.) 4526 */ 4527 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4528 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4529 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4530 size_t len = dtrace_strlen(addr, size); 4531 size_t sublen = dtrace_strlen(substr, size); 4532 char *limit = addr + len, *orig = addr; 4533 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4534 int inc = 1; 4535 4536 regs[rd] = notfound; 4537 4538 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4539 regs[rd] = 0; 4540 break; 4541 } 4542 4543 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4544 vstate)) { 4545 regs[rd] = 0; 4546 break; 4547 } 4548 4549 /* 4550 * strstr() and index()/rindex() have similar semantics if 4551 * both strings are the empty string: strstr() returns a 4552 * pointer to the (empty) string, and index() and rindex() 4553 * both return index 0 (regardless of any position argument). 4554 */ 4555 if (sublen == 0 && len == 0) { 4556 if (subr == DIF_SUBR_STRSTR) 4557 regs[rd] = (uintptr_t)addr; 4558 else 4559 regs[rd] = 0; 4560 break; 4561 } 4562 4563 if (subr != DIF_SUBR_STRSTR) { 4564 if (subr == DIF_SUBR_RINDEX) { 4565 limit = orig - 1; 4566 addr += len; 4567 inc = -1; 4568 } 4569 4570 /* 4571 * Both index() and rindex() take an optional position 4572 * argument that denotes the starting position. 4573 */ 4574 if (nargs == 3) { 4575 int64_t pos = (int64_t)tupregs[2].dttk_value; 4576 4577 /* 4578 * If the position argument to index() is 4579 * negative, Perl implicitly clamps it at 4580 * zero. This semantic is a little surprising 4581 * given the special meaning of negative 4582 * positions to similar Perl functions like 4583 * substr(), but it appears to reflect a 4584 * notion that index() can start from a 4585 * negative index and increment its way up to 4586 * the string. Given this notion, Perl's 4587 * rindex() is at least self-consistent in 4588 * that it implicitly clamps positions greater 4589 * than the string length to be the string 4590 * length. 
Where Perl completely loses 4591 * coherence, however, is when the specified 4592 * substring is the empty string (""). In 4593 * this case, even if the position is 4594 * negative, rindex() returns 0 -- and even if 4595 * the position is greater than the length, 4596 * index() returns the string length. These 4597 * semantics violate the notion that index() 4598 * should never return a value less than the 4599 * specified position and that rindex() should 4600 * never return a value greater than the 4601 * specified position. (One assumes that 4602 * these semantics are artifacts of Perl's 4603 * implementation and not the results of 4604 * deliberate design -- it beggars belief that 4605 * even Larry Wall could desire such oddness.) 4606 * While in the abstract one would wish for 4607 * consistent position semantics across 4608 * substr(), index() and rindex() -- or at the 4609 * very least self-consistent position 4610 * semantics for index() and rindex() -- we 4611 * instead opt to keep with the extant Perl 4612 * semantics, in all their broken glory. (Do 4613 * we have more desire to maintain Perl's 4614 * semantics than Perl does? Probably.) 4615 */ 4616 if (subr == DIF_SUBR_RINDEX) { 4617 if (pos < 0) { 4618 if (sublen == 0) 4619 regs[rd] = 0; 4620 break; 4621 } 4622 4623 if (pos > len) 4624 pos = len; 4625 } else { 4626 if (pos < 0) 4627 pos = 0; 4628 4629 if (pos >= len) { 4630 if (sublen == 0) 4631 regs[rd] = len; 4632 break; 4633 } 4634 } 4635 4636 addr = orig + pos; 4637 } 4638 } 4639 4640 for (regs[rd] = notfound; addr != limit; addr += inc) { 4641 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4642 if (subr != DIF_SUBR_STRSTR) { 4643 /* 4644 * As D index() and rindex() are 4645 * modeled on Perl (and not on awk), 4646 * we return a zero-based (and not a 4647 * one-based) index. (For you Perl 4648 * weenies: no, we're not going to add 4649 * $[ -- and shouldn't you be at a con 4650 * or something?) 4651 */ 4652 regs[rd] = (uintptr_t)(addr - orig); 4653 break; 4654 } 4655 4656 ASSERT(subr == DIF_SUBR_STRSTR); 4657 regs[rd] = (uintptr_t)addr; 4658 break; 4659 } 4660 } 4661 4662 break; 4663 } 4664 4665 case DIF_SUBR_STRTOK: { 4666 uintptr_t addr = tupregs[0].dttk_value; 4667 uintptr_t tokaddr = tupregs[1].dttk_value; 4668 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4669 uintptr_t limit, toklimit = tokaddr + size; 4670 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4671 char *dest = (char *)mstate->dtms_scratch_ptr; 4672 int i; 4673 4674 /* 4675 * Check both the token buffer and (later) the input buffer, 4676 * since both could be non-scratch addresses. 4677 */ 4678 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 4679 regs[rd] = 0; 4680 break; 4681 } 4682 4683 if (!DTRACE_INSCRATCH(mstate, size)) { 4684 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4685 regs[rd] = 0; 4686 break; 4687 } 4688 4689 if (addr == 0) { 4690 /* 4691 * If the address specified is NULL, we use our saved 4692 * strtok pointer from the mstate. Note that this 4693 * means that the saved strtok pointer is _only_ 4694 * valid within multiple enablings of the same probe -- 4695 * it behaves like an implicit clause-local variable. 4696 */ 4697 addr = mstate->dtms_strtok; 4698 } else { 4699 /* 4700 * If the user-specified address is non-NULL we must 4701 * access check it. This is the only time we have 4702 * a chance to do so, since this address may reside 4703 * in the string table of this clause-- future calls 4704 * (when we fetch addr from mstate->dtms_strtok) 4705 * would fail this access check. 
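 *
 * For example (illustrative): a clause that calls
 * strtok(this->str, ":") and then strtok(NULL, ":") relies on the
 * saved dtms_strtok pointer for the second call, which is why only
 * the explicitly supplied address can be checked here.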
4706 */ 4707 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 4708 regs[rd] = 0; 4709 break; 4710 } 4711 } 4712 4713 /* 4714 * First, zero the token map, and then process the token 4715 * string -- setting a bit in the map for every character 4716 * found in the token string. 4717 */ 4718 for (i = 0; i < sizeof (tokmap); i++) 4719 tokmap[i] = 0; 4720 4721 for (; tokaddr < toklimit; tokaddr++) { 4722 if ((c = dtrace_load8(tokaddr)) == '\0') 4723 break; 4724 4725 ASSERT((c >> 3) < sizeof (tokmap)); 4726 tokmap[c >> 3] |= (1 << (c & 0x7)); 4727 } 4728 4729 for (limit = addr + size; addr < limit; addr++) { 4730 /* 4731 * We're looking for a character that is _not_ contained 4732 * in the token string. 4733 */ 4734 if ((c = dtrace_load8(addr)) == '\0') 4735 break; 4736 4737 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4738 break; 4739 } 4740 4741 if (c == '\0') { 4742 /* 4743 * We reached the end of the string without finding 4744 * any character that was not in the token string. 4745 * We return NULL in this case, and we set the saved 4746 * address to NULL as well. 4747 */ 4748 regs[rd] = 0; 4749 mstate->dtms_strtok = 0; 4750 break; 4751 } 4752 4753 /* 4754 * From here on, we're copying into the destination string. 4755 */ 4756 for (i = 0; addr < limit && i < size - 1; addr++) { 4757 if ((c = dtrace_load8(addr)) == '\0') 4758 break; 4759 4760 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4761 break; 4762 4763 ASSERT(i < size); 4764 dest[i++] = c; 4765 } 4766 4767 ASSERT(i < size); 4768 dest[i] = '\0'; 4769 regs[rd] = (uintptr_t)dest; 4770 mstate->dtms_scratch_ptr += size; 4771 mstate->dtms_strtok = addr; 4772 break; 4773 } 4774 4775 case DIF_SUBR_SUBSTR: { 4776 uintptr_t s = tupregs[0].dttk_value; 4777 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4778 char *d = (char *)mstate->dtms_scratch_ptr; 4779 int64_t index = (int64_t)tupregs[1].dttk_value; 4780 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4781 size_t len = dtrace_strlen((char *)s, size); 4782 int64_t i; 4783 4784 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4785 regs[rd] = 0; 4786 break; 4787 } 4788 4789 if (!DTRACE_INSCRATCH(mstate, size)) { 4790 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4791 regs[rd] = 0; 4792 break; 4793 } 4794 4795 if (nargs <= 2) 4796 remaining = (int64_t)size; 4797 4798 if (index < 0) { 4799 index += len; 4800 4801 if (index < 0 && index + remaining > 0) { 4802 remaining += index; 4803 index = 0; 4804 } 4805 } 4806 4807 if (index >= len || index < 0) { 4808 remaining = 0; 4809 } else if (remaining < 0) { 4810 remaining += len - index; 4811 } else if (index + remaining > size) { 4812 remaining = size - index; 4813 } 4814 4815 for (i = 0; i < remaining; i++) { 4816 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4817 break; 4818 } 4819 4820 d[i] = '\0'; 4821 4822 mstate->dtms_scratch_ptr += size; 4823 regs[rd] = (uintptr_t)d; 4824 break; 4825 } 4826 4827 case DIF_SUBR_JSON: { 4828 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4829 uintptr_t json = tupregs[0].dttk_value; 4830 size_t jsonlen = dtrace_strlen((char *)json, size); 4831 uintptr_t elem = tupregs[1].dttk_value; 4832 size_t elemlen = dtrace_strlen((char *)elem, size); 4833 4834 char *dest = (char *)mstate->dtms_scratch_ptr; 4835 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 4836 char *ee = elemlist; 4837 int nelems = 1; 4838 uintptr_t cur; 4839 4840 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 4841 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 4842 regs[rd] = 0; 4843 break; 4844 } 4845 
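		/*
		 * Scratch layout: the extracted value is written to 'dest' in
		 * the first jsonlen + 1 bytes, and the packed element selector
		 * list is assembled immediately after it.
		 */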
4846 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 4847 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4848 regs[rd] = 0; 4849 break; 4850 } 4851 4852 /* 4853 * Read the element selector and split it up into a packed list 4854 * of strings. 4855 */ 4856 for (cur = elem; cur < elem + elemlen; cur++) { 4857 char cc = dtrace_load8(cur); 4858 4859 if (cur == elem && cc == '[') { 4860 /* 4861 * If the first element selector key is 4862 * actually an array index then ignore the 4863 * bracket. 4864 */ 4865 continue; 4866 } 4867 4868 if (cc == ']') 4869 continue; 4870 4871 if (cc == '.' || cc == '[') { 4872 nelems++; 4873 cc = '\0'; 4874 } 4875 4876 *ee++ = cc; 4877 } 4878 *ee++ = '\0'; 4879 4880 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 4881 nelems, dest)) != 0) 4882 mstate->dtms_scratch_ptr += jsonlen + 1; 4883 break; 4884 } 4885 4886 case DIF_SUBR_TOUPPER: 4887 case DIF_SUBR_TOLOWER: { 4888 uintptr_t s = tupregs[0].dttk_value; 4889 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4890 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4891 size_t len = dtrace_strlen((char *)s, size); 4892 char lower, upper, convert; 4893 int64_t i; 4894 4895 if (subr == DIF_SUBR_TOUPPER) { 4896 lower = 'a'; 4897 upper = 'z'; 4898 convert = 'A'; 4899 } else { 4900 lower = 'A'; 4901 upper = 'Z'; 4902 convert = 'a'; 4903 } 4904 4905 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4906 regs[rd] = 0; 4907 break; 4908 } 4909 4910 if (!DTRACE_INSCRATCH(mstate, size)) { 4911 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4912 regs[rd] = 0; 4913 break; 4914 } 4915 4916 for (i = 0; i < size - 1; i++) { 4917 if ((c = dtrace_load8(s + i)) == '\0') 4918 break; 4919 4920 if (c >= lower && c <= upper) 4921 c = convert + (c - lower); 4922 4923 dest[i] = c; 4924 } 4925 4926 ASSERT(i < size); 4927 dest[i] = '\0'; 4928 regs[rd] = (uintptr_t)dest; 4929 mstate->dtms_scratch_ptr += size; 4930 break; 4931 } 4932 4933#if defined(sun) 4934 case DIF_SUBR_GETMAJOR: 4935#ifdef _LP64 4936 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4937#else 4938 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4939#endif 4940 break; 4941 4942 case DIF_SUBR_GETMINOR: 4943#ifdef _LP64 4944 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4945#else 4946 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4947#endif 4948 break; 4949 4950 case DIF_SUBR_DDI_PATHNAME: { 4951 /* 4952 * This one is a galactic mess. We are going to roughly 4953 * emulate ddi_pathname(), but it's made more complicated 4954 * by the fact that we (a) want to include the minor name and 4955 * (b) must proceed iteratively instead of recursively. 4956 */ 4957 uintptr_t dest = mstate->dtms_scratch_ptr; 4958 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4959 char *start = (char *)dest, *end = start + size - 1; 4960 uintptr_t daddr = tupregs[0].dttk_value; 4961 int64_t minor = (int64_t)tupregs[1].dttk_value; 4962 char *s; 4963 int i, len, depth = 0; 4964 4965 /* 4966 * Due to all the pointer jumping we do and context we must 4967 * rely upon, we just mandate that the user must have kernel 4968 * read privileges to use this routine. 4969 */ 4970 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4971 *flags |= CPU_DTRACE_KPRIV; 4972 *illval = daddr; 4973 regs[rd] = 0; 4974 } 4975 4976 if (!DTRACE_INSCRATCH(mstate, size)) { 4977 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4978 regs[rd] = 0; 4979 break; 4980 } 4981 4982 *end = '\0'; 4983 4984 /* 4985 * We want to have a name for the minor. 
In order to do this, 4986 * we need to walk the minor list from the devinfo. We want 4987 * to be sure that we don't infinitely walk a circular list, 4988 * so we check for circularity by sending a scout pointer 4989 * ahead two elements for every element that we iterate over; 4990 * if the list is circular, these will ultimately point to the 4991 * same element. You may recognize this little trick as the 4992 * answer to a stupid interview question -- one that always 4993 * seems to be asked by those who had to have it laboriously 4994 * explained to them, and who can't even concisely describe 4995 * the conditions under which one would be forced to resort to 4996 * this technique. Needless to say, those conditions are 4997 * found here -- and probably only here. Is this the only use 4998 * of this infamous trick in shipping, production code? If it 4999 * isn't, it probably should be... 5000 */ 5001 if (minor != -1) { 5002 uintptr_t maddr = dtrace_loadptr(daddr + 5003 offsetof(struct dev_info, devi_minor)); 5004 5005 uintptr_t next = offsetof(struct ddi_minor_data, next); 5006 uintptr_t name = offsetof(struct ddi_minor_data, 5007 d_minor) + offsetof(struct ddi_minor, name); 5008 uintptr_t dev = offsetof(struct ddi_minor_data, 5009 d_minor) + offsetof(struct ddi_minor, dev); 5010 uintptr_t scout; 5011 5012 if (maddr != NULL) 5013 scout = dtrace_loadptr(maddr + next); 5014 5015 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5016 uint64_t m; 5017#ifdef _LP64 5018 m = dtrace_load64(maddr + dev) & MAXMIN64; 5019#else 5020 m = dtrace_load32(maddr + dev) & MAXMIN; 5021#endif 5022 if (m != minor) { 5023 maddr = dtrace_loadptr(maddr + next); 5024 5025 if (scout == NULL) 5026 continue; 5027 5028 scout = dtrace_loadptr(scout + next); 5029 5030 if (scout == NULL) 5031 continue; 5032 5033 scout = dtrace_loadptr(scout + next); 5034 5035 if (scout == NULL) 5036 continue; 5037 5038 if (scout == maddr) { 5039 *flags |= CPU_DTRACE_ILLOP; 5040 break; 5041 } 5042 5043 continue; 5044 } 5045 5046 /* 5047 * We have the minor data. Now we need to 5048 * copy the minor's name into the end of the 5049 * pathname. 5050 */ 5051 s = (char *)dtrace_loadptr(maddr + name); 5052 len = dtrace_strlen(s, size); 5053 5054 if (*flags & CPU_DTRACE_FAULT) 5055 break; 5056 5057 if (len != 0) { 5058 if ((end -= (len + 1)) < start) 5059 break; 5060 5061 *end = ':'; 5062 } 5063 5064 for (i = 1; i <= len; i++) 5065 end[i] = dtrace_load8((uintptr_t)s++); 5066 break; 5067 } 5068 } 5069 5070 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5071 ddi_node_state_t devi_state; 5072 5073 devi_state = dtrace_load32(daddr + 5074 offsetof(struct dev_info, devi_node_state)); 5075 5076 if (*flags & CPU_DTRACE_FAULT) 5077 break; 5078 5079 if (devi_state >= DS_INITIALIZED) { 5080 s = (char *)dtrace_loadptr(daddr + 5081 offsetof(struct dev_info, devi_addr)); 5082 len = dtrace_strlen(s, size); 5083 5084 if (*flags & CPU_DTRACE_FAULT) 5085 break; 5086 5087 if (len != 0) { 5088 if ((end -= (len + 1)) < start) 5089 break; 5090 5091 *end = '@'; 5092 } 5093 5094 for (i = 1; i <= len; i++) 5095 end[i] = dtrace_load8((uintptr_t)s++); 5096 } 5097 5098 /* 5099 * Now for the node name... 5100 */ 5101 s = (char *)dtrace_loadptr(daddr + 5102 offsetof(struct dev_info, devi_node_name)); 5103 5104 daddr = dtrace_loadptr(daddr + 5105 offsetof(struct dev_info, devi_parent)); 5106 5107 /* 5108 * If our parent is NULL (that is, if we're the root 5109 * node), we're going to use the special path 5110 * "devices". 
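 *
 * The result is thus an absolute path of the general form
 * /devices/<node>@<unit-address>/.../<node>@<unit-address>:<minor>.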
5111 */ 5112 if (daddr == 0) 5113 s = "devices"; 5114 5115 len = dtrace_strlen(s, size); 5116 if (*flags & CPU_DTRACE_FAULT) 5117 break; 5118 5119 if ((end -= (len + 1)) < start) 5120 break; 5121 5122 for (i = 1; i <= len; i++) 5123 end[i] = dtrace_load8((uintptr_t)s++); 5124 *end = '/'; 5125 5126 if (depth++ > dtrace_devdepth_max) { 5127 *flags |= CPU_DTRACE_ILLOP; 5128 break; 5129 } 5130 } 5131 5132 if (end < start) 5133 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5134 5135 if (daddr == 0) { 5136 regs[rd] = (uintptr_t)end; 5137 mstate->dtms_scratch_ptr += size; 5138 } 5139 5140 break; 5141 } 5142#endif 5143 5144 case DIF_SUBR_STRJOIN: { 5145 char *d = (char *)mstate->dtms_scratch_ptr; 5146 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5147 uintptr_t s1 = tupregs[0].dttk_value; 5148 uintptr_t s2 = tupregs[1].dttk_value; 5149 int i = 0; 5150 5151 if (!dtrace_strcanload(s1, size, mstate, vstate) || 5152 !dtrace_strcanload(s2, size, mstate, vstate)) { 5153 regs[rd] = 0; 5154 break; 5155 } 5156 5157 if (!DTRACE_INSCRATCH(mstate, size)) { 5158 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5159 regs[rd] = 0; 5160 break; 5161 } 5162 5163 for (;;) { 5164 if (i >= size) { 5165 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5166 regs[rd] = 0; 5167 break; 5168 } 5169 5170 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 5171 i--; 5172 break; 5173 } 5174 } 5175 5176 for (;;) { 5177 if (i >= size) { 5178 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5179 regs[rd] = 0; 5180 break; 5181 } 5182 5183 if ((d[i++] = dtrace_load8(s2++)) == '\0') 5184 break; 5185 } 5186 5187 if (i < size) { 5188 mstate->dtms_scratch_ptr += i; 5189 regs[rd] = (uintptr_t)d; 5190 } 5191 5192 break; 5193 } 5194 5195 case DIF_SUBR_STRTOLL: { 5196 uintptr_t s = tupregs[0].dttk_value; 5197 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5198 int base = 10; 5199 5200 if (nargs > 1) { 5201 if ((base = tupregs[1].dttk_value) <= 1 || 5202 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5203 *flags |= CPU_DTRACE_ILLOP; 5204 break; 5205 } 5206 } 5207 5208 if (!dtrace_strcanload(s, size, mstate, vstate)) { 5209 regs[rd] = INT64_MIN; 5210 break; 5211 } 5212 5213 regs[rd] = dtrace_strtoll((char *)s, base, size); 5214 break; 5215 } 5216 5217 case DIF_SUBR_LLTOSTR: { 5218 int64_t i = (int64_t)tupregs[0].dttk_value; 5219 uint64_t val, digit; 5220 uint64_t size = 65; /* enough room for 2^64 in binary */ 5221 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 5222 int base = 10; 5223 5224 if (nargs > 1) { 5225 if ((base = tupregs[1].dttk_value) <= 1 || 5226 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5227 *flags |= CPU_DTRACE_ILLOP; 5228 break; 5229 } 5230 } 5231 5232 val = (base == 10 && i < 0) ? 
i * -1 : i; 5233 5234 if (!DTRACE_INSCRATCH(mstate, size)) { 5235 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5236 regs[rd] = 0; 5237 break; 5238 } 5239 5240 for (*end-- = '\0'; val; val /= base) { 5241 if ((digit = val % base) <= '9' - '0') { 5242 *end-- = '0' + digit; 5243 } else { 5244 *end-- = 'a' + (digit - ('9' - '0') - 1); 5245 } 5246 } 5247 5248 if (i == 0 && base == 16) 5249 *end-- = '0'; 5250 5251 if (base == 16) 5252 *end-- = 'x'; 5253 5254 if (i == 0 || base == 8 || base == 16) 5255 *end-- = '0'; 5256 5257 if (i < 0 && base == 10) 5258 *end-- = '-'; 5259 5260 regs[rd] = (uintptr_t)end + 1; 5261 mstate->dtms_scratch_ptr += size; 5262 break; 5263 } 5264 5265 case DIF_SUBR_HTONS: 5266 case DIF_SUBR_NTOHS: 5267#if BYTE_ORDER == BIG_ENDIAN 5268 regs[rd] = (uint16_t)tupregs[0].dttk_value; 5269#else 5270 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 5271#endif 5272 break; 5273 5274 5275 case DIF_SUBR_HTONL: 5276 case DIF_SUBR_NTOHL: 5277#if BYTE_ORDER == BIG_ENDIAN 5278 regs[rd] = (uint32_t)tupregs[0].dttk_value; 5279#else 5280 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 5281#endif 5282 break; 5283 5284 5285 case DIF_SUBR_HTONLL: 5286 case DIF_SUBR_NTOHLL: 5287#if BYTE_ORDER == BIG_ENDIAN 5288 regs[rd] = (uint64_t)tupregs[0].dttk_value; 5289#else 5290 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 5291#endif 5292 break; 5293 5294 5295 case DIF_SUBR_DIRNAME: 5296 case DIF_SUBR_BASENAME: { 5297 char *dest = (char *)mstate->dtms_scratch_ptr; 5298 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5299 uintptr_t src = tupregs[0].dttk_value; 5300 int i, j, len = dtrace_strlen((char *)src, size); 5301 int lastbase = -1, firstbase = -1, lastdir = -1; 5302 int start, end; 5303 5304 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 5305 regs[rd] = 0; 5306 break; 5307 } 5308 5309 if (!DTRACE_INSCRATCH(mstate, size)) { 5310 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5311 regs[rd] = 0; 5312 break; 5313 } 5314 5315 /* 5316 * The basename and dirname for a zero-length string is 5317 * defined to be "." 5318 */ 5319 if (len == 0) { 5320 len = 1; 5321 src = (uintptr_t)"."; 5322 } 5323 5324 /* 5325 * Start from the back of the string, moving back toward the 5326 * front until we see a character that isn't a slash. That 5327 * character is the last character in the basename. 5328 */ 5329 for (i = len - 1; i >= 0; i--) { 5330 if (dtrace_load8(src + i) != '/') 5331 break; 5332 } 5333 5334 if (i >= 0) 5335 lastbase = i; 5336 5337 /* 5338 * Starting from the last character in the basename, move 5339 * towards the front until we find a slash. The character 5340 * that we processed immediately before that is the first 5341 * character in the basename. 5342 */ 5343 for (; i >= 0; i--) { 5344 if (dtrace_load8(src + i) == '/') 5345 break; 5346 } 5347 5348 if (i >= 0) 5349 firstbase = i + 1; 5350 5351 /* 5352 * Now keep going until we find a non-slash character. That 5353 * character is the last character in the dirname. 5354 */ 5355 for (; i >= 0; i--) { 5356 if (dtrace_load8(src + i) != '/') 5357 break; 5358 } 5359 5360 if (i >= 0) 5361 lastdir = i; 5362 5363 ASSERT(!(lastbase == -1 && firstbase != -1)); 5364 ASSERT(!(firstbase == -1 && lastdir != -1)); 5365 5366 if (lastbase == -1) { 5367 /* 5368 * We didn't find a non-slash character. We know that 5369 * the length is non-zero, so the whole string must be 5370 * slashes. In either the dirname or the basename 5371 * case, we return '/'. 
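 *
 * For example: basename("/foo/bar/") is "bar" and dirname("/foo/bar/")
 * is "/foo", while for an all-slash string such as "///" both are "/".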
5372 */ 5373 ASSERT(firstbase == -1); 5374 firstbase = lastbase = lastdir = 0; 5375 } 5376 5377 if (firstbase == -1) { 5378 /* 5379 * The entire string consists only of a basename 5380 * component. If we're looking for dirname, we need 5381 * to change our string to be just "."; if we're 5382 * looking for a basename, we'll just set the first 5383 * character of the basename to be 0. 5384 */ 5385 if (subr == DIF_SUBR_DIRNAME) { 5386 ASSERT(lastdir == -1); 5387 src = (uintptr_t)"."; 5388 lastdir = 0; 5389 } else { 5390 firstbase = 0; 5391 } 5392 } 5393 5394 if (subr == DIF_SUBR_DIRNAME) { 5395 if (lastdir == -1) { 5396 /* 5397 * We know that we have a slash in the name -- 5398 * or lastdir would be set to 0, above. And 5399 * because lastdir is -1, we know that this 5400 * slash must be the first character. (That 5401 * is, the full string must be of the form 5402 * "/basename".) In this case, the last 5403 * character of the directory name is 0. 5404 */ 5405 lastdir = 0; 5406 } 5407 5408 start = 0; 5409 end = lastdir; 5410 } else { 5411 ASSERT(subr == DIF_SUBR_BASENAME); 5412 ASSERT(firstbase != -1 && lastbase != -1); 5413 start = firstbase; 5414 end = lastbase; 5415 } 5416 5417 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5418 dest[j] = dtrace_load8(src + i); 5419 5420 dest[j] = '\0'; 5421 regs[rd] = (uintptr_t)dest; 5422 mstate->dtms_scratch_ptr += size; 5423 break; 5424 } 5425 5426 case DIF_SUBR_GETF: { 5427 uintptr_t fd = tupregs[0].dttk_value; 5428 struct filedesc *fdp; 5429 file_t *fp; 5430 5431 if (!dtrace_priv_proc(state)) { 5432 regs[rd] = 0; 5433 break; 5434 } 5435 fdp = curproc->p_fd; 5436 FILEDESC_SLOCK(fdp); 5437 fp = fget_locked(fdp, fd); 5438 mstate->dtms_getf = fp; 5439 regs[rd] = (uintptr_t)fp; 5440 FILEDESC_SUNLOCK(fdp); 5441 break; 5442 } 5443 5444 case DIF_SUBR_CLEANPATH: { 5445 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5446 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5447 uintptr_t src = tupregs[0].dttk_value; 5448 int i = 0, j = 0; 5449#if defined(sun) 5450 zone_t *z; 5451#endif 5452 5453 if (!dtrace_strcanload(src, size, mstate, vstate)) { 5454 regs[rd] = 0; 5455 break; 5456 } 5457 5458 if (!DTRACE_INSCRATCH(mstate, size)) { 5459 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5460 regs[rd] = 0; 5461 break; 5462 } 5463 5464 /* 5465 * Move forward, loading each character. 5466 */ 5467 do { 5468 c = dtrace_load8(src + i++); 5469next: 5470 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5471 break; 5472 5473 if (c != '/') { 5474 dest[j++] = c; 5475 continue; 5476 } 5477 5478 c = dtrace_load8(src + i++); 5479 5480 if (c == '/') { 5481 /* 5482 * We have two slashes -- we can just advance 5483 * to the next character. 5484 */ 5485 goto next; 5486 } 5487 5488 if (c != '.') { 5489 /* 5490 * This is not "." and it's not ".." -- we can 5491 * just store the "/" and this character and 5492 * drive on. 5493 */ 5494 dest[j++] = '/'; 5495 dest[j++] = c; 5496 continue; 5497 } 5498 5499 c = dtrace_load8(src + i++); 5500 5501 if (c == '/') { 5502 /* 5503 * This is a "/./" component. We're not going 5504 * to store anything in the destination buffer; 5505 * we're just going to go to the next component. 5506 */ 5507 goto next; 5508 } 5509 5510 if (c != '.') { 5511 /* 5512 * This is not ".." -- we can just store the 5513 * "/." and this character and continue 5514 * processing. 
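 *
 * (The overall effect is that, for example, a path such as
 * "/foo/./bar/../baz//qux" is cleaned up to "/foo/baz/qux".)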
5515 */ 5516 dest[j++] = '/'; 5517 dest[j++] = '.'; 5518 dest[j++] = c; 5519 continue; 5520 } 5521 5522 c = dtrace_load8(src + i++); 5523 5524 if (c != '/' && c != '\0') { 5525 /* 5526 * This is not ".." -- it's "..[mumble]". 5527 * We'll store the "/.." and this character 5528 * and continue processing. 5529 */ 5530 dest[j++] = '/'; 5531 dest[j++] = '.'; 5532 dest[j++] = '.'; 5533 dest[j++] = c; 5534 continue; 5535 } 5536 5537 /* 5538 * This is "/../" or "/..\0". We need to back up 5539 * our destination pointer until we find a "/". 5540 */ 5541 i--; 5542 while (j != 0 && dest[--j] != '/') 5543 continue; 5544 5545 if (c == '\0') 5546 dest[++j] = '/'; 5547 } while (c != '\0'); 5548 5549 dest[j] = '\0'; 5550 5551#if defined(sun) 5552 if (mstate->dtms_getf != NULL && 5553 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5554 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5555 /* 5556 * If we've done a getf() as a part of this ECB and we 5557 * don't have kernel access (and we're not in the global 5558 * zone), check if the path we cleaned up begins with 5559 * the zone's root path, and trim it off if so. Note 5560 * that this is an output cleanliness issue, not a 5561 * security issue: knowing one's zone root path does 5562 * not enable privilege escalation. 5563 */ 5564 if (strstr(dest, z->zone_rootpath) == dest) 5565 dest += strlen(z->zone_rootpath) - 1; 5566 } 5567#endif 5568 5569 regs[rd] = (uintptr_t)dest; 5570 mstate->dtms_scratch_ptr += size; 5571 break; 5572 } 5573 5574 case DIF_SUBR_INET_NTOA: 5575 case DIF_SUBR_INET_NTOA6: 5576 case DIF_SUBR_INET_NTOP: { 5577 size_t size; 5578 int af, argi, i; 5579 char *base, *end; 5580 5581 if (subr == DIF_SUBR_INET_NTOP) { 5582 af = (int)tupregs[0].dttk_value; 5583 argi = 1; 5584 } else { 5585 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5586 argi = 0; 5587 } 5588 5589 if (af == AF_INET) { 5590 ipaddr_t ip4; 5591 uint8_t *ptr8, val; 5592 5593 /* 5594 * Safely load the IPv4 address. 5595 */ 5596 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5597 5598 /* 5599 * Check an IPv4 string will fit in scratch. 5600 */ 5601 size = INET_ADDRSTRLEN; 5602 if (!DTRACE_INSCRATCH(mstate, size)) { 5603 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5604 regs[rd] = 0; 5605 break; 5606 } 5607 base = (char *)mstate->dtms_scratch_ptr; 5608 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5609 5610 /* 5611 * Stringify as a dotted decimal quad. 5612 */ 5613 *end-- = '\0'; 5614 ptr8 = (uint8_t *)&ip4; 5615 for (i = 3; i >= 0; i--) { 5616 val = ptr8[i]; 5617 5618 if (val == 0) { 5619 *end-- = '0'; 5620 } else { 5621 for (; val; val /= 10) { 5622 *end-- = '0' + (val % 10); 5623 } 5624 } 5625 5626 if (i > 0) 5627 *end-- = '.'; 5628 } 5629 ASSERT(end + 1 >= base); 5630 5631 } else if (af == AF_INET6) { 5632 struct in6_addr ip6; 5633 int firstzero, tryzero, numzero, v6end; 5634 uint16_t val; 5635 const char digits[] = "0123456789abcdef"; 5636 5637 /* 5638 * Stringify using RFC 1884 convention 2 - 16 bit 5639 * hexadecimal values with a zero-run compression. 5640 * Lower case hexadecimal digits are used. 5641 * eg, fe80::214:4fff:fe0b:76c8. 5642 * The IPv4 embedded form is returned for inet_ntop, 5643 * just the IPv4 string is returned for inet_ntoa6. 5644 */ 5645 5646 /* 5647 * Safely load the IPv6 address. 5648 */ 5649 dtrace_bcopy( 5650 (void *)(uintptr_t)tupregs[argi].dttk_value, 5651 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5652 5653 /* 5654 * Check an IPv6 string will fit in scratch. 
5655 */ 5656 size = INET6_ADDRSTRLEN; 5657 if (!DTRACE_INSCRATCH(mstate, size)) { 5658 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5659 regs[rd] = 0; 5660 break; 5661 } 5662 base = (char *)mstate->dtms_scratch_ptr; 5663 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5664 *end-- = '\0'; 5665 5666 /* 5667 * Find the longest run of 16 bit zero values 5668 * for the single allowed zero compression - "::". 5669 */ 5670 firstzero = -1; 5671 tryzero = -1; 5672 numzero = 1; 5673 for (i = 0; i < sizeof (struct in6_addr); i++) { 5674#if defined(sun) 5675 if (ip6._S6_un._S6_u8[i] == 0 && 5676#else 5677 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5678#endif 5679 tryzero == -1 && i % 2 == 0) { 5680 tryzero = i; 5681 continue; 5682 } 5683 5684 if (tryzero != -1 && 5685#if defined(sun) 5686 (ip6._S6_un._S6_u8[i] != 0 || 5687#else 5688 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5689#endif 5690 i == sizeof (struct in6_addr) - 1)) { 5691 5692 if (i - tryzero <= numzero) { 5693 tryzero = -1; 5694 continue; 5695 } 5696 5697 firstzero = tryzero; 5698 numzero = i - i % 2 - tryzero; 5699 tryzero = -1; 5700 5701#if defined(sun) 5702 if (ip6._S6_un._S6_u8[i] == 0 && 5703#else 5704 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5705#endif 5706 i == sizeof (struct in6_addr) - 1) 5707 numzero += 2; 5708 } 5709 } 5710 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5711 5712 /* 5713 * Check for an IPv4 embedded address. 5714 */ 5715 v6end = sizeof (struct in6_addr) - 2; 5716 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5717 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5718 for (i = sizeof (struct in6_addr) - 1; 5719 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5720 ASSERT(end >= base); 5721 5722#if defined(sun) 5723 val = ip6._S6_un._S6_u8[i]; 5724#else 5725 val = ip6.__u6_addr.__u6_addr8[i]; 5726#endif 5727 5728 if (val == 0) { 5729 *end-- = '0'; 5730 } else { 5731 for (; val; val /= 10) { 5732 *end-- = '0' + val % 10; 5733 } 5734 } 5735 5736 if (i > DTRACE_V4MAPPED_OFFSET) 5737 *end-- = '.'; 5738 } 5739 5740 if (subr == DIF_SUBR_INET_NTOA6) 5741 goto inetout; 5742 5743 /* 5744 * Set v6end to skip the IPv4 address that 5745 * we have already stringified. 5746 */ 5747 v6end = 10; 5748 } 5749 5750 /* 5751 * Build the IPv6 string by working through the 5752 * address in reverse. 5753 */ 5754 for (i = v6end; i >= 0; i -= 2) { 5755 ASSERT(end >= base); 5756 5757 if (i == firstzero + numzero - 2) { 5758 *end-- = ':'; 5759 *end-- = ':'; 5760 i -= numzero - 2; 5761 continue; 5762 } 5763 5764 if (i < 14 && i != firstzero - 2) 5765 *end-- = ':'; 5766 5767#if defined(sun) 5768 val = (ip6._S6_un._S6_u8[i] << 8) + 5769 ip6._S6_un._S6_u8[i + 1]; 5770#else 5771 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 5772 ip6.__u6_addr.__u6_addr8[i + 1]; 5773#endif 5774 5775 if (val == 0) { 5776 *end-- = '0'; 5777 } else { 5778 for (; val; val /= 16) { 5779 *end-- = digits[val % 16]; 5780 } 5781 } 5782 } 5783 ASSERT(end + 1 >= base); 5784 5785 } else { 5786 /* 5787 * The user didn't use AH_INET or AH_INET6. 
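 * (That is, the address family argument was neither AF_INET nor
 * AF_INET6, so we flag an illegal operation and return 0.)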
5788 */ 5789 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5790 regs[rd] = 0; 5791 break; 5792 } 5793 5794inetout: regs[rd] = (uintptr_t)end + 1; 5795 mstate->dtms_scratch_ptr += size; 5796 break; 5797 } 5798 5799 case DIF_SUBR_MEMREF: { 5800 uintptr_t size = 2 * sizeof(uintptr_t); 5801 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5802 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 5803 5804 /* address and length */ 5805 memref[0] = tupregs[0].dttk_value; 5806 memref[1] = tupregs[1].dttk_value; 5807 5808 regs[rd] = (uintptr_t) memref; 5809 mstate->dtms_scratch_ptr += scratch_size; 5810 break; 5811 } 5812 5813#if !defined(sun) 5814 case DIF_SUBR_MEMSTR: { 5815 char *str = (char *)mstate->dtms_scratch_ptr; 5816 uintptr_t mem = tupregs[0].dttk_value; 5817 char c = tupregs[1].dttk_value; 5818 size_t size = tupregs[2].dttk_value; 5819 uint8_t n; 5820 int i; 5821 5822 regs[rd] = 0; 5823 5824 if (size == 0) 5825 break; 5826 5827 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 5828 break; 5829 5830 if (!DTRACE_INSCRATCH(mstate, size)) { 5831 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5832 break; 5833 } 5834 5835 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 5836 *flags |= CPU_DTRACE_ILLOP; 5837 break; 5838 } 5839 5840 for (i = 0; i < size - 1; i++) { 5841 n = dtrace_load8(mem++); 5842 str[i] = (n == 0) ? c : n; 5843 } 5844 str[size - 1] = 0; 5845 5846 regs[rd] = (uintptr_t)str; 5847 mstate->dtms_scratch_ptr += size; 5848 break; 5849 } 5850#endif 5851 5852 case DIF_SUBR_TYPEREF: { 5853 uintptr_t size = 4 * sizeof(uintptr_t); 5854 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5855 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 5856 5857 /* address, num_elements, type_str, type_len */ 5858 typeref[0] = tupregs[0].dttk_value; 5859 typeref[1] = tupregs[1].dttk_value; 5860 typeref[2] = tupregs[2].dttk_value; 5861 typeref[3] = tupregs[3].dttk_value; 5862 5863 regs[rd] = (uintptr_t) typeref; 5864 mstate->dtms_scratch_ptr += scratch_size; 5865 break; 5866 } 5867 } 5868} 5869 5870/* 5871 * Emulate the execution of DTrace IR instructions specified by the given 5872 * DIF object. This function is deliberately void of assertions as all of 5873 * the necessary checks are handled by a call to dtrace_difo_validate(). 5874 */ 5875static uint64_t 5876dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 5877 dtrace_vstate_t *vstate, dtrace_state_t *state) 5878{ 5879 const dif_instr_t *text = difo->dtdo_buf; 5880 const uint_t textlen = difo->dtdo_len; 5881 const char *strtab = difo->dtdo_strtab; 5882 const uint64_t *inttab = difo->dtdo_inttab; 5883 5884 uint64_t rval = 0; 5885 dtrace_statvar_t *svar; 5886 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 5887 dtrace_difv_t *v; 5888 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5889 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 5890 5891 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 5892 uint64_t regs[DIF_DIR_NREGS]; 5893 uint64_t *tmp; 5894 5895 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 5896 int64_t cc_r; 5897 uint_t pc = 0, id, opc = 0; 5898 uint8_t ttop = 0; 5899 dif_instr_t instr; 5900 uint_t r1, r2, rd; 5901 5902 /* 5903 * We stash the current DIF object into the machine state: we need it 5904 * for subsequent access checking. 
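 *
 * (In particular, dtrace_canload() may need to recognize addresses
 * that fall within this DIF object's own string table.)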
5905 */ 5906 mstate->dtms_difo = difo; 5907 5908 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 5909 5910 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 5911 opc = pc; 5912 5913 instr = text[pc++]; 5914 r1 = DIF_INSTR_R1(instr); 5915 r2 = DIF_INSTR_R2(instr); 5916 rd = DIF_INSTR_RD(instr); 5917 5918 switch (DIF_INSTR_OP(instr)) { 5919 case DIF_OP_OR: 5920 regs[rd] = regs[r1] | regs[r2]; 5921 break; 5922 case DIF_OP_XOR: 5923 regs[rd] = regs[r1] ^ regs[r2]; 5924 break; 5925 case DIF_OP_AND: 5926 regs[rd] = regs[r1] & regs[r2]; 5927 break; 5928 case DIF_OP_SLL: 5929 regs[rd] = regs[r1] << regs[r2]; 5930 break; 5931 case DIF_OP_SRL: 5932 regs[rd] = regs[r1] >> regs[r2]; 5933 break; 5934 case DIF_OP_SUB: 5935 regs[rd] = regs[r1] - regs[r2]; 5936 break; 5937 case DIF_OP_ADD: 5938 regs[rd] = regs[r1] + regs[r2]; 5939 break; 5940 case DIF_OP_MUL: 5941 regs[rd] = regs[r1] * regs[r2]; 5942 break; 5943 case DIF_OP_SDIV: 5944 if (regs[r2] == 0) { 5945 regs[rd] = 0; 5946 *flags |= CPU_DTRACE_DIVZERO; 5947 } else { 5948 regs[rd] = (int64_t)regs[r1] / 5949 (int64_t)regs[r2]; 5950 } 5951 break; 5952 5953 case DIF_OP_UDIV: 5954 if (regs[r2] == 0) { 5955 regs[rd] = 0; 5956 *flags |= CPU_DTRACE_DIVZERO; 5957 } else { 5958 regs[rd] = regs[r1] / regs[r2]; 5959 } 5960 break; 5961 5962 case DIF_OP_SREM: 5963 if (regs[r2] == 0) { 5964 regs[rd] = 0; 5965 *flags |= CPU_DTRACE_DIVZERO; 5966 } else { 5967 regs[rd] = (int64_t)regs[r1] % 5968 (int64_t)regs[r2]; 5969 } 5970 break; 5971 5972 case DIF_OP_UREM: 5973 if (regs[r2] == 0) { 5974 regs[rd] = 0; 5975 *flags |= CPU_DTRACE_DIVZERO; 5976 } else { 5977 regs[rd] = regs[r1] % regs[r2]; 5978 } 5979 break; 5980 5981 case DIF_OP_NOT: 5982 regs[rd] = ~regs[r1]; 5983 break; 5984 case DIF_OP_MOV: 5985 regs[rd] = regs[r1]; 5986 break; 5987 case DIF_OP_CMP: 5988 cc_r = regs[r1] - regs[r2]; 5989 cc_n = cc_r < 0; 5990 cc_z = cc_r == 0; 5991 cc_v = 0; 5992 cc_c = regs[r1] < regs[r2]; 5993 break; 5994 case DIF_OP_TST: 5995 cc_n = cc_v = cc_c = 0; 5996 cc_z = regs[r1] == 0; 5997 break; 5998 case DIF_OP_BA: 5999 pc = DIF_INSTR_LABEL(instr); 6000 break; 6001 case DIF_OP_BE: 6002 if (cc_z) 6003 pc = DIF_INSTR_LABEL(instr); 6004 break; 6005 case DIF_OP_BNE: 6006 if (cc_z == 0) 6007 pc = DIF_INSTR_LABEL(instr); 6008 break; 6009 case DIF_OP_BG: 6010 if ((cc_z | (cc_n ^ cc_v)) == 0) 6011 pc = DIF_INSTR_LABEL(instr); 6012 break; 6013 case DIF_OP_BGU: 6014 if ((cc_c | cc_z) == 0) 6015 pc = DIF_INSTR_LABEL(instr); 6016 break; 6017 case DIF_OP_BGE: 6018 if ((cc_n ^ cc_v) == 0) 6019 pc = DIF_INSTR_LABEL(instr); 6020 break; 6021 case DIF_OP_BGEU: 6022 if (cc_c == 0) 6023 pc = DIF_INSTR_LABEL(instr); 6024 break; 6025 case DIF_OP_BL: 6026 if (cc_n ^ cc_v) 6027 pc = DIF_INSTR_LABEL(instr); 6028 break; 6029 case DIF_OP_BLU: 6030 if (cc_c) 6031 pc = DIF_INSTR_LABEL(instr); 6032 break; 6033 case DIF_OP_BLE: 6034 if (cc_z | (cc_n ^ cc_v)) 6035 pc = DIF_INSTR_LABEL(instr); 6036 break; 6037 case DIF_OP_BLEU: 6038 if (cc_c | cc_z) 6039 pc = DIF_INSTR_LABEL(instr); 6040 break; 6041 case DIF_OP_RLDSB: 6042 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6043 break; 6044 /*FALLTHROUGH*/ 6045 case DIF_OP_LDSB: 6046 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6047 break; 6048 case DIF_OP_RLDSH: 6049 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6050 break; 6051 /*FALLTHROUGH*/ 6052 case DIF_OP_LDSH: 6053 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6054 break; 6055 case DIF_OP_RLDSW: 6056 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6057 break; 6058 /*FALLTHROUGH*/ 6059 case DIF_OP_LDSW: 
6060 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6061 break; 6062 case DIF_OP_RLDUB: 6063 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6064 break; 6065 /*FALLTHROUGH*/ 6066 case DIF_OP_LDUB: 6067 regs[rd] = dtrace_load8(regs[r1]); 6068 break; 6069 case DIF_OP_RLDUH: 6070 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6071 break; 6072 /*FALLTHROUGH*/ 6073 case DIF_OP_LDUH: 6074 regs[rd] = dtrace_load16(regs[r1]); 6075 break; 6076 case DIF_OP_RLDUW: 6077 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6078 break; 6079 /*FALLTHROUGH*/ 6080 case DIF_OP_LDUW: 6081 regs[rd] = dtrace_load32(regs[r1]); 6082 break; 6083 case DIF_OP_RLDX: 6084 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6085 break; 6086 /*FALLTHROUGH*/ 6087 case DIF_OP_LDX: 6088 regs[rd] = dtrace_load64(regs[r1]); 6089 break; 6090 case DIF_OP_ULDSB: 6091 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6092 regs[rd] = (int8_t) 6093 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6094 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6095 break; 6096 case DIF_OP_ULDSH: 6097 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6098 regs[rd] = (int16_t) 6099 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6100 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6101 break; 6102 case DIF_OP_ULDSW: 6103 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6104 regs[rd] = (int32_t) 6105 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6106 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6107 break; 6108 case DIF_OP_ULDUB: 6109 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6110 regs[rd] = 6111 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6112 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6113 break; 6114 case DIF_OP_ULDUH: 6115 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6116 regs[rd] = 6117 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6118 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6119 break; 6120 case DIF_OP_ULDUW: 6121 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6122 regs[rd] = 6123 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6124 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6125 break; 6126 case DIF_OP_ULDX: 6127 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6128 regs[rd] = 6129 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6130 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6131 break; 6132 case DIF_OP_RET: 6133 rval = regs[rd]; 6134 pc = textlen; 6135 break; 6136 case DIF_OP_NOP: 6137 break; 6138 case DIF_OP_SETX: 6139 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6140 break; 6141 case DIF_OP_SETS: 6142 regs[rd] = (uint64_t)(uintptr_t) 6143 (strtab + DIF_INSTR_STRING(instr)); 6144 break; 6145 case DIF_OP_SCMP: { 6146 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6147 uintptr_t s1 = regs[r1]; 6148 uintptr_t s2 = regs[r2]; 6149 6150 if (s1 != 0 && 6151 !dtrace_strcanload(s1, sz, mstate, vstate)) 6152 break; 6153 if (s2 != 0 && 6154 !dtrace_strcanload(s2, sz, mstate, vstate)) 6155 break; 6156 6157 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 6158 6159 cc_n = cc_r < 0; 6160 cc_z = cc_r == 0; 6161 cc_v = cc_c = 0; 6162 break; 6163 } 6164 case DIF_OP_LDGA: 6165 regs[rd] = dtrace_dif_variable(mstate, state, 6166 r1, regs[r2]); 6167 break; 6168 case DIF_OP_LDGS: 6169 id = DIF_INSTR_VAR(instr); 6170 6171 if (id >= DIF_VAR_OTHER_UBASE) { 6172 uintptr_t a; 6173 6174 id -= DIF_VAR_OTHER_UBASE; 6175 svar = vstate->dtvs_globals[id]; 6176 ASSERT(svar != NULL); 6177 v = &svar->dtsv_var; 6178 6179 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6180 regs[rd] = svar->dtsv_data; 6181 break; 6182 } 6183 6184 a = (uintptr_t)svar->dtsv_data; 6185 6186 if (*(uint8_t *)a == UINT8_MAX) { 6187 /* 6188 * If the 0th byte is set 
to UINT8_MAX 6189 * then this is to be treated as a 6190 * reference to a NULL variable. 6191 */ 6192 regs[rd] = 0; 6193 } else { 6194 regs[rd] = a + sizeof (uint64_t); 6195 } 6196 6197 break; 6198 } 6199 6200 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6201 break; 6202 6203 case DIF_OP_STGS: 6204 id = DIF_INSTR_VAR(instr); 6205 6206 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6207 id -= DIF_VAR_OTHER_UBASE; 6208 6209 svar = vstate->dtvs_globals[id]; 6210 ASSERT(svar != NULL); 6211 v = &svar->dtsv_var; 6212 6213 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6214 uintptr_t a = (uintptr_t)svar->dtsv_data; 6215 6216 ASSERT(a != 0); 6217 ASSERT(svar->dtsv_size != 0); 6218 6219 if (regs[rd] == 0) { 6220 *(uint8_t *)a = UINT8_MAX; 6221 break; 6222 } else { 6223 *(uint8_t *)a = 0; 6224 a += sizeof (uint64_t); 6225 } 6226 if (!dtrace_vcanload( 6227 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6228 mstate, vstate)) 6229 break; 6230 6231 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6232 (void *)a, &v->dtdv_type); 6233 break; 6234 } 6235 6236 svar->dtsv_data = regs[rd]; 6237 break; 6238 6239 case DIF_OP_LDTA: 6240 /* 6241 * There are no DTrace built-in thread-local arrays at 6242 * present. This opcode is saved for future work. 6243 */ 6244 *flags |= CPU_DTRACE_ILLOP; 6245 regs[rd] = 0; 6246 break; 6247 6248 case DIF_OP_LDLS: 6249 id = DIF_INSTR_VAR(instr); 6250 6251 if (id < DIF_VAR_OTHER_UBASE) { 6252 /* 6253 * For now, this has no meaning. 6254 */ 6255 regs[rd] = 0; 6256 break; 6257 } 6258 6259 id -= DIF_VAR_OTHER_UBASE; 6260 6261 ASSERT(id < vstate->dtvs_nlocals); 6262 ASSERT(vstate->dtvs_locals != NULL); 6263 6264 svar = vstate->dtvs_locals[id]; 6265 ASSERT(svar != NULL); 6266 v = &svar->dtsv_var; 6267 6268 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6269 uintptr_t a = (uintptr_t)svar->dtsv_data; 6270 size_t sz = v->dtdv_type.dtdt_size; 6271 6272 sz += sizeof (uint64_t); 6273 ASSERT(svar->dtsv_size == NCPU * sz); 6274 a += curcpu * sz; 6275 6276 if (*(uint8_t *)a == UINT8_MAX) { 6277 /* 6278 * If the 0th byte is set to UINT8_MAX 6279 * then this is to be treated as a 6280 * reference to a NULL variable. 
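 * (Each per-CPU slot of a by-ref local consists of an 8-byte
 * header followed by the variable's data; the first byte of the
 * header is the NULL sentinel, which is why the non-NULL case
 * below skips sizeof (uint64_t) to reach the data itself.)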
6281 */ 6282 regs[rd] = 0; 6283 } else { 6284 regs[rd] = a + sizeof (uint64_t); 6285 } 6286 6287 break; 6288 } 6289 6290 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6291 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6292 regs[rd] = tmp[curcpu]; 6293 break; 6294 6295 case DIF_OP_STLS: 6296 id = DIF_INSTR_VAR(instr); 6297 6298 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6299 id -= DIF_VAR_OTHER_UBASE; 6300 ASSERT(id < vstate->dtvs_nlocals); 6301 6302 ASSERT(vstate->dtvs_locals != NULL); 6303 svar = vstate->dtvs_locals[id]; 6304 ASSERT(svar != NULL); 6305 v = &svar->dtsv_var; 6306 6307 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6308 uintptr_t a = (uintptr_t)svar->dtsv_data; 6309 size_t sz = v->dtdv_type.dtdt_size; 6310 6311 sz += sizeof (uint64_t); 6312 ASSERT(svar->dtsv_size == NCPU * sz); 6313 a += curcpu * sz; 6314 6315 if (regs[rd] == 0) { 6316 *(uint8_t *)a = UINT8_MAX; 6317 break; 6318 } else { 6319 *(uint8_t *)a = 0; 6320 a += sizeof (uint64_t); 6321 } 6322 6323 if (!dtrace_vcanload( 6324 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6325 mstate, vstate)) 6326 break; 6327 6328 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6329 (void *)a, &v->dtdv_type); 6330 break; 6331 } 6332 6333 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6334 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6335 tmp[curcpu] = regs[rd]; 6336 break; 6337 6338 case DIF_OP_LDTS: { 6339 dtrace_dynvar_t *dvar; 6340 dtrace_key_t *key; 6341 6342 id = DIF_INSTR_VAR(instr); 6343 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6344 id -= DIF_VAR_OTHER_UBASE; 6345 v = &vstate->dtvs_tlocals[id]; 6346 6347 key = &tupregs[DIF_DTR_NREGS]; 6348 key[0].dttk_value = (uint64_t)id; 6349 key[0].dttk_size = 0; 6350 DTRACE_TLS_THRKEY(key[1].dttk_value); 6351 key[1].dttk_size = 0; 6352 6353 dvar = dtrace_dynvar(dstate, 2, key, 6354 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6355 mstate, vstate); 6356 6357 if (dvar == NULL) { 6358 regs[rd] = 0; 6359 break; 6360 } 6361 6362 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6363 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6364 } else { 6365 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6366 } 6367 6368 break; 6369 } 6370 6371 case DIF_OP_STTS: { 6372 dtrace_dynvar_t *dvar; 6373 dtrace_key_t *key; 6374 6375 id = DIF_INSTR_VAR(instr); 6376 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6377 id -= DIF_VAR_OTHER_UBASE; 6378 6379 key = &tupregs[DIF_DTR_NREGS]; 6380 key[0].dttk_value = (uint64_t)id; 6381 key[0].dttk_size = 0; 6382 DTRACE_TLS_THRKEY(key[1].dttk_value); 6383 key[1].dttk_size = 0; 6384 v = &vstate->dtvs_tlocals[id]; 6385 6386 dvar = dtrace_dynvar(dstate, 2, key, 6387 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6388 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6389 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6390 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6391 6392 /* 6393 * Given that we're storing to thread-local data, 6394 * we need to flush our predicate cache. 
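 * (The cache identifies a predicate known to evaluate to false for
 * this thread; a store to a thread-local variable could change
 * that outcome, so the cached result must be discarded.)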
6395 */ 6396 curthread->t_predcache = 0; 6397 6398 if (dvar == NULL) 6399 break; 6400 6401 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6402 if (!dtrace_vcanload( 6403 (void *)(uintptr_t)regs[rd], 6404 &v->dtdv_type, mstate, vstate)) 6405 break; 6406 6407 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6408 dvar->dtdv_data, &v->dtdv_type); 6409 } else { 6410 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6411 } 6412 6413 break; 6414 } 6415 6416 case DIF_OP_SRA: 6417 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6418 break; 6419 6420 case DIF_OP_CALL: 6421 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6422 regs, tupregs, ttop, mstate, state); 6423 break; 6424 6425 case DIF_OP_PUSHTR: 6426 if (ttop == DIF_DTR_NREGS) { 6427 *flags |= CPU_DTRACE_TUPOFLOW; 6428 break; 6429 } 6430 6431 if (r1 == DIF_TYPE_STRING) { 6432 /* 6433 * If this is a string type and the size is 0, 6434 * we'll use the system-wide default string 6435 * size. Note that we are _not_ looking at 6436 * the value of the DTRACEOPT_STRSIZE option; 6437 * had this been set, we would expect to have 6438 * a non-zero size value in the "pushtr". 6439 */ 6440 tupregs[ttop].dttk_size = 6441 dtrace_strlen((char *)(uintptr_t)regs[rd], 6442 regs[r2] ? regs[r2] : 6443 dtrace_strsize_default) + 1; 6444 } else { 6445 tupregs[ttop].dttk_size = regs[r2]; 6446 } 6447 6448 tupregs[ttop++].dttk_value = regs[rd]; 6449 break; 6450 6451 case DIF_OP_PUSHTV: 6452 if (ttop == DIF_DTR_NREGS) { 6453 *flags |= CPU_DTRACE_TUPOFLOW; 6454 break; 6455 } 6456 6457 tupregs[ttop].dttk_value = regs[rd]; 6458 tupregs[ttop++].dttk_size = 0; 6459 break; 6460 6461 case DIF_OP_POPTS: 6462 if (ttop != 0) 6463 ttop--; 6464 break; 6465 6466 case DIF_OP_FLUSHTS: 6467 ttop = 0; 6468 break; 6469 6470 case DIF_OP_LDGAA: 6471 case DIF_OP_LDTAA: { 6472 dtrace_dynvar_t *dvar; 6473 dtrace_key_t *key = tupregs; 6474 uint_t nkeys = ttop; 6475 6476 id = DIF_INSTR_VAR(instr); 6477 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6478 id -= DIF_VAR_OTHER_UBASE; 6479 6480 key[nkeys].dttk_value = (uint64_t)id; 6481 key[nkeys++].dttk_size = 0; 6482 6483 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6484 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6485 key[nkeys++].dttk_size = 0; 6486 v = &vstate->dtvs_tlocals[id]; 6487 } else { 6488 v = &vstate->dtvs_globals[id]->dtsv_var; 6489 } 6490 6491 dvar = dtrace_dynvar(dstate, nkeys, key, 6492 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6493 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6494 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6495 6496 if (dvar == NULL) { 6497 regs[rd] = 0; 6498 break; 6499 } 6500 6501 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6502 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6503 } else { 6504 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6505 } 6506 6507 break; 6508 } 6509 6510 case DIF_OP_STGAA: 6511 case DIF_OP_STTAA: { 6512 dtrace_dynvar_t *dvar; 6513 dtrace_key_t *key = tupregs; 6514 uint_t nkeys = ttop; 6515 6516 id = DIF_INSTR_VAR(instr); 6517 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6518 id -= DIF_VAR_OTHER_UBASE; 6519 6520 key[nkeys].dttk_value = (uint64_t)id; 6521 key[nkeys++].dttk_size = 0; 6522 6523 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6524 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6525 key[nkeys++].dttk_size = 0; 6526 v = &vstate->dtvs_tlocals[id]; 6527 } else { 6528 v = &vstate->dtvs_globals[id]->dtsv_var; 6529 } 6530 6531 dvar = dtrace_dynvar(dstate, nkeys, key, 6532 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6533 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6534 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 6535 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6536 6537 if (dvar == NULL) 6538 break; 6539 6540 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6541 if (!dtrace_vcanload( 6542 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6543 mstate, vstate)) 6544 break; 6545 6546 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6547 dvar->dtdv_data, &v->dtdv_type); 6548 } else { 6549 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6550 } 6551 6552 break; 6553 } 6554 6555 case DIF_OP_ALLOCS: { 6556 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6557 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6558 6559 /* 6560 * Rounding up the user allocation size could have 6561 * overflowed large, bogus allocations (like -1ULL) to 6562 * 0. 6563 */ 6564 if (size < regs[r1] || 6565 !DTRACE_INSCRATCH(mstate, size)) { 6566 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6567 regs[rd] = 0; 6568 break; 6569 } 6570 6571 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6572 mstate->dtms_scratch_ptr += size; 6573 regs[rd] = ptr; 6574 break; 6575 } 6576 6577 case DIF_OP_COPYS: 6578 if (!dtrace_canstore(regs[rd], regs[r2], 6579 mstate, vstate)) { 6580 *flags |= CPU_DTRACE_BADADDR; 6581 *illval = regs[rd]; 6582 break; 6583 } 6584 6585 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6586 break; 6587 6588 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6589 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6590 break; 6591 6592 case DIF_OP_STB: 6593 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6594 *flags |= CPU_DTRACE_BADADDR; 6595 *illval = regs[rd]; 6596 break; 6597 } 6598 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6599 break; 6600 6601 case DIF_OP_STH: 6602 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6603 *flags |= CPU_DTRACE_BADADDR; 6604 *illval = regs[rd]; 6605 break; 6606 } 6607 if (regs[rd] & 1) { 6608 *flags |= CPU_DTRACE_BADALIGN; 6609 *illval = regs[rd]; 6610 break; 6611 } 6612 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6613 break; 6614 6615 case DIF_OP_STW: 6616 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6617 *flags |= CPU_DTRACE_BADADDR; 6618 *illval = regs[rd]; 6619 break; 6620 } 6621 if (regs[rd] & 3) { 6622 *flags |= CPU_DTRACE_BADALIGN; 6623 *illval = regs[rd]; 6624 break; 6625 } 6626 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6627 break; 6628 6629 case DIF_OP_STX: 6630 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6631 *flags |= CPU_DTRACE_BADADDR; 6632 *illval = regs[rd]; 6633 break; 6634 } 6635 if (regs[rd] & 7) { 6636 *flags |= CPU_DTRACE_BADALIGN; 6637 *illval = regs[rd]; 6638 break; 6639 } 6640 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6641 break; 6642 } 6643 } 6644 6645 if (!(*flags & CPU_DTRACE_FAULT)) 6646 return (rval); 6647 6648 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6649 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6650 6651 return (0); 6652} 6653 6654static void 6655dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6656{ 6657 dtrace_probe_t *probe = ecb->dte_probe; 6658 dtrace_provider_t *prov = probe->dtpr_provider; 6659 char c[DTRACE_FULLNAMELEN + 80], *str; 6660 char *msg = "dtrace: breakpoint action at probe "; 6661 char *ecbmsg = " (ecb "; 6662 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6663 uintptr_t val = (uintptr_t)ecb; 6664 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6665 6666 if (dtrace_destructive_disallow) 6667 return; 6668 6669 /* 6670 * It's impossible to be taking action on the NULL probe. 
6671 */ 6672 ASSERT(probe != NULL); 6673 6674 /* 6675 * This is a poor man's (destitute man's?) sprintf(): we want to 6676 * print the provider name, module name, function name and name of 6677 * the probe, along with the hex address of the ECB with the breakpoint 6678 * action -- all of which we must place in the character buffer by 6679 * hand. 6680 */ 6681 while (*msg != '\0') 6682 c[i++] = *msg++; 6683 6684 for (str = prov->dtpv_name; *str != '\0'; str++) 6685 c[i++] = *str; 6686 c[i++] = ':'; 6687 6688 for (str = probe->dtpr_mod; *str != '\0'; str++) 6689 c[i++] = *str; 6690 c[i++] = ':'; 6691 6692 for (str = probe->dtpr_func; *str != '\0'; str++) 6693 c[i++] = *str; 6694 c[i++] = ':'; 6695 6696 for (str = probe->dtpr_name; *str != '\0'; str++) 6697 c[i++] = *str; 6698 6699 while (*ecbmsg != '\0') 6700 c[i++] = *ecbmsg++; 6701 6702 while (shift >= 0) { 6703 mask = (uintptr_t)0xf << shift; 6704 6705 if (val >= ((uintptr_t)1 << shift)) 6706 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6707 shift -= 4; 6708 } 6709 6710 c[i++] = ')'; 6711 c[i] = '\0'; 6712 6713#if defined(sun) 6714 debug_enter(c); 6715#else 6716 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6717#endif 6718} 6719 6720static void 6721dtrace_action_panic(dtrace_ecb_t *ecb) 6722{ 6723 dtrace_probe_t *probe = ecb->dte_probe; 6724 6725 /* 6726 * It's impossible to be taking action on the NULL probe. 6727 */ 6728 ASSERT(probe != NULL); 6729 6730 if (dtrace_destructive_disallow) 6731 return; 6732 6733 if (dtrace_panicked != NULL) 6734 return; 6735 6736 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6737 return; 6738 6739 /* 6740 * We won the right to panic. (We want to be sure that only one 6741 * thread calls panic() from dtrace_probe(), and that panic() is 6742 * called exactly once.) 6743 */ 6744 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6745 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6746 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6747} 6748 6749static void 6750dtrace_action_raise(uint64_t sig) 6751{ 6752 if (dtrace_destructive_disallow) 6753 return; 6754 6755 if (sig >= NSIG) { 6756 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6757 return; 6758 } 6759 6760#if defined(sun) 6761 /* 6762 * raise() has a queue depth of 1 -- we ignore all subsequent 6763 * invocations of the raise() action. 
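 * The signal itself is posted on the way back out to user-level,
 * once we are in a context where it is safe to do so.  (The
 * FreeBSD case below instead posts the signal immediately with
 * kern_psignal().)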
6764 */ 6765 if (curthread->t_dtrace_sig == 0) 6766 curthread->t_dtrace_sig = (uint8_t)sig; 6767 6768 curthread->t_sig_check = 1; 6769 aston(curthread); 6770#else 6771 struct proc *p = curproc; 6772 PROC_LOCK(p); 6773 kern_psignal(p, sig); 6774 PROC_UNLOCK(p); 6775#endif 6776} 6777 6778static void 6779dtrace_action_stop(void) 6780{ 6781 if (dtrace_destructive_disallow) 6782 return; 6783 6784#if defined(sun) 6785 if (!curthread->t_dtrace_stop) { 6786 curthread->t_dtrace_stop = 1; 6787 curthread->t_sig_check = 1; 6788 aston(curthread); 6789 } 6790#else 6791 struct proc *p = curproc; 6792 PROC_LOCK(p); 6793 kern_psignal(p, SIGSTOP); 6794 PROC_UNLOCK(p); 6795#endif 6796} 6797 6798static void 6799dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 6800{ 6801 hrtime_t now; 6802 volatile uint16_t *flags; 6803#if defined(sun) 6804 cpu_t *cpu = CPU; 6805#else 6806 cpu_t *cpu = &solaris_cpu[curcpu]; 6807#endif 6808 6809 if (dtrace_destructive_disallow) 6810 return; 6811 6812 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 6813 6814 now = dtrace_gethrtime(); 6815 6816 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 6817 /* 6818 * We need to advance the mark to the current time. 6819 */ 6820 cpu->cpu_dtrace_chillmark = now; 6821 cpu->cpu_dtrace_chilled = 0; 6822 } 6823 6824 /* 6825 * Now check to see if the requested chill time would take us over 6826 * the maximum amount of time allowed in the chill interval. (Or 6827 * worse, if the calculation itself induces overflow.) 6828 */ 6829 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 6830 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 6831 *flags |= CPU_DTRACE_ILLOP; 6832 return; 6833 } 6834 6835 while (dtrace_gethrtime() - now < val) 6836 continue; 6837 6838 /* 6839 * Normally, we assure that the value of the variable "timestamp" does 6840 * not change within an ECB. The presence of chill() represents an 6841 * exception to this rule, however. 6842 */ 6843 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 6844 cpu->cpu_dtrace_chilled += val; 6845} 6846 6847static void 6848dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 6849 uint64_t *buf, uint64_t arg) 6850{ 6851 int nframes = DTRACE_USTACK_NFRAMES(arg); 6852 int strsize = DTRACE_USTACK_STRSIZE(arg); 6853 uint64_t *pcs = &buf[1], *fps; 6854 char *str = (char *)&pcs[nframes]; 6855 int size, offs = 0, i, j; 6856 uintptr_t old = mstate->dtms_scratch_ptr, saved; 6857 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6858 char *sym; 6859 6860 /* 6861 * Should be taking a faster path if string space has not been 6862 * allocated. 6863 */ 6864 ASSERT(strsize != 0); 6865 6866 /* 6867 * We will first allocate some temporary space for the frame pointers. 6868 */ 6869 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6870 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 6871 (nframes * sizeof (uint64_t)); 6872 6873 if (!DTRACE_INSCRATCH(mstate, size)) { 6874 /* 6875 * Not enough room for our frame pointers -- need to indicate 6876 * that we ran out of scratch space. 6877 */ 6878 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6879 return; 6880 } 6881 6882 mstate->dtms_scratch_ptr += size; 6883 saved = mstate->dtms_scratch_ptr; 6884 6885 /* 6886 * Now get a stack with both program counters and frame pointers. 6887 */ 6888 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6889 dtrace_getufpstack(buf, fps, nframes + 1); 6890 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6891 6892 /* 6893 * If that faulted, we're cooked. 
6894 */ 6895 if (*flags & CPU_DTRACE_FAULT) 6896 goto out; 6897 6898 /* 6899 * Now we want to walk up the stack, calling the USTACK helper. For 6900 * each iteration, we restore the scratch pointer. 6901 */ 6902 for (i = 0; i < nframes; i++) { 6903 mstate->dtms_scratch_ptr = saved; 6904 6905 if (offs >= strsize) 6906 break; 6907 6908 sym = (char *)(uintptr_t)dtrace_helper( 6909 DTRACE_HELPER_ACTION_USTACK, 6910 mstate, state, pcs[i], fps[i]); 6911 6912 /* 6913 * If we faulted while running the helper, we're going to 6914 * clear the fault and null out the corresponding string. 6915 */ 6916 if (*flags & CPU_DTRACE_FAULT) { 6917 *flags &= ~CPU_DTRACE_FAULT; 6918 str[offs++] = '\0'; 6919 continue; 6920 } 6921 6922 if (sym == NULL) { 6923 str[offs++] = '\0'; 6924 continue; 6925 } 6926 6927 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6928 6929 /* 6930 * Now copy in the string that the helper returned to us. 6931 */ 6932 for (j = 0; offs + j < strsize; j++) { 6933 if ((str[offs + j] = sym[j]) == '\0') 6934 break; 6935 } 6936 6937 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6938 6939 offs += j + 1; 6940 } 6941 6942 if (offs >= strsize) { 6943 /* 6944 * If we didn't have room for all of the strings, we don't 6945 * abort processing -- this needn't be a fatal error -- but we 6946 * still want to increment a counter (dts_stkstroverflows) to 6947 * allow this condition to be warned about. (If this is from 6948 * a jstack() action, it is easily tuned via jstackstrsize.) 6949 */ 6950 dtrace_error(&state->dts_stkstroverflows); 6951 } 6952 6953 while (offs < strsize) 6954 str[offs++] = '\0'; 6955 6956out: 6957 mstate->dtms_scratch_ptr = old; 6958} 6959 6960static void 6961dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 6962 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 6963{ 6964 volatile uint16_t *flags; 6965 uint64_t val = *valp; 6966 size_t valoffs = *valoffsp; 6967 6968 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 6969 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 6970 6971 /* 6972 * If this is a string, we're going to only load until we find the zero 6973 * byte -- after which we'll store zero bytes. 6974 */ 6975 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 6976 char c = '\0' + 1; 6977 size_t s; 6978 6979 for (s = 0; s < size; s++) { 6980 if (c != '\0' && dtkind == DIF_TF_BYREF) { 6981 c = dtrace_load8(val++); 6982 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 6983 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6984 c = dtrace_fuword8((void *)(uintptr_t)val++); 6985 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6986 if (*flags & CPU_DTRACE_FAULT) 6987 break; 6988 } 6989 6990 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 6991 6992 if (c == '\0' && intuple) 6993 break; 6994 } 6995 } else { 6996 uint8_t c; 6997 while (valoffs < end) { 6998 if (dtkind == DIF_TF_BYREF) { 6999 c = dtrace_load8(val++); 7000 } else if (dtkind == DIF_TF_BYUREF) { 7001 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7002 c = dtrace_fuword8((void *)(uintptr_t)val++); 7003 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7004 if (*flags & CPU_DTRACE_FAULT) 7005 break; 7006 } 7007 7008 DTRACE_STORE(uint8_t, tomax, 7009 valoffs++, c); 7010 } 7011 } 7012 7013 *valp = val; 7014 *valoffsp = valoffs; 7015} 7016 7017/* 7018 * If you're looking for the epicenter of DTrace, you just found it. This 7019 * is the function called by the provider to fire a probe -- from which all 7020 * subsequent probe-context DTrace activity emanates. 
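 * A provider fires a probe by passing the identifier it was handed
 * at dtrace_probe_create() time along with up to five arguments;
 * illustratively:
 *
 *	dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
 *
 * Everything from predicate evaluation to buffer reservation for
 * that firing happens beneath this call, with interrupts disabled.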
7021 */ 7022void 7023dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7024 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7025{ 7026 processorid_t cpuid; 7027 dtrace_icookie_t cookie; 7028 dtrace_probe_t *probe; 7029 dtrace_mstate_t mstate; 7030 dtrace_ecb_t *ecb; 7031 dtrace_action_t *act; 7032 intptr_t offs; 7033 size_t size; 7034 int vtime, onintr; 7035 volatile uint16_t *flags; 7036 hrtime_t now; 7037 7038 if (panicstr != NULL) 7039 return; 7040 7041#if defined(sun) 7042 /* 7043 * Kick out immediately if this CPU is still being born (in which case 7044 * curthread will be set to -1) or the current thread can't allow 7045 * probes in its current context. 7046 */ 7047 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7048 return; 7049#endif 7050 7051 cookie = dtrace_interrupt_disable(); 7052 probe = dtrace_probes[id - 1]; 7053 cpuid = curcpu; 7054 onintr = CPU_ON_INTR(CPU); 7055 7056 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7057 probe->dtpr_predcache == curthread->t_predcache) { 7058 /* 7059 * We have hit in the predicate cache; we know that 7060 * this predicate would evaluate to be false. 7061 */ 7062 dtrace_interrupt_enable(cookie); 7063 return; 7064 } 7065 7066#if defined(sun) 7067 if (panic_quiesce) { 7068#else 7069 if (panicstr != NULL) { 7070#endif 7071 /* 7072 * We don't trace anything if we're panicking. 7073 */ 7074 dtrace_interrupt_enable(cookie); 7075 return; 7076 } 7077 7078 now = dtrace_gethrtime(); 7079 vtime = dtrace_vtime_references != 0; 7080 7081 if (vtime && curthread->t_dtrace_start) 7082 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7083 7084 mstate.dtms_difo = NULL; 7085 mstate.dtms_probe = probe; 7086 mstate.dtms_strtok = 0; 7087 mstate.dtms_arg[0] = arg0; 7088 mstate.dtms_arg[1] = arg1; 7089 mstate.dtms_arg[2] = arg2; 7090 mstate.dtms_arg[3] = arg3; 7091 mstate.dtms_arg[4] = arg4; 7092 7093 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7094 7095 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7096 dtrace_predicate_t *pred = ecb->dte_predicate; 7097 dtrace_state_t *state = ecb->dte_state; 7098 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7099 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7100 dtrace_vstate_t *vstate = &state->dts_vstate; 7101 dtrace_provider_t *prov = probe->dtpr_provider; 7102 uint64_t tracememsize = 0; 7103 int committed = 0; 7104 caddr_t tomax; 7105 7106 /* 7107 * A little subtlety with the following (seemingly innocuous) 7108 * declaration of the automatic 'val': by looking at the 7109 * code, you might think that it could be declared in the 7110 * action processing loop, below. (That is, it's only used in 7111 * the action processing loop.) However, it must be declared 7112 * out of that scope because in the case of DIF expression 7113 * arguments to aggregating actions, one iteration of the 7114 * action loop will use the last iteration's value. 7115 */ 7116 uint64_t val = 0; 7117 7118 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7119 mstate.dtms_getf = NULL; 7120 7121 *flags &= ~CPU_DTRACE_ERROR; 7122 7123 if (prov == dtrace_provider) { 7124 /* 7125 * If dtrace itself is the provider of this probe, 7126 * we're only going to continue processing the ECB if 7127 * arg0 (the dtrace_state_t) is equal to the ECB's 7128 * creating state. (This prevents disjoint consumers 7129 * from seeing one another's metaprobes.) 
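 * For the dtrace provider's own probes (BEGIN, END and ERROR),
 * arg0 is the dtrace_state_t of the consumer on whose behalf the
 * probe fired, so the comparison below suffices.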
7130 */ 7131 if (arg0 != (uint64_t)(uintptr_t)state) 7132 continue; 7133 } 7134 7135 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7136 /* 7137 * We're not currently active. If our provider isn't 7138 * the dtrace pseudo provider, we're not interested. 7139 */ 7140 if (prov != dtrace_provider) 7141 continue; 7142 7143 /* 7144 * Now we must further check if we are in the BEGIN 7145 * probe. If we are, we will only continue processing 7146 * if we're still in WARMUP -- if one BEGIN enabling 7147 * has invoked the exit() action, we don't want to 7148 * evaluate subsequent BEGIN enablings. 7149 */ 7150 if (probe->dtpr_id == dtrace_probeid_begin && 7151 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7152 ASSERT(state->dts_activity == 7153 DTRACE_ACTIVITY_DRAINING); 7154 continue; 7155 } 7156 } 7157 7158 if (ecb->dte_cond) { 7159 /* 7160 * If the dte_cond bits indicate that this 7161 * consumer is only allowed to see user-mode firings 7162 * of this probe, call the provider's dtps_usermode() 7163 * entry point to check that the probe was fired 7164 * while in a user context. Skip this ECB if that's 7165 * not the case. 7166 */ 7167 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7168 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7169 probe->dtpr_id, probe->dtpr_arg) == 0) 7170 continue; 7171 7172#if defined(sun) 7173 /* 7174 * This is more subtle than it looks. We have to be 7175 * absolutely certain that CRED() isn't going to 7176 * change out from under us so it's only legit to 7177 * examine that structure if we're in constrained 7178 * situations. Currently, the only times we'll this 7179 * check is if a non-super-user has enabled the 7180 * profile or syscall providers -- providers that 7181 * allow visibility of all processes. For the 7182 * profile case, the check above will ensure that 7183 * we're examining a user context. 7184 */ 7185 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7186 cred_t *cr; 7187 cred_t *s_cr = 7188 ecb->dte_state->dts_cred.dcr_cred; 7189 proc_t *proc; 7190 7191 ASSERT(s_cr != NULL); 7192 7193 if ((cr = CRED()) == NULL || 7194 s_cr->cr_uid != cr->cr_uid || 7195 s_cr->cr_uid != cr->cr_ruid || 7196 s_cr->cr_uid != cr->cr_suid || 7197 s_cr->cr_gid != cr->cr_gid || 7198 s_cr->cr_gid != cr->cr_rgid || 7199 s_cr->cr_gid != cr->cr_sgid || 7200 (proc = ttoproc(curthread)) == NULL || 7201 (proc->p_flag & SNOCD)) 7202 continue; 7203 } 7204 7205 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7206 cred_t *cr; 7207 cred_t *s_cr = 7208 ecb->dte_state->dts_cred.dcr_cred; 7209 7210 ASSERT(s_cr != NULL); 7211 7212 if ((cr = CRED()) == NULL || 7213 s_cr->cr_zone->zone_id != 7214 cr->cr_zone->zone_id) 7215 continue; 7216 } 7217#endif 7218 } 7219 7220 if (now - state->dts_alive > dtrace_deadman_timeout) { 7221 /* 7222 * We seem to be dead. Unless we (a) have kernel 7223 * destructive permissions (b) have explicitly enabled 7224 * destructive actions and (c) destructive actions have 7225 * not been disabled, we're going to transition into 7226 * the KILLED state, from which no further processing 7227 * on this state will be performed. 
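 * (dts_alive is refreshed periodically by the deadman timer; if it
 * has fallen more than dtrace_deadman_timeout behind, the deadman
 * has evidently been unable to run, presumably because tracing
 * itself has bogged the system down.)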
7228 */ 7229 if (!dtrace_priv_kernel_destructive(state) || 7230 !state->dts_cred.dcr_destructive || 7231 dtrace_destructive_disallow) { 7232 void *activity = &state->dts_activity; 7233 dtrace_activity_t current; 7234 7235 do { 7236 current = state->dts_activity; 7237 } while (dtrace_cas32(activity, current, 7238 DTRACE_ACTIVITY_KILLED) != current); 7239 7240 continue; 7241 } 7242 } 7243 7244 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7245 ecb->dte_alignment, state, &mstate)) < 0) 7246 continue; 7247 7248 tomax = buf->dtb_tomax; 7249 ASSERT(tomax != NULL); 7250 7251 if (ecb->dte_size != 0) { 7252 dtrace_rechdr_t dtrh; 7253 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7254 mstate.dtms_timestamp = dtrace_gethrtime(); 7255 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7256 } 7257 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7258 dtrh.dtrh_epid = ecb->dte_epid; 7259 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7260 mstate.dtms_timestamp); 7261 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7262 } 7263 7264 mstate.dtms_epid = ecb->dte_epid; 7265 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7266 7267 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7268 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7269 else 7270 mstate.dtms_access = 0; 7271 7272 if (pred != NULL) { 7273 dtrace_difo_t *dp = pred->dtp_difo; 7274 int rval; 7275 7276 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7277 7278 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7279 dtrace_cacheid_t cid = probe->dtpr_predcache; 7280 7281 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7282 /* 7283 * Update the predicate cache... 7284 */ 7285 ASSERT(cid == pred->dtp_cacheid); 7286 curthread->t_predcache = cid; 7287 } 7288 7289 continue; 7290 } 7291 } 7292 7293 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7294 act != NULL; act = act->dta_next) { 7295 size_t valoffs; 7296 dtrace_difo_t *dp; 7297 dtrace_recdesc_t *rec = &act->dta_rec; 7298 7299 size = rec->dtrd_size; 7300 valoffs = offs + rec->dtrd_offset; 7301 7302 if (DTRACEACT_ISAGG(act->dta_kind)) { 7303 uint64_t v = 0xbad; 7304 dtrace_aggregation_t *agg; 7305 7306 agg = (dtrace_aggregation_t *)act; 7307 7308 if ((dp = act->dta_difo) != NULL) 7309 v = dtrace_dif_emulate(dp, 7310 &mstate, vstate, state); 7311 7312 if (*flags & CPU_DTRACE_ERROR) 7313 continue; 7314 7315 /* 7316 * Note that we always pass the expression 7317 * value from the previous iteration of the 7318 * action loop. This value will only be used 7319 * if there is an expression argument to the 7320 * aggregating action, denoted by the 7321 * dtag_hasarg field. 7322 */ 7323 dtrace_aggregate(agg, buf, 7324 offs, aggbuf, v, val); 7325 continue; 7326 } 7327 7328 switch (act->dta_kind) { 7329 case DTRACEACT_STOP: 7330 if (dtrace_priv_proc_destructive(state)) 7331 dtrace_action_stop(); 7332 continue; 7333 7334 case DTRACEACT_BREAKPOINT: 7335 if (dtrace_priv_kernel_destructive(state)) 7336 dtrace_action_breakpoint(ecb); 7337 continue; 7338 7339 case DTRACEACT_PANIC: 7340 if (dtrace_priv_kernel_destructive(state)) 7341 dtrace_action_panic(ecb); 7342 continue; 7343 7344 case DTRACEACT_STACK: 7345 if (!dtrace_priv_kernel(state)) 7346 continue; 7347 7348 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7349 size / sizeof (pc_t), probe->dtpr_aframes, 7350 DTRACE_ANCHORED(probe) ? NULL : 7351 (uint32_t *)arg0); 7352 continue; 7353 7354 case DTRACEACT_JSTACK: 7355 case DTRACEACT_USTACK: 7356 if (!dtrace_priv_proc(state)) 7357 continue; 7358 7359 /* 7360 * See comment in DIF_VAR_PID. 
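 * (Briefly: if an anchored probe fires at interrupt level, the
 * interrupted user stack bears no relation to the probe, so we
 * zero-fill the ustack() record below rather than walk it.)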
7361 */ 7362 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7363 CPU_ON_INTR(CPU)) { 7364 int depth = DTRACE_USTACK_NFRAMES( 7365 rec->dtrd_arg) + 1; 7366 7367 dtrace_bzero((void *)(tomax + valoffs), 7368 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7369 + depth * sizeof (uint64_t)); 7370 7371 continue; 7372 } 7373 7374 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7375 curproc->p_dtrace_helpers != NULL) { 7376 /* 7377 * This is the slow path -- we have 7378 * allocated string space, and we're 7379 * getting the stack of a process that 7380 * has helpers. Call into a separate 7381 * routine to perform this processing. 7382 */ 7383 dtrace_action_ustack(&mstate, state, 7384 (uint64_t *)(tomax + valoffs), 7385 rec->dtrd_arg); 7386 continue; 7387 } 7388 7389 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7390 dtrace_getupcstack((uint64_t *) 7391 (tomax + valoffs), 7392 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7393 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7394 continue; 7395 7396 default: 7397 break; 7398 } 7399 7400 dp = act->dta_difo; 7401 ASSERT(dp != NULL); 7402 7403 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7404 7405 if (*flags & CPU_DTRACE_ERROR) 7406 continue; 7407 7408 switch (act->dta_kind) { 7409 case DTRACEACT_SPECULATE: { 7410 dtrace_rechdr_t *dtrh; 7411 7412 ASSERT(buf == &state->dts_buffer[cpuid]); 7413 buf = dtrace_speculation_buffer(state, 7414 cpuid, val); 7415 7416 if (buf == NULL) { 7417 *flags |= CPU_DTRACE_DROP; 7418 continue; 7419 } 7420 7421 offs = dtrace_buffer_reserve(buf, 7422 ecb->dte_needed, ecb->dte_alignment, 7423 state, NULL); 7424 7425 if (offs < 0) { 7426 *flags |= CPU_DTRACE_DROP; 7427 continue; 7428 } 7429 7430 tomax = buf->dtb_tomax; 7431 ASSERT(tomax != NULL); 7432 7433 if (ecb->dte_size == 0) 7434 continue; 7435 7436 ASSERT3U(ecb->dte_size, >=, 7437 sizeof (dtrace_rechdr_t)); 7438 dtrh = ((void *)(tomax + offs)); 7439 dtrh->dtrh_epid = ecb->dte_epid; 7440 /* 7441 * When the speculation is committed, all of 7442 * the records in the speculative buffer will 7443 * have their timestamps set to the commit 7444 * time. Until then, it is set to a sentinel 7445 * value, for debugability. 7446 */ 7447 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7448 continue; 7449 } 7450 7451 case DTRACEACT_PRINTM: { 7452 /* The DIF returns a 'memref'. */ 7453 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7454 7455 /* Get the size from the memref. */ 7456 size = memref[1]; 7457 7458 /* 7459 * Check if the size exceeds the allocated 7460 * buffer size. 7461 */ 7462 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7463 /* Flag a drop! */ 7464 *flags |= CPU_DTRACE_DROP; 7465 continue; 7466 } 7467 7468 /* Store the size in the buffer first. */ 7469 DTRACE_STORE(uintptr_t, tomax, 7470 valoffs, size); 7471 7472 /* 7473 * Offset the buffer address to the start 7474 * of the data. 7475 */ 7476 valoffs += sizeof(uintptr_t); 7477 7478 /* 7479 * Reset to the memory address rather than 7480 * the memref array, then let the BYREF 7481 * code below do the work to store the 7482 * memory data in the buffer. 7483 */ 7484 val = memref[0]; 7485 break; 7486 } 7487 7488 case DTRACEACT_PRINTT: { 7489 /* The DIF returns a 'typeref'. */ 7490 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 7491 char c = '\0' + 1; 7492 size_t s; 7493 7494 /* 7495 * Get the type string length and round it 7496 * up so that the data that follows is 7497 * aligned for easy access. 
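 * The record laid down for printt() is thus: the total data size,
 * the per-element type size, the NUL-terminated type string padded
 * out to pointer alignment, and finally the data itself (stored by
 * the BYREF code below).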
7498 */ 7499 size_t typs = strlen((char *) typeref[2]) + 1; 7500 typs = roundup(typs, sizeof(uintptr_t)); 7501 7502 /* 7503 *Get the size from the typeref using the 7504 * number of elements and the type size. 7505 */ 7506 size = typeref[1] * typeref[3]; 7507 7508 /* 7509 * Check if the size exceeds the allocated 7510 * buffer size. 7511 */ 7512 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7513 /* Flag a drop! */ 7514 *flags |= CPU_DTRACE_DROP; 7515 7516 } 7517 7518 /* Store the size in the buffer first. */ 7519 DTRACE_STORE(uintptr_t, tomax, 7520 valoffs, size); 7521 valoffs += sizeof(uintptr_t); 7522 7523 /* Store the type size in the buffer. */ 7524 DTRACE_STORE(uintptr_t, tomax, 7525 valoffs, typeref[3]); 7526 valoffs += sizeof(uintptr_t); 7527 7528 val = typeref[2]; 7529 7530 for (s = 0; s < typs; s++) { 7531 if (c != '\0') 7532 c = dtrace_load8(val++); 7533 7534 DTRACE_STORE(uint8_t, tomax, 7535 valoffs++, c); 7536 } 7537 7538 /* 7539 * Reset to the memory address rather than 7540 * the typeref array, then let the BYREF 7541 * code below do the work to store the 7542 * memory data in the buffer. 7543 */ 7544 val = typeref[0]; 7545 break; 7546 } 7547 7548 case DTRACEACT_CHILL: 7549 if (dtrace_priv_kernel_destructive(state)) 7550 dtrace_action_chill(&mstate, val); 7551 continue; 7552 7553 case DTRACEACT_RAISE: 7554 if (dtrace_priv_proc_destructive(state)) 7555 dtrace_action_raise(val); 7556 continue; 7557 7558 case DTRACEACT_COMMIT: 7559 ASSERT(!committed); 7560 7561 /* 7562 * We need to commit our buffer state. 7563 */ 7564 if (ecb->dte_size) 7565 buf->dtb_offset = offs + ecb->dte_size; 7566 buf = &state->dts_buffer[cpuid]; 7567 dtrace_speculation_commit(state, cpuid, val); 7568 committed = 1; 7569 continue; 7570 7571 case DTRACEACT_DISCARD: 7572 dtrace_speculation_discard(state, cpuid, val); 7573 continue; 7574 7575 case DTRACEACT_DIFEXPR: 7576 case DTRACEACT_LIBACT: 7577 case DTRACEACT_PRINTF: 7578 case DTRACEACT_PRINTA: 7579 case DTRACEACT_SYSTEM: 7580 case DTRACEACT_FREOPEN: 7581 case DTRACEACT_TRACEMEM: 7582 break; 7583 7584 case DTRACEACT_TRACEMEM_DYNSIZE: 7585 tracememsize = val; 7586 break; 7587 7588 case DTRACEACT_SYM: 7589 case DTRACEACT_MOD: 7590 if (!dtrace_priv_kernel(state)) 7591 continue; 7592 break; 7593 7594 case DTRACEACT_USYM: 7595 case DTRACEACT_UMOD: 7596 case DTRACEACT_UADDR: { 7597#if defined(sun) 7598 struct pid *pid = curthread->t_procp->p_pidp; 7599#endif 7600 7601 if (!dtrace_priv_proc(state)) 7602 continue; 7603 7604 DTRACE_STORE(uint64_t, tomax, 7605#if defined(sun) 7606 valoffs, (uint64_t)pid->pid_id); 7607#else 7608 valoffs, (uint64_t) curproc->p_pid); 7609#endif 7610 DTRACE_STORE(uint64_t, tomax, 7611 valoffs + sizeof (uint64_t), val); 7612 7613 continue; 7614 } 7615 7616 case DTRACEACT_EXIT: { 7617 /* 7618 * For the exit action, we are going to attempt 7619 * to atomically set our activity to be 7620 * draining. If this fails (either because 7621 * another CPU has beat us to the exit action, 7622 * or because our current activity is something 7623 * other than ACTIVE or WARMUP), we will 7624 * continue. This assures that the exit action 7625 * can be successfully recorded at most once 7626 * when we're in the ACTIVE state. If we're 7627 * encountering the exit() action while in 7628 * COOLDOWN, however, we want to honor the new 7629 * status code. (We know that we're the only 7630 * thread in COOLDOWN, so there is no race.) 
7631 */ 7632 void *activity = &state->dts_activity; 7633 dtrace_activity_t current = state->dts_activity; 7634 7635 if (current == DTRACE_ACTIVITY_COOLDOWN) 7636 break; 7637 7638 if (current != DTRACE_ACTIVITY_WARMUP) 7639 current = DTRACE_ACTIVITY_ACTIVE; 7640 7641 if (dtrace_cas32(activity, current, 7642 DTRACE_ACTIVITY_DRAINING) != current) { 7643 *flags |= CPU_DTRACE_DROP; 7644 continue; 7645 } 7646 7647 break; 7648 } 7649 7650 default: 7651 ASSERT(0); 7652 } 7653 7654 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7655 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7656 uintptr_t end = valoffs + size; 7657 7658 if (tracememsize != 0 && 7659 valoffs + tracememsize < end) { 7660 end = valoffs + tracememsize; 7661 tracememsize = 0; 7662 } 7663 7664 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7665 !dtrace_vcanload((void *)(uintptr_t)val, 7666 &dp->dtdo_rtype, &mstate, vstate)) 7667 continue; 7668 7669 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7670 &val, end, act->dta_intuple, 7671 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7672 DIF_TF_BYREF: DIF_TF_BYUREF); 7673 continue; 7674 } 7675 7676 switch (size) { 7677 case 0: 7678 break; 7679 7680 case sizeof (uint8_t): 7681 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7682 break; 7683 case sizeof (uint16_t): 7684 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7685 break; 7686 case sizeof (uint32_t): 7687 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7688 break; 7689 case sizeof (uint64_t): 7690 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7691 break; 7692 default: 7693 /* 7694 * Any other size should have been returned by 7695 * reference, not by value. 7696 */ 7697 ASSERT(0); 7698 break; 7699 } 7700 } 7701 7702 if (*flags & CPU_DTRACE_DROP) 7703 continue; 7704 7705 if (*flags & CPU_DTRACE_FAULT) { 7706 int ndx; 7707 dtrace_action_t *err; 7708 7709 buf->dtb_errors++; 7710 7711 if (probe->dtpr_id == dtrace_probeid_error) { 7712 /* 7713 * There's nothing we can do -- we had an 7714 * error on the error probe. We bump an 7715 * error counter to at least indicate that 7716 * this condition happened. 7717 */ 7718 dtrace_error(&state->dts_dblerrors); 7719 continue; 7720 } 7721 7722 if (vtime) { 7723 /* 7724 * Before recursing on dtrace_probe(), we 7725 * need to explicitly clear out our start 7726 * time to prevent it from being accumulated 7727 * into t_dtrace_vtime. 7728 */ 7729 curthread->t_dtrace_start = 0; 7730 } 7731 7732 /* 7733 * Iterate over the actions to figure out which action 7734 * we were processing when we experienced the error. 7735 * Note that act points _past_ the faulting action; if 7736 * act is ecb->dte_action, the fault was in the 7737 * predicate, if it's ecb->dte_action->dta_next it's 7738 * in action #1, and so on. 7739 */ 7740 for (err = ecb->dte_action, ndx = 0; 7741 err != act; err = err->dta_next, ndx++) 7742 continue; 7743 7744 dtrace_probe_error(state, ecb->dte_epid, ndx, 7745 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7746 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7747 cpu_core[cpuid].cpuc_dtrace_illval); 7748 7749 continue; 7750 } 7751 7752 if (!committed) 7753 buf->dtb_offset = offs + ecb->dte_size; 7754 } 7755 7756 if (vtime) 7757 curthread->t_dtrace_start = dtrace_gethrtime(); 7758 7759 dtrace_interrupt_enable(cookie); 7760} 7761 7762/* 7763 * DTrace Probe Hashing Functions 7764 * 7765 * The functions in this section (and indeed, the functions in remaining 7766 * sections) are not _called_ from probe context. (Any exceptions to this are 7767 * marked with a "Note:".) 
Rather, they are called from elsewhere in the 7768 * DTrace framework to look-up probes in, add probes to and remove probes from 7769 * the DTrace probe hashes. (Each probe is hashed by each element of the 7770 * probe tuple -- allowing for fast lookups, regardless of what was 7771 * specified.) 7772 */ 7773static uint_t 7774dtrace_hash_str(const char *p) 7775{ 7776 unsigned int g; 7777 uint_t hval = 0; 7778 7779 while (*p) { 7780 hval = (hval << 4) + *p++; 7781 if ((g = (hval & 0xf0000000)) != 0) 7782 hval ^= g >> 24; 7783 hval &= ~g; 7784 } 7785 return (hval); 7786} 7787 7788static dtrace_hash_t * 7789dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 7790{ 7791 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 7792 7793 hash->dth_stroffs = stroffs; 7794 hash->dth_nextoffs = nextoffs; 7795 hash->dth_prevoffs = prevoffs; 7796 7797 hash->dth_size = 1; 7798 hash->dth_mask = hash->dth_size - 1; 7799 7800 hash->dth_tab = kmem_zalloc(hash->dth_size * 7801 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 7802 7803 return (hash); 7804} 7805 7806static void 7807dtrace_hash_destroy(dtrace_hash_t *hash) 7808{ 7809#ifdef DEBUG 7810 int i; 7811 7812 for (i = 0; i < hash->dth_size; i++) 7813 ASSERT(hash->dth_tab[i] == NULL); 7814#endif 7815 7816 kmem_free(hash->dth_tab, 7817 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 7818 kmem_free(hash, sizeof (dtrace_hash_t)); 7819} 7820 7821static void 7822dtrace_hash_resize(dtrace_hash_t *hash) 7823{ 7824 int size = hash->dth_size, i, ndx; 7825 int new_size = hash->dth_size << 1; 7826 int new_mask = new_size - 1; 7827 dtrace_hashbucket_t **new_tab, *bucket, *next; 7828 7829 ASSERT((new_size & new_mask) == 0); 7830 7831 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 7832 7833 for (i = 0; i < size; i++) { 7834 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 7835 dtrace_probe_t *probe = bucket->dthb_chain; 7836 7837 ASSERT(probe != NULL); 7838 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 7839 7840 next = bucket->dthb_next; 7841 bucket->dthb_next = new_tab[ndx]; 7842 new_tab[ndx] = bucket; 7843 } 7844 } 7845 7846 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 7847 hash->dth_tab = new_tab; 7848 hash->dth_size = new_size; 7849 hash->dth_mask = new_mask; 7850} 7851 7852static void 7853dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 7854{ 7855 int hashval = DTRACE_HASHSTR(hash, new); 7856 int ndx = hashval & hash->dth_mask; 7857 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7858 dtrace_probe_t **nextp, **prevp; 7859 7860 for (; bucket != NULL; bucket = bucket->dthb_next) { 7861 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 7862 goto add; 7863 } 7864 7865 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 7866 dtrace_hash_resize(hash); 7867 dtrace_hash_add(hash, new); 7868 return; 7869 } 7870 7871 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 7872 bucket->dthb_next = hash->dth_tab[ndx]; 7873 hash->dth_tab[ndx] = bucket; 7874 hash->dth_nbuckets++; 7875 7876add: 7877 nextp = DTRACE_HASHNEXT(hash, new); 7878 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 7879 *nextp = bucket->dthb_chain; 7880 7881 if (bucket->dthb_chain != NULL) { 7882 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 7883 ASSERT(*prevp == NULL); 7884 *prevp = new; 7885 } 7886 7887 bucket->dthb_chain = new; 7888 bucket->dthb_len++; 7889} 7890 7891static dtrace_probe_t * 7892dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 7893{ 7894 int hashval = 
DTRACE_HASHSTR(hash, template); 7895 int ndx = hashval & hash->dth_mask; 7896 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7897 7898 for (; bucket != NULL; bucket = bucket->dthb_next) { 7899 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7900 return (bucket->dthb_chain); 7901 } 7902 7903 return (NULL); 7904} 7905 7906static int 7907dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 7908{ 7909 int hashval = DTRACE_HASHSTR(hash, template); 7910 int ndx = hashval & hash->dth_mask; 7911 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7912 7913 for (; bucket != NULL; bucket = bucket->dthb_next) { 7914 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7915 return (bucket->dthb_len); 7916 } 7917 7918 return (0); 7919} 7920 7921static void 7922dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 7923{ 7924 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 7925 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7926 7927 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 7928 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 7929 7930 /* 7931 * Find the bucket that we're removing this probe from. 7932 */ 7933 for (; bucket != NULL; bucket = bucket->dthb_next) { 7934 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 7935 break; 7936 } 7937 7938 ASSERT(bucket != NULL); 7939 7940 if (*prevp == NULL) { 7941 if (*nextp == NULL) { 7942 /* 7943 * The removed probe was the only probe on this 7944 * bucket; we need to remove the bucket. 7945 */ 7946 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 7947 7948 ASSERT(bucket->dthb_chain == probe); 7949 ASSERT(b != NULL); 7950 7951 if (b == bucket) { 7952 hash->dth_tab[ndx] = bucket->dthb_next; 7953 } else { 7954 while (b->dthb_next != bucket) 7955 b = b->dthb_next; 7956 b->dthb_next = bucket->dthb_next; 7957 } 7958 7959 ASSERT(hash->dth_nbuckets > 0); 7960 hash->dth_nbuckets--; 7961 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 7962 return; 7963 } 7964 7965 bucket->dthb_chain = *nextp; 7966 } else { 7967 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 7968 } 7969 7970 if (*nextp != NULL) 7971 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 7972} 7973 7974/* 7975 * DTrace Utility Functions 7976 * 7977 * These are random utility functions that are _not_ called from probe context. 7978 */ 7979static int 7980dtrace_badattr(const dtrace_attribute_t *a) 7981{ 7982 return (a->dtat_name > DTRACE_STABILITY_MAX || 7983 a->dtat_data > DTRACE_STABILITY_MAX || 7984 a->dtat_class > DTRACE_CLASS_MAX); 7985} 7986 7987/* 7988 * Return a duplicate copy of a string. If the specified string is NULL, 7989 * this function returns a zero-length string. 7990 */ 7991static char * 7992dtrace_strdup(const char *str) 7993{ 7994 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 7995 7996 if (str != NULL) 7997 (void) strcpy(new, str); 7998 7999 return (new); 8000} 8001 8002#define DTRACE_ISALPHA(c) \ 8003 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8004 8005static int 8006dtrace_badname(const char *s) 8007{ 8008 char c; 8009 8010 if (s == NULL || (c = *s++) == '\0') 8011 return (0); 8012 8013 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8014 return (1); 8015 8016 while ((c = *s++) != '\0') { 8017 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8018 c != '-' && c != '_' && c != '.' 
&& c != '`') 8019 return (1); 8020 } 8021 8022 return (0); 8023} 8024 8025static void 8026dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8027{ 8028 uint32_t priv; 8029 8030#if defined(sun) 8031 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8032 /* 8033 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 8034 */ 8035 priv = DTRACE_PRIV_ALL; 8036 } else { 8037 *uidp = crgetuid(cr); 8038 *zoneidp = crgetzoneid(cr); 8039 8040 priv = 0; 8041 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8042 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8043 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8044 priv |= DTRACE_PRIV_USER; 8045 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8046 priv |= DTRACE_PRIV_PROC; 8047 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8048 priv |= DTRACE_PRIV_OWNER; 8049 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8050 priv |= DTRACE_PRIV_ZONEOWNER; 8051 } 8052#else 8053 priv = DTRACE_PRIV_ALL; 8054#endif 8055 8056 *privp = priv; 8057} 8058 8059#ifdef DTRACE_ERRDEBUG 8060static void 8061dtrace_errdebug(const char *str) 8062{ 8063 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8064 int occupied = 0; 8065 8066 mutex_enter(&dtrace_errlock); 8067 dtrace_errlast = str; 8068 dtrace_errthread = curthread; 8069 8070 while (occupied++ < DTRACE_ERRHASHSZ) { 8071 if (dtrace_errhash[hval].dter_msg == str) { 8072 dtrace_errhash[hval].dter_count++; 8073 goto out; 8074 } 8075 8076 if (dtrace_errhash[hval].dter_msg != NULL) { 8077 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8078 continue; 8079 } 8080 8081 dtrace_errhash[hval].dter_msg = str; 8082 dtrace_errhash[hval].dter_count = 1; 8083 goto out; 8084 } 8085 8086 panic("dtrace: undersized error hash"); 8087out: 8088 mutex_exit(&dtrace_errlock); 8089} 8090#endif 8091 8092/* 8093 * DTrace Matching Functions 8094 * 8095 * These functions are used to match groups of probes, given some elements of 8096 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8097 */ 8098static int 8099dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8100 zoneid_t zoneid) 8101{ 8102 if (priv != DTRACE_PRIV_ALL) { 8103 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8104 uint32_t match = priv & ppriv; 8105 8106 /* 8107 * No PRIV_DTRACE_* privileges... 8108 */ 8109 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8110 DTRACE_PRIV_KERNEL)) == 0) 8111 return (0); 8112 8113 /* 8114 * No matching bits, but there were bits to match... 8115 */ 8116 if (match == 0 && ppriv != 0) 8117 return (0); 8118 8119 /* 8120 * Need to have permissions to the process, but don't... 8121 */ 8122 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8123 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8124 return (0); 8125 } 8126 8127 /* 8128 * Need to be in the same zone unless we possess the 8129 * privilege to examine all zones. 8130 */ 8131 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8132 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8133 return (0); 8134 } 8135 } 8136 8137 return (1); 8138} 8139 8140/* 8141 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8142 * consists of input pattern strings and an ops-vector to evaluate them. 8143 * This function returns >0 for match, 0 for no match, and <0 for error. 
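 * For example, a description such as "syscall::read:entry" yields
 * a key whose provider, function and name elements are matched by
 * dtrace_match_string() and whose empty module element is matched
 * by dtrace_match_nul(); dtrace_probekey_func() below selects the
 * matching routine for each element.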
8144 */ 8145static int 8146dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8147 uint32_t priv, uid_t uid, zoneid_t zoneid) 8148{ 8149 dtrace_provider_t *pvp = prp->dtpr_provider; 8150 int rv; 8151 8152 if (pvp->dtpv_defunct) 8153 return (0); 8154 8155 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8156 return (rv); 8157 8158 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8159 return (rv); 8160 8161 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8162 return (rv); 8163 8164 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8165 return (rv); 8166 8167 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8168 return (0); 8169 8170 return (rv); 8171} 8172 8173/* 8174 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8175 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8176 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8177 * In addition, all of the recursion cases except for '*' matching have been 8178 * unwound. For '*', we still implement recursive evaluation, but a depth 8179 * counter is maintained and matching is aborted if we recurse too deep. 8180 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8181 */ 8182static int 8183dtrace_match_glob(const char *s, const char *p, int depth) 8184{ 8185 const char *olds; 8186 char s1, c; 8187 int gs; 8188 8189 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8190 return (-1); 8191 8192 if (s == NULL) 8193 s = ""; /* treat NULL as empty string */ 8194 8195top: 8196 olds = s; 8197 s1 = *s++; 8198 8199 if (p == NULL) 8200 return (0); 8201 8202 if ((c = *p++) == '\0') 8203 return (s1 == '\0'); 8204 8205 switch (c) { 8206 case '[': { 8207 int ok = 0, notflag = 0; 8208 char lc = '\0'; 8209 8210 if (s1 == '\0') 8211 return (0); 8212 8213 if (*p == '!') { 8214 notflag = 1; 8215 p++; 8216 } 8217 8218 if ((c = *p++) == '\0') 8219 return (0); 8220 8221 do { 8222 if (c == '-' && lc != '\0' && *p != ']') { 8223 if ((c = *p++) == '\0') 8224 return (0); 8225 if (c == '\\' && (c = *p++) == '\0') 8226 return (0); 8227 8228 if (notflag) { 8229 if (s1 < lc || s1 > c) 8230 ok++; 8231 else 8232 return (0); 8233 } else if (lc <= s1 && s1 <= c) 8234 ok++; 8235 8236 } else if (c == '\\' && (c = *p++) == '\0') 8237 return (0); 8238 8239 lc = c; /* save left-hand 'c' for next iteration */ 8240 8241 if (notflag) { 8242 if (s1 != c) 8243 ok++; 8244 else 8245 return (0); 8246 } else if (s1 == c) 8247 ok++; 8248 8249 if ((c = *p++) == '\0') 8250 return (0); 8251 8252 } while (c != ']'); 8253 8254 if (ok) 8255 goto top; 8256 8257 return (0); 8258 } 8259 8260 case '\\': 8261 if ((c = *p++) == '\0') 8262 return (0); 8263 /*FALLTHRU*/ 8264 8265 default: 8266 if (c != s1) 8267 return (0); 8268 /*FALLTHRU*/ 8269 8270 case '?': 8271 if (s1 != '\0') 8272 goto top; 8273 return (0); 8274 8275 case '*': 8276 while (*p == '*') 8277 p++; /* consecutive *'s are identical to a single one */ 8278 8279 if (*p == '\0') 8280 return (1); 8281 8282 for (s = olds; *s != '\0'; s++) { 8283 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8284 return (gs); 8285 } 8286 8287 return (0); 8288 } 8289} 8290 8291/*ARGSUSED*/ 8292static int 8293dtrace_match_string(const char *s, const char *p, int depth) 8294{ 8295 return (s != NULL && strcmp(s, p) == 0); 8296} 8297 8298/*ARGSUSED*/ 8299static int 8300dtrace_match_nul(const char *s, const char *p, int depth) 8301{ 8302 return (1); /* always match the 
empty pattern */ 8303} 8304 8305/*ARGSUSED*/ 8306static int 8307dtrace_match_nonzero(const char *s, const char *p, int depth) 8308{ 8309 return (s != NULL && s[0] != '\0'); 8310} 8311 8312static int 8313dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8314 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8315{ 8316 dtrace_probe_t template, *probe; 8317 dtrace_hash_t *hash = NULL; 8318 int len, best = INT_MAX, nmatched = 0; 8319 dtrace_id_t i; 8320 8321 ASSERT(MUTEX_HELD(&dtrace_lock)); 8322 8323 /* 8324 * If the probe ID is specified in the key, just lookup by ID and 8325 * invoke the match callback once if a matching probe is found. 8326 */ 8327 if (pkp->dtpk_id != DTRACE_IDNONE) { 8328 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8329 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8330 (void) (*matched)(probe, arg); 8331 nmatched++; 8332 } 8333 return (nmatched); 8334 } 8335 8336 template.dtpr_mod = (char *)pkp->dtpk_mod; 8337 template.dtpr_func = (char *)pkp->dtpk_func; 8338 template.dtpr_name = (char *)pkp->dtpk_name; 8339 8340 /* 8341 * We want to find the most distinct of the module name, function 8342 * name, and name. So for each one that is not a glob pattern or 8343 * empty string, we perform a lookup in the corresponding hash and 8344 * use the hash table with the fewest collisions to do our search. 8345 */ 8346 if (pkp->dtpk_mmatch == &dtrace_match_string && 8347 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8348 best = len; 8349 hash = dtrace_bymod; 8350 } 8351 8352 if (pkp->dtpk_fmatch == &dtrace_match_string && 8353 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8354 best = len; 8355 hash = dtrace_byfunc; 8356 } 8357 8358 if (pkp->dtpk_nmatch == &dtrace_match_string && 8359 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8360 best = len; 8361 hash = dtrace_byname; 8362 } 8363 8364 /* 8365 * If we did not select a hash table, iterate over every probe and 8366 * invoke our callback for each one that matches our input probe key. 8367 */ 8368 if (hash == NULL) { 8369 for (i = 0; i < dtrace_nprobes; i++) { 8370 if ((probe = dtrace_probes[i]) == NULL || 8371 dtrace_match_probe(probe, pkp, priv, uid, 8372 zoneid) <= 0) 8373 continue; 8374 8375 nmatched++; 8376 8377 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8378 break; 8379 } 8380 8381 return (nmatched); 8382 } 8383 8384 /* 8385 * If we selected a hash table, iterate over each probe of the same key 8386 * name and invoke the callback for every probe that matches the other 8387 * attributes of our input probe key. 8388 */ 8389 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8390 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8391 8392 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8393 continue; 8394 8395 nmatched++; 8396 8397 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8398 break; 8399 } 8400 8401 return (nmatched); 8402} 8403 8404/* 8405 * Return the function pointer dtrace_probecmp() should use to compare the 8406 * specified pattern with a string. For NULL or empty patterns, we select 8407 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8408 * For non-empty non-glob strings, we use dtrace_match_string(). 8409 */ 8410static dtrace_probekey_f * 8411dtrace_probekey_func(const char *p) 8412{ 8413 char c; 8414 8415 if (p == NULL || *p == '\0') 8416 return (&dtrace_match_nul); 8417 8418 while ((c = *p++) != '\0') { 8419 if (c == '[' || c == '?' 
	    || c == '*' || c == '\\')
			return (&dtrace_match_glob);
	}

	return (&dtrace_match_string);
}

/*
 * Build a probe comparison key for use with dtrace_match_probe() from the
 * given probe description.  By convention, a null key only matches anchored
 * probes: if each field is the empty string, reset dtpk_fmatch to
 * dtrace_match_nonzero().
 */
static void
dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
{
	pkp->dtpk_prov = pdp->dtpd_provider;
	pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);

	pkp->dtpk_mod = pdp->dtpd_mod;
	pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);

	pkp->dtpk_func = pdp->dtpd_func;
	pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);

	pkp->dtpk_name = pdp->dtpd_name;
	pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);

	pkp->dtpk_id = pdp->dtpd_id;

	if (pkp->dtpk_id == DTRACE_IDNONE &&
	    pkp->dtpk_pmatch == &dtrace_match_nul &&
	    pkp->dtpk_mmatch == &dtrace_match_nul &&
	    pkp->dtpk_fmatch == &dtrace_match_nul &&
	    pkp->dtpk_nmatch == &dtrace_match_nul)
		pkp->dtpk_fmatch = &dtrace_match_nonzero;
}

/*
 * DTrace Provider-to-Framework API Functions
 *
 * These functions implement much of the Provider-to-Framework API, as
 * described in <sys/dtrace.h>.  The parts of the API not in this section are
 * the functions in the API for probe management (found below), and
 * dtrace_probe() itself (found above).
 */

/*
 * Register the calling provider with the DTrace framework.  This should
 * generally be called by DTrace providers in their attach(9E) entry point.
 */
int
dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
    cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
{
	dtrace_provider_t *provider;

	if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
		    "arguments", name ?
name : "<NULL>"); 8479 return (EINVAL); 8480 } 8481 8482 if (name[0] == '\0' || dtrace_badname(name)) { 8483 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8484 "provider name", name); 8485 return (EINVAL); 8486 } 8487 8488 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8489 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8490 pops->dtps_destroy == NULL || 8491 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8492 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8493 "provider ops", name); 8494 return (EINVAL); 8495 } 8496 8497 if (dtrace_badattr(&pap->dtpa_provider) || 8498 dtrace_badattr(&pap->dtpa_mod) || 8499 dtrace_badattr(&pap->dtpa_func) || 8500 dtrace_badattr(&pap->dtpa_name) || 8501 dtrace_badattr(&pap->dtpa_args)) { 8502 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8503 "provider attributes", name); 8504 return (EINVAL); 8505 } 8506 8507 if (priv & ~DTRACE_PRIV_ALL) { 8508 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8509 "privilege attributes", name); 8510 return (EINVAL); 8511 } 8512 8513 if ((priv & DTRACE_PRIV_KERNEL) && 8514 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8515 pops->dtps_usermode == NULL) { 8516 cmn_err(CE_WARN, "failed to register provider '%s': need " 8517 "dtps_usermode() op for given privilege attributes", name); 8518 return (EINVAL); 8519 } 8520 8521 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8522 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8523 (void) strcpy(provider->dtpv_name, name); 8524 8525 provider->dtpv_attr = *pap; 8526 provider->dtpv_priv.dtpp_flags = priv; 8527 if (cr != NULL) { 8528 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8529 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8530 } 8531 provider->dtpv_pops = *pops; 8532 8533 if (pops->dtps_provide == NULL) { 8534 ASSERT(pops->dtps_provide_module != NULL); 8535 provider->dtpv_pops.dtps_provide = 8536 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8537 } 8538 8539 if (pops->dtps_provide_module == NULL) { 8540 ASSERT(pops->dtps_provide != NULL); 8541 provider->dtpv_pops.dtps_provide_module = 8542 (void (*)(void *, modctl_t *))dtrace_nullop; 8543 } 8544 8545 if (pops->dtps_suspend == NULL) { 8546 ASSERT(pops->dtps_resume == NULL); 8547 provider->dtpv_pops.dtps_suspend = 8548 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8549 provider->dtpv_pops.dtps_resume = 8550 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8551 } 8552 8553 provider->dtpv_arg = arg; 8554 *idp = (dtrace_provider_id_t)provider; 8555 8556 if (pops == &dtrace_provider_ops) { 8557 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8558 ASSERT(MUTEX_HELD(&dtrace_lock)); 8559 ASSERT(dtrace_anon.dta_enabling == NULL); 8560 8561 /* 8562 * We make sure that the DTrace provider is at the head of 8563 * the provider chain. 8564 */ 8565 provider->dtpv_next = dtrace_provider; 8566 dtrace_provider = provider; 8567 return (0); 8568 } 8569 8570 mutex_enter(&dtrace_provider_lock); 8571 mutex_enter(&dtrace_lock); 8572 8573 /* 8574 * If there is at least one provider registered, we'll add this 8575 * provider after the first provider. 
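	 * (The head of the chain is reserved for the DTrace provider
	 * itself; see the pops == &dtrace_provider_ops case above.)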
8576 */ 8577 if (dtrace_provider != NULL) { 8578 provider->dtpv_next = dtrace_provider->dtpv_next; 8579 dtrace_provider->dtpv_next = provider; 8580 } else { 8581 dtrace_provider = provider; 8582 } 8583 8584 if (dtrace_retained != NULL) { 8585 dtrace_enabling_provide(provider); 8586 8587 /* 8588 * Now we need to call dtrace_enabling_matchall() -- which 8589 * will acquire cpu_lock and dtrace_lock. We therefore need 8590 * to drop all of our locks before calling into it... 8591 */ 8592 mutex_exit(&dtrace_lock); 8593 mutex_exit(&dtrace_provider_lock); 8594 dtrace_enabling_matchall(); 8595 8596 return (0); 8597 } 8598 8599 mutex_exit(&dtrace_lock); 8600 mutex_exit(&dtrace_provider_lock); 8601 8602 return (0); 8603} 8604 8605/* 8606 * Unregister the specified provider from the DTrace framework. This should 8607 * generally be called by DTrace providers in their detach(9E) entry point. 8608 */ 8609int 8610dtrace_unregister(dtrace_provider_id_t id) 8611{ 8612 dtrace_provider_t *old = (dtrace_provider_t *)id; 8613 dtrace_provider_t *prev = NULL; 8614 int i, self = 0, noreap = 0; 8615 dtrace_probe_t *probe, *first = NULL; 8616 8617 if (old->dtpv_pops.dtps_enable == 8618 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8619 /* 8620 * If DTrace itself is the provider, we're called with locks 8621 * already held. 8622 */ 8623 ASSERT(old == dtrace_provider); 8624#if defined(sun) 8625 ASSERT(dtrace_devi != NULL); 8626#endif 8627 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8628 ASSERT(MUTEX_HELD(&dtrace_lock)); 8629 self = 1; 8630 8631 if (dtrace_provider->dtpv_next != NULL) { 8632 /* 8633 * There's another provider here; return failure. 8634 */ 8635 return (EBUSY); 8636 } 8637 } else { 8638 mutex_enter(&dtrace_provider_lock); 8639#if defined(sun) 8640 mutex_enter(&mod_lock); 8641#endif 8642 mutex_enter(&dtrace_lock); 8643 } 8644 8645 /* 8646 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8647 * probes, we refuse to let providers slither away, unless this 8648 * provider has already been explicitly invalidated. 8649 */ 8650 if (!old->dtpv_defunct && 8651 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8652 dtrace_anon.dta_state->dts_necbs > 0))) { 8653 if (!self) { 8654 mutex_exit(&dtrace_lock); 8655#if defined(sun) 8656 mutex_exit(&mod_lock); 8657#endif 8658 mutex_exit(&dtrace_provider_lock); 8659 } 8660 return (EBUSY); 8661 } 8662 8663 /* 8664 * Attempt to destroy the probes associated with this provider. 8665 */ 8666 for (i = 0; i < dtrace_nprobes; i++) { 8667 if ((probe = dtrace_probes[i]) == NULL) 8668 continue; 8669 8670 if (probe->dtpr_provider != old) 8671 continue; 8672 8673 if (probe->dtpr_ecb == NULL) 8674 continue; 8675 8676 /* 8677 * If we are trying to unregister a defunct provider, and the 8678 * provider was made defunct within the interval dictated by 8679 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8680 * attempt to reap our enablings. To denote that the provider 8681 * should reattempt to unregister itself at some point in the 8682 * future, we will return a differentiable error code (EAGAIN 8683 * instead of EBUSY) in this case. 
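		 * (Conversely, if the dtrace_unregister_defunct_reap
		 * interval has already elapsed, we return EBUSY without
		 * dispatching the reap.)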
8684 */ 8685 if (dtrace_gethrtime() - old->dtpv_defunct > 8686 dtrace_unregister_defunct_reap) 8687 noreap = 1; 8688 8689 if (!self) { 8690 mutex_exit(&dtrace_lock); 8691#if defined(sun) 8692 mutex_exit(&mod_lock); 8693#endif 8694 mutex_exit(&dtrace_provider_lock); 8695 } 8696 8697 if (noreap) 8698 return (EBUSY); 8699 8700 (void) taskq_dispatch(dtrace_taskq, 8701 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8702 8703 return (EAGAIN); 8704 } 8705 8706 /* 8707 * All of the probes for this provider are disabled; we can safely 8708 * remove all of them from their hash chains and from the probe array. 8709 */ 8710 for (i = 0; i < dtrace_nprobes; i++) { 8711 if ((probe = dtrace_probes[i]) == NULL) 8712 continue; 8713 8714 if (probe->dtpr_provider != old) 8715 continue; 8716 8717 dtrace_probes[i] = NULL; 8718 8719 dtrace_hash_remove(dtrace_bymod, probe); 8720 dtrace_hash_remove(dtrace_byfunc, probe); 8721 dtrace_hash_remove(dtrace_byname, probe); 8722 8723 if (first == NULL) { 8724 first = probe; 8725 probe->dtpr_nextmod = NULL; 8726 } else { 8727 probe->dtpr_nextmod = first; 8728 first = probe; 8729 } 8730 } 8731 8732 /* 8733 * The provider's probes have been removed from the hash chains and 8734 * from the probe array. Now issue a dtrace_sync() to be sure that 8735 * everyone has cleared out from any probe array processing. 8736 */ 8737 dtrace_sync(); 8738 8739 for (probe = first; probe != NULL; probe = first) { 8740 first = probe->dtpr_nextmod; 8741 8742 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8743 probe->dtpr_arg); 8744 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8745 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8746 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8747#if defined(sun) 8748 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8749#else 8750 free_unr(dtrace_arena, probe->dtpr_id); 8751#endif 8752 kmem_free(probe, sizeof (dtrace_probe_t)); 8753 } 8754 8755 if ((prev = dtrace_provider) == old) { 8756#if defined(sun) 8757 ASSERT(self || dtrace_devi == NULL); 8758 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8759#endif 8760 dtrace_provider = old->dtpv_next; 8761 } else { 8762 while (prev != NULL && prev->dtpv_next != old) 8763 prev = prev->dtpv_next; 8764 8765 if (prev == NULL) { 8766 panic("attempt to unregister non-existent " 8767 "dtrace provider %p\n", (void *)id); 8768 } 8769 8770 prev->dtpv_next = old->dtpv_next; 8771 } 8772 8773 if (!self) { 8774 mutex_exit(&dtrace_lock); 8775#if defined(sun) 8776 mutex_exit(&mod_lock); 8777#endif 8778 mutex_exit(&dtrace_provider_lock); 8779 } 8780 8781 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 8782 kmem_free(old, sizeof (dtrace_provider_t)); 8783 8784 return (0); 8785} 8786 8787/* 8788 * Invalidate the specified provider. All subsequent probe lookups for the 8789 * specified provider will fail, but its probes will not be removed. 8790 */ 8791void 8792dtrace_invalidate(dtrace_provider_id_t id) 8793{ 8794 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 8795 8796 ASSERT(pvp->dtpv_pops.dtps_enable != 8797 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8798 8799 mutex_enter(&dtrace_provider_lock); 8800 mutex_enter(&dtrace_lock); 8801 8802 pvp->dtpv_defunct = dtrace_gethrtime(); 8803 8804 mutex_exit(&dtrace_lock); 8805 mutex_exit(&dtrace_provider_lock); 8806} 8807 8808/* 8809 * Indicate whether or not DTrace has attached. 
8810 */ 8811int 8812dtrace_attached(void) 8813{ 8814 /* 8815 * dtrace_provider will be non-NULL iff the DTrace driver has 8816 * attached. (It's non-NULL because DTrace is always itself a 8817 * provider.) 8818 */ 8819 return (dtrace_provider != NULL); 8820} 8821 8822/* 8823 * Remove all the unenabled probes for the given provider. This function is 8824 * not unlike dtrace_unregister(), except that it doesn't remove the provider 8825 * -- just as many of its associated probes as it can. 8826 */ 8827int 8828dtrace_condense(dtrace_provider_id_t id) 8829{ 8830 dtrace_provider_t *prov = (dtrace_provider_t *)id; 8831 int i; 8832 dtrace_probe_t *probe; 8833 8834 /* 8835 * Make sure this isn't the dtrace provider itself. 8836 */ 8837 ASSERT(prov->dtpv_pops.dtps_enable != 8838 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8839 8840 mutex_enter(&dtrace_provider_lock); 8841 mutex_enter(&dtrace_lock); 8842 8843 /* 8844 * Attempt to destroy the probes associated with this provider. 8845 */ 8846 for (i = 0; i < dtrace_nprobes; i++) { 8847 if ((probe = dtrace_probes[i]) == NULL) 8848 continue; 8849 8850 if (probe->dtpr_provider != prov) 8851 continue; 8852 8853 if (probe->dtpr_ecb != NULL) 8854 continue; 8855 8856 dtrace_probes[i] = NULL; 8857 8858 dtrace_hash_remove(dtrace_bymod, probe); 8859 dtrace_hash_remove(dtrace_byfunc, probe); 8860 dtrace_hash_remove(dtrace_byname, probe); 8861 8862 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 8863 probe->dtpr_arg); 8864 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8865 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8866 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8867 kmem_free(probe, sizeof (dtrace_probe_t)); 8868#if defined(sun) 8869 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 8870#else 8871 free_unr(dtrace_arena, i + 1); 8872#endif 8873 } 8874 8875 mutex_exit(&dtrace_lock); 8876 mutex_exit(&dtrace_provider_lock); 8877 8878 return (0); 8879} 8880 8881/* 8882 * DTrace Probe Management Functions 8883 * 8884 * The functions in this section perform the DTrace probe management, 8885 * including functions to create probes, look-up probes, and call into the 8886 * providers to request that probes be provided. Some of these functions are 8887 * in the Provider-to-Framework API; these functions can be identified by the 8888 * fact that they are not declared "static". 8889 */ 8890 8891/* 8892 * Create a probe with the specified module name, function name, and name. 
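 * Providers generally call this from their dtps_provide() entry point; a
 * purely illustrative sketch (identifiers hypothetical) might look like:
 *
 *	(void) dtrace_probe_create(my_provider_id, "mymod", "myfunc",
 *	    "entry", 0, my_probe_arg);
 *
 * The returned probe ID is subsequently passed back to the provider's other
 * entry points (dtps_enable(), dtps_disable(), etc.).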
8893 */ 8894dtrace_id_t 8895dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 8896 const char *func, const char *name, int aframes, void *arg) 8897{ 8898 dtrace_probe_t *probe, **probes; 8899 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 8900 dtrace_id_t id; 8901 8902 if (provider == dtrace_provider) { 8903 ASSERT(MUTEX_HELD(&dtrace_lock)); 8904 } else { 8905 mutex_enter(&dtrace_lock); 8906 } 8907 8908#if defined(sun) 8909 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 8910 VM_BESTFIT | VM_SLEEP); 8911#else 8912 id = alloc_unr(dtrace_arena); 8913#endif 8914 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 8915 8916 probe->dtpr_id = id; 8917 probe->dtpr_gen = dtrace_probegen++; 8918 probe->dtpr_mod = dtrace_strdup(mod); 8919 probe->dtpr_func = dtrace_strdup(func); 8920 probe->dtpr_name = dtrace_strdup(name); 8921 probe->dtpr_arg = arg; 8922 probe->dtpr_aframes = aframes; 8923 probe->dtpr_provider = provider; 8924 8925 dtrace_hash_add(dtrace_bymod, probe); 8926 dtrace_hash_add(dtrace_byfunc, probe); 8927 dtrace_hash_add(dtrace_byname, probe); 8928 8929 if (id - 1 >= dtrace_nprobes) { 8930 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 8931 size_t nsize = osize << 1; 8932 8933 if (nsize == 0) { 8934 ASSERT(osize == 0); 8935 ASSERT(dtrace_probes == NULL); 8936 nsize = sizeof (dtrace_probe_t *); 8937 } 8938 8939 probes = kmem_zalloc(nsize, KM_SLEEP); 8940 8941 if (dtrace_probes == NULL) { 8942 ASSERT(osize == 0); 8943 dtrace_probes = probes; 8944 dtrace_nprobes = 1; 8945 } else { 8946 dtrace_probe_t **oprobes = dtrace_probes; 8947 8948 bcopy(oprobes, probes, osize); 8949 dtrace_membar_producer(); 8950 dtrace_probes = probes; 8951 8952 dtrace_sync(); 8953 8954 /* 8955 * All CPUs are now seeing the new probes array; we can 8956 * safely free the old array. 8957 */ 8958 kmem_free(oprobes, osize); 8959 dtrace_nprobes <<= 1; 8960 } 8961 8962 ASSERT(id - 1 < dtrace_nprobes); 8963 } 8964 8965 ASSERT(dtrace_probes[id - 1] == NULL); 8966 dtrace_probes[id - 1] = probe; 8967 8968 if (provider != dtrace_provider) 8969 mutex_exit(&dtrace_lock); 8970 8971 return (id); 8972} 8973 8974static dtrace_probe_t * 8975dtrace_probe_lookup_id(dtrace_id_t id) 8976{ 8977 ASSERT(MUTEX_HELD(&dtrace_lock)); 8978 8979 if (id == 0 || id > dtrace_nprobes) 8980 return (NULL); 8981 8982 return (dtrace_probes[id - 1]); 8983} 8984 8985static int 8986dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 8987{ 8988 *((dtrace_id_t *)arg) = probe->dtpr_id; 8989 8990 return (DTRACE_MATCH_DONE); 8991} 8992 8993/* 8994 * Look up a probe based on provider and one or more of module name, function 8995 * name and probe name. 8996 */ 8997dtrace_id_t 8998dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 8999 char *func, char *name) 9000{ 9001 dtrace_probekey_t pkey; 9002 dtrace_id_t id; 9003 int match; 9004 9005 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9006 pkey.dtpk_pmatch = &dtrace_match_string; 9007 pkey.dtpk_mod = mod; 9008 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9009 pkey.dtpk_func = func; 9010 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9011 pkey.dtpk_name = name; 9012 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 9013 pkey.dtpk_id = DTRACE_IDNONE; 9014 9015 mutex_enter(&dtrace_lock); 9016 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9017 dtrace_probe_lookup_match, &id); 9018 mutex_exit(&dtrace_lock); 9019 9020 ASSERT(match == 1 || match == 0); 9021 return (match ? 
	    id : 0);
}

/*
 * Returns the probe argument associated with the specified probe.
 */
void *
dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
{
	dtrace_probe_t *probe;
	void *rval = NULL;

	mutex_enter(&dtrace_lock);

	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
	    probe->dtpr_provider == (dtrace_provider_t *)id)
		rval = probe->dtpr_arg;

	mutex_exit(&dtrace_lock);

	return (rval);
}

/*
 * Copy a probe into a probe description.
 */
static void
dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
{
	bzero(pdp, sizeof (dtrace_probedesc_t));
	pdp->dtpd_id = prp->dtpr_id;

	(void) strncpy(pdp->dtpd_provider,
	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);

	(void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
	(void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
	(void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
}

/*
 * Called to indicate that a probe -- or probes -- should be provided by a
 * specified provider.  If the specified description is NULL, the provider will
 * be told to provide all of its probes.  (This is done whenever a new
 * consumer comes along, or whenever a retained enabling is to be matched.)  If
 * the specified description is non-NULL, the provider is given the
 * opportunity to dynamically provide the specified probe, allowing providers
 * to support the creation of probes on-the-fly.  (So-called _autocreated_
 * probes.)  If the provider is NULL, the operations will be applied to all
 * providers; if the provider is non-NULL the operations will only be applied
 * to the specified provider.  The dtrace_provider_lock must be held, and the
 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
 * will need to grab the dtrace_lock when it reenters the framework through
 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
 */
static void
dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
{
#if defined(sun)
	modctl_t *ctl;
#endif
	int all = 0;

	ASSERT(MUTEX_HELD(&dtrace_provider_lock));

	if (prv == NULL) {
		all = 1;
		prv = dtrace_provider;
	}

	do {
		/*
		 * First, call the blanket provide operation.
		 */
		prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);

#if defined(sun)
		/*
		 * Now call the per-module provide operation.  We will grab
		 * mod_lock to prevent the list from being modified.  Note
		 * that this also prevents the mod_busy bits from changing.
		 * (mod_busy can only be changed with mod_lock held.)
		 */
		mutex_enter(&mod_lock);

		ctl = &modules;
		do {
			if (ctl->mod_busy || ctl->mod_mp == NULL)
				continue;

			prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);

		} while ((ctl = ctl->mod_next) != &modules);

		mutex_exit(&mod_lock);
#endif
	} while (all && (prv = prv->dtpv_next) != NULL);
}

#if defined(sun)
/*
 * Iterate over each probe, and call the Framework-to-Provider API function
 * denoted by offs.
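 * (offs is the byte offset of the desired entry point within the provider's
 * dtrace_pops_t ops vector.)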
9124 */ 9125static void 9126dtrace_probe_foreach(uintptr_t offs) 9127{ 9128 dtrace_provider_t *prov; 9129 void (*func)(void *, dtrace_id_t, void *); 9130 dtrace_probe_t *probe; 9131 dtrace_icookie_t cookie; 9132 int i; 9133 9134 /* 9135 * We disable interrupts to walk through the probe array. This is 9136 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9137 * won't see stale data. 9138 */ 9139 cookie = dtrace_interrupt_disable(); 9140 9141 for (i = 0; i < dtrace_nprobes; i++) { 9142 if ((probe = dtrace_probes[i]) == NULL) 9143 continue; 9144 9145 if (probe->dtpr_ecb == NULL) { 9146 /* 9147 * This probe isn't enabled -- don't call the function. 9148 */ 9149 continue; 9150 } 9151 9152 prov = probe->dtpr_provider; 9153 func = *((void(**)(void *, dtrace_id_t, void *)) 9154 ((uintptr_t)&prov->dtpv_pops + offs)); 9155 9156 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9157 } 9158 9159 dtrace_interrupt_enable(cookie); 9160} 9161#endif 9162 9163static int 9164dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9165{ 9166 dtrace_probekey_t pkey; 9167 uint32_t priv; 9168 uid_t uid; 9169 zoneid_t zoneid; 9170 9171 ASSERT(MUTEX_HELD(&dtrace_lock)); 9172 dtrace_ecb_create_cache = NULL; 9173 9174 if (desc == NULL) { 9175 /* 9176 * If we're passed a NULL description, we're being asked to 9177 * create an ECB with a NULL probe. 9178 */ 9179 (void) dtrace_ecb_create_enable(NULL, enab); 9180 return (0); 9181 } 9182 9183 dtrace_probekey(desc, &pkey); 9184 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9185 &priv, &uid, &zoneid); 9186 9187 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9188 enab)); 9189} 9190 9191/* 9192 * DTrace Helper Provider Functions 9193 */ 9194static void 9195dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9196{ 9197 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9198 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9199 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9200} 9201 9202static void 9203dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9204 const dof_provider_t *dofprov, char *strtab) 9205{ 9206 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9207 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9208 dofprov->dofpv_provattr); 9209 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9210 dofprov->dofpv_modattr); 9211 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9212 dofprov->dofpv_funcattr); 9213 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9214 dofprov->dofpv_nameattr); 9215 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9216 dofprov->dofpv_argsattr); 9217} 9218 9219static void 9220dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9221{ 9222 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9223 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9224 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9225 dof_provider_t *provider; 9226 dof_probe_t *probe; 9227 uint32_t *off, *enoff; 9228 uint8_t *arg; 9229 char *strtab; 9230 uint_t i, nprobes; 9231 dtrace_helper_provdesc_t dhpv; 9232 dtrace_helper_probedesc_t dhpb; 9233 dtrace_meta_t *meta = dtrace_meta_pid; 9234 dtrace_mops_t *mops = &meta->dtm_mops; 9235 void *parg; 9236 9237 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9238 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9239 provider->dofpv_strtab * dof->dofh_secsize); 9240 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9241 provider->dofpv_probes * dof->dofh_secsize); 9242 arg_sec = 
(dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9243 provider->dofpv_prargs * dof->dofh_secsize); 9244 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9245 provider->dofpv_proffs * dof->dofh_secsize); 9246 9247 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9248 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9249 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9250 enoff = NULL; 9251 9252 /* 9253 * See dtrace_helper_provider_validate(). 9254 */ 9255 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9256 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9257 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9258 provider->dofpv_prenoffs * dof->dofh_secsize); 9259 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9260 } 9261 9262 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9263 9264 /* 9265 * Create the provider. 9266 */ 9267 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9268 9269 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9270 return; 9271 9272 meta->dtm_count++; 9273 9274 /* 9275 * Create the probes. 9276 */ 9277 for (i = 0; i < nprobes; i++) { 9278 probe = (dof_probe_t *)(uintptr_t)(daddr + 9279 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9280 9281 dhpb.dthpb_mod = dhp->dofhp_mod; 9282 dhpb.dthpb_func = strtab + probe->dofpr_func; 9283 dhpb.dthpb_name = strtab + probe->dofpr_name; 9284 dhpb.dthpb_base = probe->dofpr_addr; 9285 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9286 dhpb.dthpb_noffs = probe->dofpr_noffs; 9287 if (enoff != NULL) { 9288 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9289 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9290 } else { 9291 dhpb.dthpb_enoffs = NULL; 9292 dhpb.dthpb_nenoffs = 0; 9293 } 9294 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9295 dhpb.dthpb_nargc = probe->dofpr_nargc; 9296 dhpb.dthpb_xargc = probe->dofpr_xargc; 9297 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9298 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9299 9300 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9301 } 9302} 9303 9304static void 9305dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9306{ 9307 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9308 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9309 int i; 9310 9311 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9312 9313 for (i = 0; i < dof->dofh_secnum; i++) { 9314 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9315 dof->dofh_secoff + i * dof->dofh_secsize); 9316 9317 if (sec->dofs_type != DOF_SECT_PROVIDER) 9318 continue; 9319 9320 dtrace_helper_provide_one(dhp, sec, pid); 9321 } 9322 9323 /* 9324 * We may have just created probes, so we must now rematch against 9325 * any retained enablings. Note that this call will acquire both 9326 * cpu_lock and dtrace_lock; the fact that we are holding 9327 * dtrace_meta_lock now is what defines the ordering with respect to 9328 * these three locks. 
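	 * (That is, dtrace_meta_lock is acquired before cpu_lock and
	 * dtrace_lock on this path.)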
9329 */ 9330 dtrace_enabling_matchall(); 9331} 9332 9333static void 9334dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9335{ 9336 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9337 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9338 dof_sec_t *str_sec; 9339 dof_provider_t *provider; 9340 char *strtab; 9341 dtrace_helper_provdesc_t dhpv; 9342 dtrace_meta_t *meta = dtrace_meta_pid; 9343 dtrace_mops_t *mops = &meta->dtm_mops; 9344 9345 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9346 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9347 provider->dofpv_strtab * dof->dofh_secsize); 9348 9349 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9350 9351 /* 9352 * Create the provider. 9353 */ 9354 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9355 9356 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9357 9358 meta->dtm_count--; 9359} 9360 9361static void 9362dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9363{ 9364 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9365 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9366 int i; 9367 9368 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9369 9370 for (i = 0; i < dof->dofh_secnum; i++) { 9371 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9372 dof->dofh_secoff + i * dof->dofh_secsize); 9373 9374 if (sec->dofs_type != DOF_SECT_PROVIDER) 9375 continue; 9376 9377 dtrace_helper_provider_remove_one(dhp, sec, pid); 9378 } 9379} 9380 9381/* 9382 * DTrace Meta Provider-to-Framework API Functions 9383 * 9384 * These functions implement the Meta Provider-to-Framework API, as described 9385 * in <sys/dtrace.h>. 9386 */ 9387int 9388dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9389 dtrace_meta_provider_id_t *idp) 9390{ 9391 dtrace_meta_t *meta; 9392 dtrace_helpers_t *help, *next; 9393 int i; 9394 9395 *idp = DTRACE_METAPROVNONE; 9396 9397 /* 9398 * We strictly don't need the name, but we hold onto it for 9399 * debuggability. All hail error queues! 9400 */ 9401 if (name == NULL) { 9402 cmn_err(CE_WARN, "failed to register meta-provider: " 9403 "invalid name"); 9404 return (EINVAL); 9405 } 9406 9407 if (mops == NULL || 9408 mops->dtms_create_probe == NULL || 9409 mops->dtms_provide_pid == NULL || 9410 mops->dtms_remove_pid == NULL) { 9411 cmn_err(CE_WARN, "failed to register meta-register %s: " 9412 "invalid ops", name); 9413 return (EINVAL); 9414 } 9415 9416 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9417 meta->dtm_mops = *mops; 9418 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9419 (void) strcpy(meta->dtm_name, name); 9420 meta->dtm_arg = arg; 9421 9422 mutex_enter(&dtrace_meta_lock); 9423 mutex_enter(&dtrace_lock); 9424 9425 if (dtrace_meta_pid != NULL) { 9426 mutex_exit(&dtrace_lock); 9427 mutex_exit(&dtrace_meta_lock); 9428 cmn_err(CE_WARN, "failed to register meta-register %s: " 9429 "user-land meta-provider exists", name); 9430 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9431 kmem_free(meta, sizeof (dtrace_meta_t)); 9432 return (EINVAL); 9433 } 9434 9435 dtrace_meta_pid = meta; 9436 *idp = (dtrace_meta_provider_id_t)meta; 9437 9438 /* 9439 * If there are providers and probes ready to go, pass them 9440 * off to the new meta provider now. 
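	 * (The dtrace_deferred_pid list holds helpers whose providers could
	 * not be handed to a meta provider earlier; we drain it here and
	 * clear each helper's dthps_deferred flag.)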
9441 */ 9442 9443 help = dtrace_deferred_pid; 9444 dtrace_deferred_pid = NULL; 9445 9446 mutex_exit(&dtrace_lock); 9447 9448 while (help != NULL) { 9449 for (i = 0; i < help->dthps_nprovs; i++) { 9450 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9451 help->dthps_pid); 9452 } 9453 9454 next = help->dthps_next; 9455 help->dthps_next = NULL; 9456 help->dthps_prev = NULL; 9457 help->dthps_deferred = 0; 9458 help = next; 9459 } 9460 9461 mutex_exit(&dtrace_meta_lock); 9462 9463 return (0); 9464} 9465 9466int 9467dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9468{ 9469 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9470 9471 mutex_enter(&dtrace_meta_lock); 9472 mutex_enter(&dtrace_lock); 9473 9474 if (old == dtrace_meta_pid) { 9475 pp = &dtrace_meta_pid; 9476 } else { 9477 panic("attempt to unregister non-existent " 9478 "dtrace meta-provider %p\n", (void *)old); 9479 } 9480 9481 if (old->dtm_count != 0) { 9482 mutex_exit(&dtrace_lock); 9483 mutex_exit(&dtrace_meta_lock); 9484 return (EBUSY); 9485 } 9486 9487 *pp = NULL; 9488 9489 mutex_exit(&dtrace_lock); 9490 mutex_exit(&dtrace_meta_lock); 9491 9492 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9493 kmem_free(old, sizeof (dtrace_meta_t)); 9494 9495 return (0); 9496} 9497 9498 9499/* 9500 * DTrace DIF Object Functions 9501 */ 9502static int 9503dtrace_difo_err(uint_t pc, const char *format, ...) 9504{ 9505 if (dtrace_err_verbose) { 9506 va_list alist; 9507 9508 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9509 va_start(alist, format); 9510 (void) vuprintf(format, alist); 9511 va_end(alist); 9512 } 9513 9514#ifdef DTRACE_ERRDEBUG 9515 dtrace_errdebug(format); 9516#endif 9517 return (1); 9518} 9519 9520/* 9521 * Validate a DTrace DIF object by checking the IR instructions. The following 9522 * rules are currently enforced by dtrace_difo_validate(): 9523 * 9524 * 1. Each instruction must have a valid opcode 9525 * 2. Each register, string, variable, or subroutine reference must be valid 9526 * 3. No instruction can modify register %r0 (must be zero) 9527 * 4. All instruction reserved bits must be set to zero 9528 * 5. The last instruction must be a "ret" instruction 9529 * 6. All branch targets must reference a valid instruction _after_ the branch 9530 */ 9531static int 9532dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9533 cred_t *cr) 9534{ 9535 int err = 0, i; 9536 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 9537 int kcheckload; 9538 uint_t pc; 9539 9540 kcheckload = cr == NULL || 9541 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9542 9543 dp->dtdo_destructive = 0; 9544 9545 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9546 dif_instr_t instr = dp->dtdo_buf[pc]; 9547 9548 uint_t r1 = DIF_INSTR_R1(instr); 9549 uint_t r2 = DIF_INSTR_R2(instr); 9550 uint_t rd = DIF_INSTR_RD(instr); 9551 uint_t rs = DIF_INSTR_RS(instr); 9552 uint_t label = DIF_INSTR_LABEL(instr); 9553 uint_t v = DIF_INSTR_VAR(instr); 9554 uint_t subr = DIF_INSTR_SUBR(instr); 9555 uint_t type = DIF_INSTR_TYPE(instr); 9556 uint_t op = DIF_INSTR_OP(instr); 9557 9558 switch (op) { 9559 case DIF_OP_OR: 9560 case DIF_OP_XOR: 9561 case DIF_OP_AND: 9562 case DIF_OP_SLL: 9563 case DIF_OP_SRL: 9564 case DIF_OP_SRA: 9565 case DIF_OP_SUB: 9566 case DIF_OP_ADD: 9567 case DIF_OP_MUL: 9568 case DIF_OP_SDIV: 9569 case DIF_OP_UDIV: 9570 case DIF_OP_SREM: 9571 case DIF_OP_UREM: 9572 case DIF_OP_COPYS: 9573 if (r1 >= nregs) 9574 err += efunc(pc, "invalid register %u\n", r1); 9575 if (r2 >= nregs) 9576 err += efunc(pc, "invalid register %u\n", r2); 9577 if (rd >= nregs) 9578 err += efunc(pc, "invalid register %u\n", rd); 9579 if (rd == 0) 9580 err += efunc(pc, "cannot write to %r0\n"); 9581 break; 9582 case DIF_OP_NOT: 9583 case DIF_OP_MOV: 9584 case DIF_OP_ALLOCS: 9585 if (r1 >= nregs) 9586 err += efunc(pc, "invalid register %u\n", r1); 9587 if (r2 != 0) 9588 err += efunc(pc, "non-zero reserved bits\n"); 9589 if (rd >= nregs) 9590 err += efunc(pc, "invalid register %u\n", rd); 9591 if (rd == 0) 9592 err += efunc(pc, "cannot write to %r0\n"); 9593 break; 9594 case DIF_OP_LDSB: 9595 case DIF_OP_LDSH: 9596 case DIF_OP_LDSW: 9597 case DIF_OP_LDUB: 9598 case DIF_OP_LDUH: 9599 case DIF_OP_LDUW: 9600 case DIF_OP_LDX: 9601 if (r1 >= nregs) 9602 err += efunc(pc, "invalid register %u\n", r1); 9603 if (r2 != 0) 9604 err += efunc(pc, "non-zero reserved bits\n"); 9605 if (rd >= nregs) 9606 err += efunc(pc, "invalid register %u\n", rd); 9607 if (rd == 0) 9608 err += efunc(pc, "cannot write to %r0\n"); 9609 if (kcheckload) 9610 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9611 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9612 break; 9613 case DIF_OP_RLDSB: 9614 case DIF_OP_RLDSH: 9615 case DIF_OP_RLDSW: 9616 case DIF_OP_RLDUB: 9617 case DIF_OP_RLDUH: 9618 case DIF_OP_RLDUW: 9619 case DIF_OP_RLDX: 9620 if (r1 >= nregs) 9621 err += efunc(pc, "invalid register %u\n", r1); 9622 if (r2 != 0) 9623 err += efunc(pc, "non-zero reserved bits\n"); 9624 if (rd >= nregs) 9625 err += efunc(pc, "invalid register %u\n", rd); 9626 if (rd == 0) 9627 err += efunc(pc, "cannot write to %r0\n"); 9628 break; 9629 case DIF_OP_ULDSB: 9630 case DIF_OP_ULDSH: 9631 case DIF_OP_ULDSW: 9632 case DIF_OP_ULDUB: 9633 case DIF_OP_ULDUH: 9634 case DIF_OP_ULDUW: 9635 case DIF_OP_ULDX: 9636 if (r1 >= nregs) 9637 err += efunc(pc, "invalid register %u\n", r1); 9638 if (r2 != 0) 9639 err += efunc(pc, "non-zero reserved bits\n"); 9640 if (rd >= nregs) 9641 err += efunc(pc, "invalid register %u\n", rd); 9642 if (rd == 0) 9643 err += efunc(pc, "cannot write to %r0\n"); 9644 break; 9645 case DIF_OP_STB: 9646 case DIF_OP_STH: 9647 case DIF_OP_STW: 9648 case DIF_OP_STX: 9649 if (r1 >= nregs) 9650 err += efunc(pc, "invalid register %u\n", r1); 9651 if (r2 != 0) 9652 err += efunc(pc, "non-zero reserved bits\n"); 9653 if (rd >= nregs) 9654 err += efunc(pc, "invalid register %u\n", rd); 9655 if (rd == 0) 9656 err += efunc(pc, "cannot write to 0 address\n"); 9657 break; 9658 case 
DIF_OP_CMP: 9659 case DIF_OP_SCMP: 9660 if (r1 >= nregs) 9661 err += efunc(pc, "invalid register %u\n", r1); 9662 if (r2 >= nregs) 9663 err += efunc(pc, "invalid register %u\n", r2); 9664 if (rd != 0) 9665 err += efunc(pc, "non-zero reserved bits\n"); 9666 break; 9667 case DIF_OP_TST: 9668 if (r1 >= nregs) 9669 err += efunc(pc, "invalid register %u\n", r1); 9670 if (r2 != 0 || rd != 0) 9671 err += efunc(pc, "non-zero reserved bits\n"); 9672 break; 9673 case DIF_OP_BA: 9674 case DIF_OP_BE: 9675 case DIF_OP_BNE: 9676 case DIF_OP_BG: 9677 case DIF_OP_BGU: 9678 case DIF_OP_BGE: 9679 case DIF_OP_BGEU: 9680 case DIF_OP_BL: 9681 case DIF_OP_BLU: 9682 case DIF_OP_BLE: 9683 case DIF_OP_BLEU: 9684 if (label >= dp->dtdo_len) { 9685 err += efunc(pc, "invalid branch target %u\n", 9686 label); 9687 } 9688 if (label <= pc) { 9689 err += efunc(pc, "backward branch to %u\n", 9690 label); 9691 } 9692 break; 9693 case DIF_OP_RET: 9694 if (r1 != 0 || r2 != 0) 9695 err += efunc(pc, "non-zero reserved bits\n"); 9696 if (rd >= nregs) 9697 err += efunc(pc, "invalid register %u\n", rd); 9698 break; 9699 case DIF_OP_NOP: 9700 case DIF_OP_POPTS: 9701 case DIF_OP_FLUSHTS: 9702 if (r1 != 0 || r2 != 0 || rd != 0) 9703 err += efunc(pc, "non-zero reserved bits\n"); 9704 break; 9705 case DIF_OP_SETX: 9706 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9707 err += efunc(pc, "invalid integer ref %u\n", 9708 DIF_INSTR_INTEGER(instr)); 9709 } 9710 if (rd >= nregs) 9711 err += efunc(pc, "invalid register %u\n", rd); 9712 if (rd == 0) 9713 err += efunc(pc, "cannot write to %r0\n"); 9714 break; 9715 case DIF_OP_SETS: 9716 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9717 err += efunc(pc, "invalid string ref %u\n", 9718 DIF_INSTR_STRING(instr)); 9719 } 9720 if (rd >= nregs) 9721 err += efunc(pc, "invalid register %u\n", rd); 9722 if (rd == 0) 9723 err += efunc(pc, "cannot write to %r0\n"); 9724 break; 9725 case DIF_OP_LDGA: 9726 case DIF_OP_LDTA: 9727 if (r1 > DIF_VAR_ARRAY_MAX) 9728 err += efunc(pc, "invalid array %u\n", r1); 9729 if (r2 >= nregs) 9730 err += efunc(pc, "invalid register %u\n", r2); 9731 if (rd >= nregs) 9732 err += efunc(pc, "invalid register %u\n", rd); 9733 if (rd == 0) 9734 err += efunc(pc, "cannot write to %r0\n"); 9735 break; 9736 case DIF_OP_LDGS: 9737 case DIF_OP_LDTS: 9738 case DIF_OP_LDLS: 9739 case DIF_OP_LDGAA: 9740 case DIF_OP_LDTAA: 9741 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9742 err += efunc(pc, "invalid variable %u\n", v); 9743 if (rd >= nregs) 9744 err += efunc(pc, "invalid register %u\n", rd); 9745 if (rd == 0) 9746 err += efunc(pc, "cannot write to %r0\n"); 9747 break; 9748 case DIF_OP_STGS: 9749 case DIF_OP_STTS: 9750 case DIF_OP_STLS: 9751 case DIF_OP_STGAA: 9752 case DIF_OP_STTAA: 9753 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9754 err += efunc(pc, "invalid variable %u\n", v); 9755 if (rs >= nregs) 9756 err += efunc(pc, "invalid register %u\n", rd); 9757 break; 9758 case DIF_OP_CALL: 9759 if (subr > DIF_SUBR_MAX) 9760 err += efunc(pc, "invalid subr %u\n", subr); 9761 if (rd >= nregs) 9762 err += efunc(pc, "invalid register %u\n", rd); 9763 if (rd == 0) 9764 err += efunc(pc, "cannot write to %r0\n"); 9765 9766 if (subr == DIF_SUBR_COPYOUT || 9767 subr == DIF_SUBR_COPYOUTSTR) { 9768 dp->dtdo_destructive = 1; 9769 } 9770 9771 if (subr == DIF_SUBR_GETF) { 9772 /* 9773 * If we have a getf() we need to record that 9774 * in our state. 
Note that our state can be 9775 * NULL if this is a helper -- but in that 9776 * case, the call to getf() is itself illegal, 9777 * and will be caught (slightly later) when 9778 * the helper is validated. 9779 */ 9780 if (vstate->dtvs_state != NULL) 9781 vstate->dtvs_state->dts_getf++; 9782 } 9783 9784 break; 9785 case DIF_OP_PUSHTR: 9786 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 9787 err += efunc(pc, "invalid ref type %u\n", type); 9788 if (r2 >= nregs) 9789 err += efunc(pc, "invalid register %u\n", r2); 9790 if (rs >= nregs) 9791 err += efunc(pc, "invalid register %u\n", rs); 9792 break; 9793 case DIF_OP_PUSHTV: 9794 if (type != DIF_TYPE_CTF) 9795 err += efunc(pc, "invalid val type %u\n", type); 9796 if (r2 >= nregs) 9797 err += efunc(pc, "invalid register %u\n", r2); 9798 if (rs >= nregs) 9799 err += efunc(pc, "invalid register %u\n", rs); 9800 break; 9801 default: 9802 err += efunc(pc, "invalid opcode %u\n", 9803 DIF_INSTR_OP(instr)); 9804 } 9805 } 9806 9807 if (dp->dtdo_len != 0 && 9808 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 9809 err += efunc(dp->dtdo_len - 1, 9810 "expected 'ret' as last DIF instruction\n"); 9811 } 9812 9813 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 9814 /* 9815 * If we're not returning by reference, the size must be either 9816 * 0 or the size of one of the base types. 9817 */ 9818 switch (dp->dtdo_rtype.dtdt_size) { 9819 case 0: 9820 case sizeof (uint8_t): 9821 case sizeof (uint16_t): 9822 case sizeof (uint32_t): 9823 case sizeof (uint64_t): 9824 break; 9825 9826 default: 9827 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 9828 } 9829 } 9830 9831 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 9832 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 9833 dtrace_diftype_t *vt, *et; 9834 uint_t id, ndx; 9835 9836 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 9837 v->dtdv_scope != DIFV_SCOPE_THREAD && 9838 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 9839 err += efunc(i, "unrecognized variable scope %d\n", 9840 v->dtdv_scope); 9841 break; 9842 } 9843 9844 if (v->dtdv_kind != DIFV_KIND_ARRAY && 9845 v->dtdv_kind != DIFV_KIND_SCALAR) { 9846 err += efunc(i, "unrecognized variable type %d\n", 9847 v->dtdv_kind); 9848 break; 9849 } 9850 9851 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 9852 err += efunc(i, "%d exceeds variable id limit\n", id); 9853 break; 9854 } 9855 9856 if (id < DIF_VAR_OTHER_UBASE) 9857 continue; 9858 9859 /* 9860 * For user-defined variables, we need to check that this 9861 * definition is identical to any previous definition that we 9862 * encountered. 
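		 * (Specifically, the kind and type flags must match exactly,
		 * and a non-zero size must match the earlier size; a zero
		 * size is tolerated.)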
9863 */ 9864 ndx = id - DIF_VAR_OTHER_UBASE; 9865 9866 switch (v->dtdv_scope) { 9867 case DIFV_SCOPE_GLOBAL: 9868 if (ndx < vstate->dtvs_nglobals) { 9869 dtrace_statvar_t *svar; 9870 9871 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 9872 existing = &svar->dtsv_var; 9873 } 9874 9875 break; 9876 9877 case DIFV_SCOPE_THREAD: 9878 if (ndx < vstate->dtvs_ntlocals) 9879 existing = &vstate->dtvs_tlocals[ndx]; 9880 break; 9881 9882 case DIFV_SCOPE_LOCAL: 9883 if (ndx < vstate->dtvs_nlocals) { 9884 dtrace_statvar_t *svar; 9885 9886 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 9887 existing = &svar->dtsv_var; 9888 } 9889 9890 break; 9891 } 9892 9893 vt = &v->dtdv_type; 9894 9895 if (vt->dtdt_flags & DIF_TF_BYREF) { 9896 if (vt->dtdt_size == 0) { 9897 err += efunc(i, "zero-sized variable\n"); 9898 break; 9899 } 9900 9901 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 9902 vt->dtdt_size > dtrace_global_maxsize) { 9903 err += efunc(i, "oversized by-ref global\n"); 9904 break; 9905 } 9906 } 9907 9908 if (existing == NULL || existing->dtdv_id == 0) 9909 continue; 9910 9911 ASSERT(existing->dtdv_id == v->dtdv_id); 9912 ASSERT(existing->dtdv_scope == v->dtdv_scope); 9913 9914 if (existing->dtdv_kind != v->dtdv_kind) 9915 err += efunc(i, "%d changed variable kind\n", id); 9916 9917 et = &existing->dtdv_type; 9918 9919 if (vt->dtdt_flags != et->dtdt_flags) { 9920 err += efunc(i, "%d changed variable type flags\n", id); 9921 break; 9922 } 9923 9924 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 9925 err += efunc(i, "%d changed variable type size\n", id); 9926 break; 9927 } 9928 } 9929 9930 return (err); 9931} 9932 9933/* 9934 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 9935 * are much more constrained than normal DIFOs. Specifically, they may 9936 * not: 9937 * 9938 * 1. Make calls to subroutines other than copyin(), copyinstr() or 9939 * miscellaneous string routines 9940 * 2. Access DTrace variables other than the args[] array, and the 9941 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 9942 * 3. Have thread-local variables. 9943 * 4. Have dynamic variables. 9944 */ 9945static int 9946dtrace_difo_validate_helper(dtrace_difo_t *dp) 9947{ 9948 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 9949 int err = 0; 9950 uint_t pc; 9951 9952 for (pc = 0; pc < dp->dtdo_len; pc++) { 9953 dif_instr_t instr = dp->dtdo_buf[pc]; 9954 9955 uint_t v = DIF_INSTR_VAR(instr); 9956 uint_t subr = DIF_INSTR_SUBR(instr); 9957 uint_t op = DIF_INSTR_OP(instr); 9958 9959 switch (op) { 9960 case DIF_OP_OR: 9961 case DIF_OP_XOR: 9962 case DIF_OP_AND: 9963 case DIF_OP_SLL: 9964 case DIF_OP_SRL: 9965 case DIF_OP_SRA: 9966 case DIF_OP_SUB: 9967 case DIF_OP_ADD: 9968 case DIF_OP_MUL: 9969 case DIF_OP_SDIV: 9970 case DIF_OP_UDIV: 9971 case DIF_OP_SREM: 9972 case DIF_OP_UREM: 9973 case DIF_OP_COPYS: 9974 case DIF_OP_NOT: 9975 case DIF_OP_MOV: 9976 case DIF_OP_RLDSB: 9977 case DIF_OP_RLDSH: 9978 case DIF_OP_RLDSW: 9979 case DIF_OP_RLDUB: 9980 case DIF_OP_RLDUH: 9981 case DIF_OP_RLDUW: 9982 case DIF_OP_RLDX: 9983 case DIF_OP_ULDSB: 9984 case DIF_OP_ULDSH: 9985 case DIF_OP_ULDSW: 9986 case DIF_OP_ULDUB: 9987 case DIF_OP_ULDUH: 9988 case DIF_OP_ULDUW: 9989 case DIF_OP_ULDX: 9990 case DIF_OP_STB: 9991 case DIF_OP_STH: 9992 case DIF_OP_STW: 9993 case DIF_OP_STX: 9994 case DIF_OP_ALLOCS: 9995 case DIF_OP_CMP: 9996 case DIF_OP_SCMP: 9997 case DIF_OP_TST: 9998 case DIF_OP_BA: 9999 case DIF_OP_BE: 10000 case DIF_OP_BNE: 10001 case DIF_OP_BG: 10002 case DIF_OP_BGU: 10003 case DIF_OP_BGE: 10004 case DIF_OP_BGEU: 10005 case DIF_OP_BL: 10006 case DIF_OP_BLU: 10007 case DIF_OP_BLE: 10008 case DIF_OP_BLEU: 10009 case DIF_OP_RET: 10010 case DIF_OP_NOP: 10011 case DIF_OP_POPTS: 10012 case DIF_OP_FLUSHTS: 10013 case DIF_OP_SETX: 10014 case DIF_OP_SETS: 10015 case DIF_OP_LDGA: 10016 case DIF_OP_LDLS: 10017 case DIF_OP_STGS: 10018 case DIF_OP_STLS: 10019 case DIF_OP_PUSHTR: 10020 case DIF_OP_PUSHTV: 10021 break; 10022 10023 case DIF_OP_LDGS: 10024 if (v >= DIF_VAR_OTHER_UBASE) 10025 break; 10026 10027 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10028 break; 10029 10030 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10031 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10032 v == DIF_VAR_EXECARGS || 10033 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10034 v == DIF_VAR_UID || v == DIF_VAR_GID) 10035 break; 10036 10037 err += efunc(pc, "illegal variable %u\n", v); 10038 break; 10039 10040 case DIF_OP_LDTA: 10041 case DIF_OP_LDTS: 10042 case DIF_OP_LDGAA: 10043 case DIF_OP_LDTAA: 10044 err += efunc(pc, "illegal dynamic variable load\n"); 10045 break; 10046 10047 case DIF_OP_STTS: 10048 case DIF_OP_STGAA: 10049 case DIF_OP_STTAA: 10050 err += efunc(pc, "illegal dynamic variable store\n"); 10051 break; 10052 10053 case DIF_OP_CALL: 10054 if (subr == DIF_SUBR_ALLOCA || 10055 subr == DIF_SUBR_BCOPY || 10056 subr == DIF_SUBR_COPYIN || 10057 subr == DIF_SUBR_COPYINTO || 10058 subr == DIF_SUBR_COPYINSTR || 10059 subr == DIF_SUBR_INDEX || 10060 subr == DIF_SUBR_INET_NTOA || 10061 subr == DIF_SUBR_INET_NTOA6 || 10062 subr == DIF_SUBR_INET_NTOP || 10063 subr == DIF_SUBR_JSON || 10064 subr == DIF_SUBR_LLTOSTR || 10065 subr == DIF_SUBR_STRTOLL || 10066 subr == DIF_SUBR_RINDEX || 10067 subr == DIF_SUBR_STRCHR || 10068 subr == DIF_SUBR_STRJOIN || 10069 subr == DIF_SUBR_STRRCHR || 10070 subr == DIF_SUBR_STRSTR || 10071 subr == DIF_SUBR_HTONS || 10072 subr == DIF_SUBR_HTONL || 10073 subr == DIF_SUBR_HTONLL || 10074 subr == DIF_SUBR_NTOHS || 10075 subr == DIF_SUBR_NTOHL || 10076 subr == DIF_SUBR_NTOHLL || 10077 subr == DIF_SUBR_MEMREF || 10078#if !defined(sun) 10079 subr == DIF_SUBR_MEMSTR || 10080#endif 10081 subr == DIF_SUBR_TYPEREF) 10082 break; 10083 10084 err += efunc(pc, "invalid subr %u\n", subr); 10085 break; 10086 10087 
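		/*
		 * Any opcode not explicitly permitted above is rejected
		 * outright.
		 */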
default: 10088 err += efunc(pc, "invalid opcode %u\n", 10089 DIF_INSTR_OP(instr)); 10090 } 10091 } 10092 10093 return (err); 10094} 10095 10096/* 10097 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10098 * basis; 0 if not. 10099 */ 10100static int 10101dtrace_difo_cacheable(dtrace_difo_t *dp) 10102{ 10103 int i; 10104 10105 if (dp == NULL) 10106 return (0); 10107 10108 for (i = 0; i < dp->dtdo_varlen; i++) { 10109 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10110 10111 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10112 continue; 10113 10114 switch (v->dtdv_id) { 10115 case DIF_VAR_CURTHREAD: 10116 case DIF_VAR_PID: 10117 case DIF_VAR_TID: 10118 case DIF_VAR_EXECARGS: 10119 case DIF_VAR_EXECNAME: 10120 case DIF_VAR_ZONENAME: 10121 break; 10122 10123 default: 10124 return (0); 10125 } 10126 } 10127 10128 /* 10129 * This DIF object may be cacheable. Now we need to look for any 10130 * array loading instructions, any memory loading instructions, or 10131 * any stores to thread-local variables. 10132 */ 10133 for (i = 0; i < dp->dtdo_len; i++) { 10134 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10135 10136 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10137 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10138 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10139 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10140 return (0); 10141 } 10142 10143 return (1); 10144} 10145 10146static void 10147dtrace_difo_hold(dtrace_difo_t *dp) 10148{ 10149 int i; 10150 10151 ASSERT(MUTEX_HELD(&dtrace_lock)); 10152 10153 dp->dtdo_refcnt++; 10154 ASSERT(dp->dtdo_refcnt != 0); 10155 10156 /* 10157 * We need to check this DIF object for references to the variable 10158 * DIF_VAR_VTIMESTAMP. 10159 */ 10160 for (i = 0; i < dp->dtdo_varlen; i++) { 10161 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10162 10163 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10164 continue; 10165 10166 if (dtrace_vtime_references++ == 0) 10167 dtrace_vtime_enable(); 10168 } 10169} 10170 10171/* 10172 * This routine calculates the dynamic variable chunksize for a given DIF 10173 * object. The calculation is not fool-proof, and can probably be tricked by 10174 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10175 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10176 * if a dynamic variable size exceeds the chunksize. 
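 * (Roughly: we walk the DIF text, tracking tuple pushes via "pushtr" and
 * "pushtv", and for each dynamic variable store we total the key sizes and
 * the size of the stored value; the largest such total becomes the
 * chunksize.)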
10177 */ 10178static void 10179dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10180{ 10181 uint64_t sval = 0; 10182 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10183 const dif_instr_t *text = dp->dtdo_buf; 10184 uint_t pc, srd = 0; 10185 uint_t ttop = 0; 10186 size_t size, ksize; 10187 uint_t id, i; 10188 10189 for (pc = 0; pc < dp->dtdo_len; pc++) { 10190 dif_instr_t instr = text[pc]; 10191 uint_t op = DIF_INSTR_OP(instr); 10192 uint_t rd = DIF_INSTR_RD(instr); 10193 uint_t r1 = DIF_INSTR_R1(instr); 10194 uint_t nkeys = 0; 10195 uchar_t scope = 0; 10196 10197 dtrace_key_t *key = tupregs; 10198 10199 switch (op) { 10200 case DIF_OP_SETX: 10201 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10202 srd = rd; 10203 continue; 10204 10205 case DIF_OP_STTS: 10206 key = &tupregs[DIF_DTR_NREGS]; 10207 key[0].dttk_size = 0; 10208 key[1].dttk_size = 0; 10209 nkeys = 2; 10210 scope = DIFV_SCOPE_THREAD; 10211 break; 10212 10213 case DIF_OP_STGAA: 10214 case DIF_OP_STTAA: 10215 nkeys = ttop; 10216 10217 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10218 key[nkeys++].dttk_size = 0; 10219 10220 key[nkeys++].dttk_size = 0; 10221 10222 if (op == DIF_OP_STTAA) { 10223 scope = DIFV_SCOPE_THREAD; 10224 } else { 10225 scope = DIFV_SCOPE_GLOBAL; 10226 } 10227 10228 break; 10229 10230 case DIF_OP_PUSHTR: 10231 if (ttop == DIF_DTR_NREGS) 10232 return; 10233 10234 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10235 /* 10236 * If the register for the size of the "pushtr" 10237 * is %r0 (or the value is 0) and the type is 10238 * a string, we'll use the system-wide default 10239 * string size. 10240 */ 10241 tupregs[ttop++].dttk_size = 10242 dtrace_strsize_default; 10243 } else { 10244 if (srd == 0) 10245 return; 10246 10247 tupregs[ttop++].dttk_size = sval; 10248 } 10249 10250 break; 10251 10252 case DIF_OP_PUSHTV: 10253 if (ttop == DIF_DTR_NREGS) 10254 return; 10255 10256 tupregs[ttop++].dttk_size = 0; 10257 break; 10258 10259 case DIF_OP_FLUSHTS: 10260 ttop = 0; 10261 break; 10262 10263 case DIF_OP_POPTS: 10264 if (ttop != 0) 10265 ttop--; 10266 break; 10267 } 10268 10269 sval = 0; 10270 srd = 0; 10271 10272 if (nkeys == 0) 10273 continue; 10274 10275 /* 10276 * We have a dynamic variable allocation; calculate its size. 10277 */ 10278 for (ksize = 0, i = 0; i < nkeys; i++) 10279 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10280 10281 size = sizeof (dtrace_dynvar_t); 10282 size += sizeof (dtrace_key_t) * (nkeys - 1); 10283 size += ksize; 10284 10285 /* 10286 * Now we need to determine the size of the stored data. 10287 */ 10288 id = DIF_INSTR_VAR(instr); 10289 10290 for (i = 0; i < dp->dtdo_varlen; i++) { 10291 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10292 10293 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10294 size += v->dtdv_type.dtdt_size; 10295 break; 10296 } 10297 } 10298 10299 if (i == dp->dtdo_varlen) 10300 return; 10301 10302 /* 10303 * We have the size. If this is larger than the chunk size 10304 * for our dynamic variable state, reset the chunk size. 
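 * (The chunk size only ever grows here; a variable that nonetheless
 * proves too large at runtime is handled by dtrace_dynvar() failing the
 * allocation gracefully, per the block comment above.)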
10305 */ 10306 size = P2ROUNDUP(size, sizeof (uint64_t)); 10307 10308 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10309 vstate->dtvs_dynvars.dtds_chunksize = size; 10310 } 10311} 10312 10313static void 10314dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10315{ 10316 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10317 uint_t id; 10318 10319 ASSERT(MUTEX_HELD(&dtrace_lock)); 10320 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10321 10322 for (i = 0; i < dp->dtdo_varlen; i++) { 10323 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10324 dtrace_statvar_t *svar, ***svarp = NULL; 10325 size_t dsize = 0; 10326 uint8_t scope = v->dtdv_scope; 10327 int *np = NULL; 10328 10329 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10330 continue; 10331 10332 id -= DIF_VAR_OTHER_UBASE; 10333 10334 switch (scope) { 10335 case DIFV_SCOPE_THREAD: 10336 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10337 dtrace_difv_t *tlocals; 10338 10339 if ((ntlocals = (otlocals << 1)) == 0) 10340 ntlocals = 1; 10341 10342 osz = otlocals * sizeof (dtrace_difv_t); 10343 nsz = ntlocals * sizeof (dtrace_difv_t); 10344 10345 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10346 10347 if (osz != 0) { 10348 bcopy(vstate->dtvs_tlocals, 10349 tlocals, osz); 10350 kmem_free(vstate->dtvs_tlocals, osz); 10351 } 10352 10353 vstate->dtvs_tlocals = tlocals; 10354 vstate->dtvs_ntlocals = ntlocals; 10355 } 10356 10357 vstate->dtvs_tlocals[id] = *v; 10358 continue; 10359 10360 case DIFV_SCOPE_LOCAL: 10361 np = &vstate->dtvs_nlocals; 10362 svarp = &vstate->dtvs_locals; 10363 10364 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10365 dsize = NCPU * (v->dtdv_type.dtdt_size + 10366 sizeof (uint64_t)); 10367 else 10368 dsize = NCPU * sizeof (uint64_t); 10369 10370 break; 10371 10372 case DIFV_SCOPE_GLOBAL: 10373 np = &vstate->dtvs_nglobals; 10374 svarp = &vstate->dtvs_globals; 10375 10376 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10377 dsize = v->dtdv_type.dtdt_size + 10378 sizeof (uint64_t); 10379 10380 break; 10381 10382 default: 10383 ASSERT(0); 10384 } 10385 10386 while (id >= (oldsvars = *np)) { 10387 dtrace_statvar_t **statics; 10388 int newsvars, oldsize, newsize; 10389 10390 if ((newsvars = (oldsvars << 1)) == 0) 10391 newsvars = 1; 10392 10393 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10394 newsize = newsvars * sizeof (dtrace_statvar_t *); 10395 10396 statics = kmem_zalloc(newsize, KM_SLEEP); 10397 10398 if (oldsize != 0) { 10399 bcopy(*svarp, statics, oldsize); 10400 kmem_free(*svarp, oldsize); 10401 } 10402 10403 *svarp = statics; 10404 *np = newsvars; 10405 } 10406 10407 if ((svar = (*svarp)[id]) == NULL) { 10408 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10409 svar->dtsv_var = *v; 10410 10411 if ((svar->dtsv_size = dsize) != 0) { 10412 svar->dtsv_data = (uint64_t)(uintptr_t) 10413 kmem_zalloc(dsize, KM_SLEEP); 10414 } 10415 10416 (*svarp)[id] = svar; 10417 } 10418 10419 svar->dtsv_refcnt++; 10420 } 10421 10422 dtrace_difo_chunksize(dp, vstate); 10423 dtrace_difo_hold(dp); 10424} 10425 10426static dtrace_difo_t * 10427dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10428{ 10429 dtrace_difo_t *new; 10430 size_t sz; 10431 10432 ASSERT(dp->dtdo_buf != NULL); 10433 ASSERT(dp->dtdo_refcnt != 0); 10434 10435 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10436 10437 ASSERT(dp->dtdo_buf != NULL); 10438 sz = dp->dtdo_len * sizeof (dif_instr_t); 10439 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10440 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10441 new->dtdo_len = dp->dtdo_len; 10442 10443 if 
(dp->dtdo_strtab != NULL) { 10444 ASSERT(dp->dtdo_strlen != 0); 10445 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10446 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10447 new->dtdo_strlen = dp->dtdo_strlen; 10448 } 10449 10450 if (dp->dtdo_inttab != NULL) { 10451 ASSERT(dp->dtdo_intlen != 0); 10452 sz = dp->dtdo_intlen * sizeof (uint64_t); 10453 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10454 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10455 new->dtdo_intlen = dp->dtdo_intlen; 10456 } 10457 10458 if (dp->dtdo_vartab != NULL) { 10459 ASSERT(dp->dtdo_varlen != 0); 10460 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10461 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10462 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10463 new->dtdo_varlen = dp->dtdo_varlen; 10464 } 10465 10466 dtrace_difo_init(new, vstate); 10467 return (new); 10468} 10469 10470static void 10471dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10472{ 10473 int i; 10474 10475 ASSERT(dp->dtdo_refcnt == 0); 10476 10477 for (i = 0; i < dp->dtdo_varlen; i++) { 10478 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10479 dtrace_statvar_t *svar, **svarp = NULL; 10480 uint_t id; 10481 uint8_t scope = v->dtdv_scope; 10482 int *np = NULL; 10483 10484 switch (scope) { 10485 case DIFV_SCOPE_THREAD: 10486 continue; 10487 10488 case DIFV_SCOPE_LOCAL: 10489 np = &vstate->dtvs_nlocals; 10490 svarp = vstate->dtvs_locals; 10491 break; 10492 10493 case DIFV_SCOPE_GLOBAL: 10494 np = &vstate->dtvs_nglobals; 10495 svarp = vstate->dtvs_globals; 10496 break; 10497 10498 default: 10499 ASSERT(0); 10500 } 10501 10502 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10503 continue; 10504 10505 id -= DIF_VAR_OTHER_UBASE; 10506 ASSERT(id < *np); 10507 10508 svar = svarp[id]; 10509 ASSERT(svar != NULL); 10510 ASSERT(svar->dtsv_refcnt > 0); 10511 10512 if (--svar->dtsv_refcnt > 0) 10513 continue; 10514 10515 if (svar->dtsv_size != 0) { 10516 ASSERT(svar->dtsv_data != 0); 10517 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10518 svar->dtsv_size); 10519 } 10520 10521 kmem_free(svar, sizeof (dtrace_statvar_t)); 10522 svarp[id] = NULL; 10523 } 10524 10525 if (dp->dtdo_buf != NULL) 10526 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10527 if (dp->dtdo_inttab != NULL) 10528 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10529 if (dp->dtdo_strtab != NULL) 10530 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10531 if (dp->dtdo_vartab != NULL) 10532 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10533 10534 kmem_free(dp, sizeof (dtrace_difo_t)); 10535} 10536 10537static void 10538dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10539{ 10540 int i; 10541 10542 ASSERT(MUTEX_HELD(&dtrace_lock)); 10543 ASSERT(dp->dtdo_refcnt != 0); 10544 10545 for (i = 0; i < dp->dtdo_varlen; i++) { 10546 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10547 10548 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10549 continue; 10550 10551 ASSERT(dtrace_vtime_references > 0); 10552 if (--dtrace_vtime_references == 0) 10553 dtrace_vtime_disable(); 10554 } 10555 10556 if (--dp->dtdo_refcnt == 0) 10557 dtrace_difo_destroy(dp, vstate); 10558} 10559 10560/* 10561 * DTrace Format Functions 10562 */ 10563static uint16_t 10564dtrace_format_add(dtrace_state_t *state, char *str) 10565{ 10566 char *fmt, **new; 10567 uint16_t ndx, len = strlen(str) + 1; 10568 10569 fmt = kmem_zalloc(len, KM_SLEEP); 10570 bcopy(str, fmt, len); 10571 10572 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 10573 if 
(state->dts_formats[ndx] == NULL) { 10574 state->dts_formats[ndx] = fmt; 10575 return (ndx + 1); 10576 } 10577 } 10578 10579 if (state->dts_nformats == USHRT_MAX) { 10580 /* 10581 * This is only likely if a denial-of-service attack is being 10582 * attempted. As such, it's okay to fail silently here. 10583 */ 10584 kmem_free(fmt, len); 10585 return (0); 10586 } 10587 10588 /* 10589 * For simplicity, we always resize the formats array to be exactly the 10590 * number of formats. 10591 */ 10592 ndx = state->dts_nformats++; 10593 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10594 10595 if (state->dts_formats != NULL) { 10596 ASSERT(ndx != 0); 10597 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10598 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10599 } 10600 10601 state->dts_formats = new; 10602 state->dts_formats[ndx] = fmt; 10603 10604 return (ndx + 1); 10605} 10606 10607static void 10608dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10609{ 10610 char *fmt; 10611 10612 ASSERT(state->dts_formats != NULL); 10613 ASSERT(format <= state->dts_nformats); 10614 ASSERT(state->dts_formats[format - 1] != NULL); 10615 10616 fmt = state->dts_formats[format - 1]; 10617 kmem_free(fmt, strlen(fmt) + 1); 10618 state->dts_formats[format - 1] = NULL; 10619} 10620 10621static void 10622dtrace_format_destroy(dtrace_state_t *state) 10623{ 10624 int i; 10625 10626 if (state->dts_nformats == 0) { 10627 ASSERT(state->dts_formats == NULL); 10628 return; 10629 } 10630 10631 ASSERT(state->dts_formats != NULL); 10632 10633 for (i = 0; i < state->dts_nformats; i++) { 10634 char *fmt = state->dts_formats[i]; 10635 10636 if (fmt == NULL) 10637 continue; 10638 10639 kmem_free(fmt, strlen(fmt) + 1); 10640 } 10641 10642 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10643 state->dts_nformats = 0; 10644 state->dts_formats = NULL; 10645} 10646 10647/* 10648 * DTrace Predicate Functions 10649 */ 10650static dtrace_predicate_t * 10651dtrace_predicate_create(dtrace_difo_t *dp) 10652{ 10653 dtrace_predicate_t *pred; 10654 10655 ASSERT(MUTEX_HELD(&dtrace_lock)); 10656 ASSERT(dp->dtdo_refcnt != 0); 10657 10658 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10659 pred->dtp_difo = dp; 10660 pred->dtp_refcnt = 1; 10661 10662 if (!dtrace_difo_cacheable(dp)) 10663 return (pred); 10664 10665 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10666 /* 10667 * This is only theoretically possible -- we have had 2^32 10668 * cacheable predicates on this machine. We cannot allow any 10669 * more predicates to become cacheable: as unlikely as it is, 10670 * there may be a thread caching a (now stale) predicate cache 10671 * ID. 
(N.B.: the temptation is being successfully resisted to 10672 * have this cmn_err() "Holy shit -- we executed this code!") 10673 */ 10674 return (pred); 10675 } 10676 10677 pred->dtp_cacheid = dtrace_predcache_id++; 10678 10679 return (pred); 10680} 10681 10682static void 10683dtrace_predicate_hold(dtrace_predicate_t *pred) 10684{ 10685 ASSERT(MUTEX_HELD(&dtrace_lock)); 10686 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10687 ASSERT(pred->dtp_refcnt > 0); 10688 10689 pred->dtp_refcnt++; 10690} 10691 10692static void 10693dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10694{ 10695 dtrace_difo_t *dp = pred->dtp_difo; 10696 10697 ASSERT(MUTEX_HELD(&dtrace_lock)); 10698 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10699 ASSERT(pred->dtp_refcnt > 0); 10700 10701 if (--pred->dtp_refcnt == 0) { 10702 dtrace_difo_release(pred->dtp_difo, vstate); 10703 kmem_free(pred, sizeof (dtrace_predicate_t)); 10704 } 10705} 10706 10707/* 10708 * DTrace Action Description Functions 10709 */ 10710static dtrace_actdesc_t * 10711dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 10712 uint64_t uarg, uint64_t arg) 10713{ 10714 dtrace_actdesc_t *act; 10715 10716#if defined(sun) 10717 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 10718 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 10719#endif 10720 10721 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 10722 act->dtad_kind = kind; 10723 act->dtad_ntuple = ntuple; 10724 act->dtad_uarg = uarg; 10725 act->dtad_arg = arg; 10726 act->dtad_refcnt = 1; 10727 10728 return (act); 10729} 10730 10731static void 10732dtrace_actdesc_hold(dtrace_actdesc_t *act) 10733{ 10734 ASSERT(act->dtad_refcnt >= 1); 10735 act->dtad_refcnt++; 10736} 10737 10738static void 10739dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 10740{ 10741 dtrace_actkind_t kind = act->dtad_kind; 10742 dtrace_difo_t *dp; 10743 10744 ASSERT(act->dtad_refcnt >= 1); 10745 10746 if (--act->dtad_refcnt != 0) 10747 return; 10748 10749 if ((dp = act->dtad_difo) != NULL) 10750 dtrace_difo_release(dp, vstate); 10751 10752 if (DTRACEACT_ISPRINTFLIKE(kind)) { 10753 char *str = (char *)(uintptr_t)act->dtad_arg; 10754 10755#if defined(sun) 10756 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 10757 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 10758#endif 10759 10760 if (str != NULL) 10761 kmem_free(str, strlen(str) + 1); 10762 } 10763 10764 kmem_free(act, sizeof (dtrace_actdesc_t)); 10765} 10766 10767/* 10768 * DTrace ECB Functions 10769 */ 10770static dtrace_ecb_t * 10771dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 10772{ 10773 dtrace_ecb_t *ecb; 10774 dtrace_epid_t epid; 10775 10776 ASSERT(MUTEX_HELD(&dtrace_lock)); 10777 10778 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 10779 ecb->dte_predicate = NULL; 10780 ecb->dte_probe = probe; 10781 10782 /* 10783 * The default size is the size of the default action: recording 10784 * the header. 
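 *
 * (EPIDs are handed out starting at 1 -- 0 is reserved as
 * DTRACE_EPIDNONE -- and dts_ecbs is indexed by EPID - 1, with the
 * array doubled whenever a new EPID would fall past its end.)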
10785 */ 10786 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 10787 ecb->dte_alignment = sizeof (dtrace_epid_t); 10788 10789 epid = state->dts_epid++; 10790 10791 if (epid - 1 >= state->dts_necbs) { 10792 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 10793 int necbs = state->dts_necbs << 1; 10794 10795 ASSERT(epid == state->dts_necbs + 1); 10796 10797 if (necbs == 0) { 10798 ASSERT(oecbs == NULL); 10799 necbs = 1; 10800 } 10801 10802 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 10803 10804 if (oecbs != NULL) 10805 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 10806 10807 dtrace_membar_producer(); 10808 state->dts_ecbs = ecbs; 10809 10810 if (oecbs != NULL) { 10811 /* 10812 * If this state is active, we must dtrace_sync() 10813 * before we can free the old dts_ecbs array: we're 10814 * coming in hot, and there may be active ring 10815 * buffer processing (which indexes into the dts_ecbs 10816 * array) on another CPU. 10817 */ 10818 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 10819 dtrace_sync(); 10820 10821 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 10822 } 10823 10824 dtrace_membar_producer(); 10825 state->dts_necbs = necbs; 10826 } 10827 10828 ecb->dte_state = state; 10829 10830 ASSERT(state->dts_ecbs[epid - 1] == NULL); 10831 dtrace_membar_producer(); 10832 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 10833 10834 return (ecb); 10835} 10836 10837static void 10838dtrace_ecb_enable(dtrace_ecb_t *ecb) 10839{ 10840 dtrace_probe_t *probe = ecb->dte_probe; 10841 10842 ASSERT(MUTEX_HELD(&cpu_lock)); 10843 ASSERT(MUTEX_HELD(&dtrace_lock)); 10844 ASSERT(ecb->dte_next == NULL); 10845 10846 if (probe == NULL) { 10847 /* 10848 * This is the NULL probe -- there's nothing to do. 10849 */ 10850 return; 10851 } 10852 10853 if (probe->dtpr_ecb == NULL) { 10854 dtrace_provider_t *prov = probe->dtpr_provider; 10855 10856 /* 10857 * We're the first ECB on this probe. 10858 */ 10859 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 10860 10861 if (ecb->dte_predicate != NULL) 10862 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 10863 10864 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 10865 probe->dtpr_id, probe->dtpr_arg); 10866 } else { 10867 /* 10868 * This probe is already active. Swing the last pointer to 10869 * point to the new ECB, and issue a dtrace_sync() to assure 10870 * that all CPUs have seen the change. 10871 */ 10872 ASSERT(probe->dtpr_ecb_last != NULL); 10873 probe->dtpr_ecb_last->dte_next = ecb; 10874 probe->dtpr_ecb_last = ecb; 10875 probe->dtpr_predcache = 0; 10876 10877 dtrace_sync(); 10878 } 10879} 10880 10881static void 10882dtrace_ecb_resize(dtrace_ecb_t *ecb) 10883{ 10884 dtrace_action_t *act; 10885 uint32_t curneeded = UINT32_MAX; 10886 uint32_t aggbase = UINT32_MAX; 10887 10888 /* 10889 * If we record anything, we always record the dtrace_rechdr_t. (And 10890 * we always record it first.) 
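 *
 * In outline: non-tuple records are laid out in action order after the
 * header and accumulate into dte_size, while the key records of an
 * aggregating tuple are staged at an offset that leaves room for the
 * dtrace_aggid_t immediately before them and accumulate into dte_needed
 * only.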
10891 */ 10892 ecb->dte_size = sizeof (dtrace_rechdr_t); 10893 ecb->dte_alignment = sizeof (dtrace_epid_t); 10894 10895 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10896 dtrace_recdesc_t *rec = &act->dta_rec; 10897 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 10898 10899 ecb->dte_alignment = MAX(ecb->dte_alignment, 10900 rec->dtrd_alignment); 10901 10902 if (DTRACEACT_ISAGG(act->dta_kind)) { 10903 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10904 10905 ASSERT(rec->dtrd_size != 0); 10906 ASSERT(agg->dtag_first != NULL); 10907 ASSERT(act->dta_prev->dta_intuple); 10908 ASSERT(aggbase != UINT32_MAX); 10909 ASSERT(curneeded != UINT32_MAX); 10910 10911 agg->dtag_base = aggbase; 10912 10913 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 10914 rec->dtrd_offset = curneeded; 10915 curneeded += rec->dtrd_size; 10916 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 10917 10918 aggbase = UINT32_MAX; 10919 curneeded = UINT32_MAX; 10920 } else if (act->dta_intuple) { 10921 if (curneeded == UINT32_MAX) { 10922 /* 10923 * This is the first record in a tuple. Align 10924 * curneeded to be at offset 4 in an 8-byte 10925 * aligned block. 10926 */ 10927 ASSERT(act->dta_prev == NULL || 10928 !act->dta_prev->dta_intuple); 10929 ASSERT3U(aggbase, ==, UINT32_MAX); 10930 curneeded = P2PHASEUP(ecb->dte_size, 10931 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 10932 10933 aggbase = curneeded - sizeof (dtrace_aggid_t); 10934 ASSERT(IS_P2ALIGNED(aggbase, 10935 sizeof (uint64_t))); 10936 } 10937 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 10938 rec->dtrd_offset = curneeded; 10939 curneeded += rec->dtrd_size; 10940 } else { 10941 /* tuples must be followed by an aggregation */ 10942 ASSERT(act->dta_prev == NULL || 10943 !act->dta_prev->dta_intuple); 10944 10945 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 10946 rec->dtrd_alignment); 10947 rec->dtrd_offset = ecb->dte_size; 10948 ecb->dte_size += rec->dtrd_size; 10949 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 10950 } 10951 } 10952 10953 if ((act = ecb->dte_action) != NULL && 10954 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 10955 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 10956 /* 10957 * If the size is still sizeof (dtrace_rechdr_t), then all 10958 * actions store no data; set the size to 0. 
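 * (Per the check above, an ECB with no actions at all -- or whose only
 * action is a speculate -- keeps the header-sized record.)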
10959 */ 10960 ecb->dte_size = 0; 10961 } 10962 10963 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 10964 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 10965 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 10966 ecb->dte_needed); 10967} 10968 10969static dtrace_action_t * 10970dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10971{ 10972 dtrace_aggregation_t *agg; 10973 size_t size = sizeof (uint64_t); 10974 int ntuple = desc->dtad_ntuple; 10975 dtrace_action_t *act; 10976 dtrace_recdesc_t *frec; 10977 dtrace_aggid_t aggid; 10978 dtrace_state_t *state = ecb->dte_state; 10979 10980 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 10981 agg->dtag_ecb = ecb; 10982 10983 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 10984 10985 switch (desc->dtad_kind) { 10986 case DTRACEAGG_MIN: 10987 agg->dtag_initial = INT64_MAX; 10988 agg->dtag_aggregate = dtrace_aggregate_min; 10989 break; 10990 10991 case DTRACEAGG_MAX: 10992 agg->dtag_initial = INT64_MIN; 10993 agg->dtag_aggregate = dtrace_aggregate_max; 10994 break; 10995 10996 case DTRACEAGG_COUNT: 10997 agg->dtag_aggregate = dtrace_aggregate_count; 10998 break; 10999 11000 case DTRACEAGG_QUANTIZE: 11001 agg->dtag_aggregate = dtrace_aggregate_quantize; 11002 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11003 sizeof (uint64_t); 11004 break; 11005 11006 case DTRACEAGG_LQUANTIZE: { 11007 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11008 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11009 11010 agg->dtag_initial = desc->dtad_arg; 11011 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11012 11013 if (step == 0 || levels == 0) 11014 goto err; 11015 11016 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11017 break; 11018 } 11019 11020 case DTRACEAGG_LLQUANTIZE: { 11021 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11022 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11023 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11024 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11025 int64_t v; 11026 11027 agg->dtag_initial = desc->dtad_arg; 11028 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11029 11030 if (factor < 2 || low >= high || nsteps < factor) 11031 goto err; 11032 11033 /* 11034 * Now check that the number of steps evenly divides a power 11035 * of the factor. (This assures both integer bucket size and 11036 * linearity within each magnitude.) 11037 */ 11038 for (v = factor; v < nsteps; v *= factor) 11039 continue; 11040 11041 if ((v % nsteps) || (nsteps % factor)) 11042 goto err; 11043 11044 size = (dtrace_aggregate_llquantize_bucket(factor, 11045 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11046 break; 11047 } 11048 11049 case DTRACEAGG_AVG: 11050 agg->dtag_aggregate = dtrace_aggregate_avg; 11051 size = sizeof (uint64_t) * 2; 11052 break; 11053 11054 case DTRACEAGG_STDDEV: 11055 agg->dtag_aggregate = dtrace_aggregate_stddev; 11056 size = sizeof (uint64_t) * 4; 11057 break; 11058 11059 case DTRACEAGG_SUM: 11060 agg->dtag_aggregate = dtrace_aggregate_sum; 11061 break; 11062 11063 default: 11064 goto err; 11065 } 11066 11067 agg->dtag_action.dta_rec.dtrd_size = size; 11068 11069 if (ntuple == 0) 11070 goto err; 11071 11072 /* 11073 * We must make sure that we have enough actions for the n-tuple. 
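 * (We walk backward from the last action: the ntuple most recently added
 * non-aggregating actions become the keys of this aggregation, with
 * dtag_first pointing at the earliest of them.)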
11074 */ 11075 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11076 if (DTRACEACT_ISAGG(act->dta_kind)) 11077 break; 11078 11079 if (--ntuple == 0) { 11080 /* 11081 * This is the action with which our n-tuple begins. 11082 */ 11083 agg->dtag_first = act; 11084 goto success; 11085 } 11086 } 11087 11088 /* 11089 * This n-tuple is short by ntuple elements. Return failure. 11090 */ 11091 ASSERT(ntuple != 0); 11092err: 11093 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11094 return (NULL); 11095 11096success: 11097 /* 11098 * If the last action in the tuple has a size of zero, it's actually 11099 * an expression argument for the aggregating action. 11100 */ 11101 ASSERT(ecb->dte_action_last != NULL); 11102 act = ecb->dte_action_last; 11103 11104 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11105 ASSERT(act->dta_difo != NULL); 11106 11107 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11108 agg->dtag_hasarg = 1; 11109 } 11110 11111 /* 11112 * We need to allocate an id for this aggregation. 11113 */ 11114#if defined(sun) 11115 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11116 VM_BESTFIT | VM_SLEEP); 11117#else 11118 aggid = alloc_unr(state->dts_aggid_arena); 11119#endif 11120 11121 if (aggid - 1 >= state->dts_naggregations) { 11122 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11123 dtrace_aggregation_t **aggs; 11124 int naggs = state->dts_naggregations << 1; 11125 int onaggs = state->dts_naggregations; 11126 11127 ASSERT(aggid == state->dts_naggregations + 1); 11128 11129 if (naggs == 0) { 11130 ASSERT(oaggs == NULL); 11131 naggs = 1; 11132 } 11133 11134 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11135 11136 if (oaggs != NULL) { 11137 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11138 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11139 } 11140 11141 state->dts_aggregations = aggs; 11142 state->dts_naggregations = naggs; 11143 } 11144 11145 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11146 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11147 11148 frec = &agg->dtag_first->dta_rec; 11149 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11150 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11151 11152 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11153 ASSERT(!act->dta_intuple); 11154 act->dta_intuple = 1; 11155 } 11156 11157 return (&agg->dtag_action); 11158} 11159 11160static void 11161dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11162{ 11163 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11164 dtrace_state_t *state = ecb->dte_state; 11165 dtrace_aggid_t aggid = agg->dtag_id; 11166 11167 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11168#if defined(sun) 11169 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11170#else 11171 free_unr(state->dts_aggid_arena, aggid); 11172#endif 11173 11174 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11175 state->dts_aggregations[aggid - 1] = NULL; 11176 11177 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11178} 11179 11180static int 11181dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11182{ 11183 dtrace_action_t *action, *last; 11184 dtrace_difo_t *dp = desc->dtad_difo; 11185 uint32_t size = 0, align = sizeof (uint8_t), mask; 11186 uint16_t format = 0; 11187 dtrace_recdesc_t *rec; 11188 dtrace_state_t *state = ecb->dte_state; 11189 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11190 uint64_t arg = desc->dtad_arg; 11191 11192 ASSERT(MUTEX_HELD(&dtrace_lock)); 11193 
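	/*
	 * An ECB that is sharing a cached action list (refcnt > 1) must not
	 * have actions added to it; the assertion below checks as much.
	 */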
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11194 11195 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11196 /* 11197 * If this is an aggregating action, there must be neither 11198 * a speculate nor a commit on the action chain. 11199 */ 11200 dtrace_action_t *act; 11201 11202 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11203 if (act->dta_kind == DTRACEACT_COMMIT) 11204 return (EINVAL); 11205 11206 if (act->dta_kind == DTRACEACT_SPECULATE) 11207 return (EINVAL); 11208 } 11209 11210 action = dtrace_ecb_aggregation_create(ecb, desc); 11211 11212 if (action == NULL) 11213 return (EINVAL); 11214 } else { 11215 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11216 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11217 dp != NULL && dp->dtdo_destructive)) { 11218 state->dts_destructive = 1; 11219 } 11220 11221 switch (desc->dtad_kind) { 11222 case DTRACEACT_PRINTF: 11223 case DTRACEACT_PRINTA: 11224 case DTRACEACT_SYSTEM: 11225 case DTRACEACT_FREOPEN: 11226 case DTRACEACT_DIFEXPR: 11227 /* 11228 * We know that our arg is a string -- turn it into a 11229 * format. 11230 */ 11231 if (arg == 0) { 11232 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11233 desc->dtad_kind == DTRACEACT_DIFEXPR); 11234 format = 0; 11235 } else { 11236 ASSERT(arg != 0); 11237#if defined(sun) 11238 ASSERT(arg > KERNELBASE); 11239#endif 11240 format = dtrace_format_add(state, 11241 (char *)(uintptr_t)arg); 11242 } 11243 11244 /*FALLTHROUGH*/ 11245 case DTRACEACT_LIBACT: 11246 case DTRACEACT_TRACEMEM: 11247 case DTRACEACT_TRACEMEM_DYNSIZE: 11248 if (dp == NULL) 11249 return (EINVAL); 11250 11251 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11252 break; 11253 11254 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11255 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11256 return (EINVAL); 11257 11258 size = opt[DTRACEOPT_STRSIZE]; 11259 } 11260 11261 break; 11262 11263 case DTRACEACT_STACK: 11264 if ((nframes = arg) == 0) { 11265 nframes = opt[DTRACEOPT_STACKFRAMES]; 11266 ASSERT(nframes > 0); 11267 arg = nframes; 11268 } 11269 11270 size = nframes * sizeof (pc_t); 11271 break; 11272 11273 case DTRACEACT_JSTACK: 11274 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11275 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11276 11277 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11278 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11279 11280 arg = DTRACE_USTACK_ARG(nframes, strsize); 11281 11282 /*FALLTHROUGH*/ 11283 case DTRACEACT_USTACK: 11284 if (desc->dtad_kind != DTRACEACT_JSTACK && 11285 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11286 strsize = DTRACE_USTACK_STRSIZE(arg); 11287 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11288 ASSERT(nframes > 0); 11289 arg = DTRACE_USTACK_ARG(nframes, strsize); 11290 } 11291 11292 /* 11293 * Save a slot for the pid. 11294 */ 11295 size = (nframes + 1) * sizeof (uint64_t); 11296 size += DTRACE_USTACK_STRSIZE(arg); 11297 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 11298 11299 break; 11300 11301 case DTRACEACT_SYM: 11302 case DTRACEACT_MOD: 11303 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 11304 sizeof (uint64_t)) || 11305 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11306 return (EINVAL); 11307 break; 11308 11309 case DTRACEACT_USYM: 11310 case DTRACEACT_UMOD: 11311 case DTRACEACT_UADDR: 11312 if (dp == NULL || 11313 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 11314 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11315 return (EINVAL); 11316 11317 /* 11318 * We have a slot for the pid, plus a slot for the 11319 * argument. 
To keep things simple (aligned with 11320 * bitness-neutral sizing), we store each as a 64-bit 11321 * quantity. 11322 */ 11323 size = 2 * sizeof (uint64_t); 11324 break; 11325 11326 case DTRACEACT_STOP: 11327 case DTRACEACT_BREAKPOINT: 11328 case DTRACEACT_PANIC: 11329 break; 11330 11331 case DTRACEACT_CHILL: 11332 case DTRACEACT_DISCARD: 11333 case DTRACEACT_RAISE: 11334 if (dp == NULL) 11335 return (EINVAL); 11336 break; 11337 11338 case DTRACEACT_EXIT: 11339 if (dp == NULL || 11340 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 11341 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11342 return (EINVAL); 11343 break; 11344 11345 case DTRACEACT_SPECULATE: 11346 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 11347 return (EINVAL); 11348 11349 if (dp == NULL) 11350 return (EINVAL); 11351 11352 state->dts_speculates = 1; 11353 break; 11354 11355 case DTRACEACT_PRINTM: 11356 size = dp->dtdo_rtype.dtdt_size; 11357 break; 11358 11359 case DTRACEACT_PRINTT: 11360 size = dp->dtdo_rtype.dtdt_size; 11361 break; 11362 11363 case DTRACEACT_COMMIT: { 11364 dtrace_action_t *act = ecb->dte_action; 11365 11366 for (; act != NULL; act = act->dta_next) { 11367 if (act->dta_kind == DTRACEACT_COMMIT) 11368 return (EINVAL); 11369 } 11370 11371 if (dp == NULL) 11372 return (EINVAL); 11373 break; 11374 } 11375 11376 default: 11377 return (EINVAL); 11378 } 11379 11380 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 11381 /* 11382 * If this is a data-storing action or a speculate, 11383 * we must be sure that there isn't a commit on the 11384 * action chain. 11385 */ 11386 dtrace_action_t *act = ecb->dte_action; 11387 11388 for (; act != NULL; act = act->dta_next) { 11389 if (act->dta_kind == DTRACEACT_COMMIT) 11390 return (EINVAL); 11391 } 11392 } 11393 11394 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11395 action->dta_rec.dtrd_size = size; 11396 } 11397 11398 action->dta_refcnt = 1; 11399 rec = &action->dta_rec; 11400 size = rec->dtrd_size; 11401 11402 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11403 if (!(size & mask)) { 11404 align = mask + 1; 11405 break; 11406 } 11407 } 11408 11409 action->dta_kind = desc->dtad_kind; 11410 11411 if ((action->dta_difo = dp) != NULL) 11412 dtrace_difo_hold(dp); 11413 11414 rec->dtrd_action = action->dta_kind; 11415 rec->dtrd_arg = arg; 11416 rec->dtrd_uarg = desc->dtad_uarg; 11417 rec->dtrd_alignment = (uint16_t)align; 11418 rec->dtrd_format = format; 11419 11420 if ((last = ecb->dte_action_last) != NULL) { 11421 ASSERT(ecb->dte_action != NULL); 11422 action->dta_prev = last; 11423 last->dta_next = action; 11424 } else { 11425 ASSERT(ecb->dte_action == NULL); 11426 ecb->dte_action = action; 11427 } 11428 11429 ecb->dte_action_last = action; 11430 11431 return (0); 11432} 11433 11434static void 11435dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11436{ 11437 dtrace_action_t *act = ecb->dte_action, *next; 11438 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11439 dtrace_difo_t *dp; 11440 uint16_t format; 11441 11442 if (act != NULL && act->dta_refcnt > 1) { 11443 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11444 act->dta_refcnt--; 11445 } else { 11446 for (; act != NULL; act = next) { 11447 next = act->dta_next; 11448 ASSERT(next != NULL || act == ecb->dte_action_last); 11449 ASSERT(act->dta_refcnt == 1); 11450 11451 if ((format = act->dta_rec.dtrd_format) != 0) 11452 dtrace_format_remove(ecb->dte_state, format); 11453 11454 if ((dp = act->dta_difo) != NULL) 11455 dtrace_difo_release(dp, vstate); 
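			/*
			 * Aggregating actions are embedded in a larger
			 * dtrace_aggregation_t and must be torn down with
			 * dtrace_ecb_aggregation_destroy(); anything else is
			 * a bare dtrace_action_t and can simply be freed.
			 */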
11456 11457 if (DTRACEACT_ISAGG(act->dta_kind)) { 11458 dtrace_ecb_aggregation_destroy(ecb, act); 11459 } else { 11460 kmem_free(act, sizeof (dtrace_action_t)); 11461 } 11462 } 11463 } 11464 11465 ecb->dte_action = NULL; 11466 ecb->dte_action_last = NULL; 11467 ecb->dte_size = 0; 11468} 11469 11470static void 11471dtrace_ecb_disable(dtrace_ecb_t *ecb) 11472{ 11473 /* 11474 * We disable the ECB by removing it from its probe. 11475 */ 11476 dtrace_ecb_t *pecb, *prev = NULL; 11477 dtrace_probe_t *probe = ecb->dte_probe; 11478 11479 ASSERT(MUTEX_HELD(&dtrace_lock)); 11480 11481 if (probe == NULL) { 11482 /* 11483 * This is the NULL probe; there is nothing to disable. 11484 */ 11485 return; 11486 } 11487 11488 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11489 if (pecb == ecb) 11490 break; 11491 prev = pecb; 11492 } 11493 11494 ASSERT(pecb != NULL); 11495 11496 if (prev == NULL) { 11497 probe->dtpr_ecb = ecb->dte_next; 11498 } else { 11499 prev->dte_next = ecb->dte_next; 11500 } 11501 11502 if (ecb == probe->dtpr_ecb_last) { 11503 ASSERT(ecb->dte_next == NULL); 11504 probe->dtpr_ecb_last = prev; 11505 } 11506 11507 /* 11508 * The ECB has been disconnected from the probe; now sync to assure 11509 * that all CPUs have seen the change before returning. 11510 */ 11511 dtrace_sync(); 11512 11513 if (probe->dtpr_ecb == NULL) { 11514 /* 11515 * That was the last ECB on the probe; clear the predicate 11516 * cache ID for the probe, disable it and sync one more time 11517 * to assure that we'll never hit it again. 11518 */ 11519 dtrace_provider_t *prov = probe->dtpr_provider; 11520 11521 ASSERT(ecb->dte_next == NULL); 11522 ASSERT(probe->dtpr_ecb_last == NULL); 11523 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11524 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11525 probe->dtpr_id, probe->dtpr_arg); 11526 dtrace_sync(); 11527 } else { 11528 /* 11529 * There is at least one ECB remaining on the probe. If there 11530 * is _exactly_ one, set the probe's predicate cache ID to be 11531 * the predicate cache ID of the remaining ECB. 
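 * (The predicate cache ID allows probe context to cheaply skip a probe
 * for threads on which that lone predicate has already evaluated to
 * false.)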
11532 */ 11533 ASSERT(probe->dtpr_ecb_last != NULL); 11534 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11535 11536 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11537 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11538 11539 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11540 11541 if (p != NULL) 11542 probe->dtpr_predcache = p->dtp_cacheid; 11543 } 11544 11545 ecb->dte_next = NULL; 11546 } 11547} 11548 11549static void 11550dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11551{ 11552 dtrace_state_t *state = ecb->dte_state; 11553 dtrace_vstate_t *vstate = &state->dts_vstate; 11554 dtrace_predicate_t *pred; 11555 dtrace_epid_t epid = ecb->dte_epid; 11556 11557 ASSERT(MUTEX_HELD(&dtrace_lock)); 11558 ASSERT(ecb->dte_next == NULL); 11559 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11560 11561 if ((pred = ecb->dte_predicate) != NULL) 11562 dtrace_predicate_release(pred, vstate); 11563 11564 dtrace_ecb_action_remove(ecb); 11565 11566 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11567 state->dts_ecbs[epid - 1] = NULL; 11568 11569 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11570} 11571 11572static dtrace_ecb_t * 11573dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11574 dtrace_enabling_t *enab) 11575{ 11576 dtrace_ecb_t *ecb; 11577 dtrace_predicate_t *pred; 11578 dtrace_actdesc_t *act; 11579 dtrace_provider_t *prov; 11580 dtrace_ecbdesc_t *desc = enab->dten_current; 11581 11582 ASSERT(MUTEX_HELD(&dtrace_lock)); 11583 ASSERT(state != NULL); 11584 11585 ecb = dtrace_ecb_add(state, probe); 11586 ecb->dte_uarg = desc->dted_uarg; 11587 11588 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11589 dtrace_predicate_hold(pred); 11590 ecb->dte_predicate = pred; 11591 } 11592 11593 if (probe != NULL) { 11594 /* 11595 * If the provider shows more leg than the consumer is old 11596 * enough to see, we need to enable the appropriate implicit 11597 * predicate bits to prevent the ecb from activating at 11598 * revealing times. 11599 * 11600 * Providers specifying DTRACE_PRIV_USER at register time 11601 * are stating that they need the /proc-style privilege 11602 * model to be enforced, and this is what DTRACE_COND_OWNER 11603 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11604 */ 11605 prov = probe->dtpr_provider; 11606 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11607 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11608 ecb->dte_cond |= DTRACE_COND_OWNER; 11609 11610 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11611 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11612 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11613 11614 /* 11615 * If the provider shows us kernel innards and the user 11616 * is lacking sufficient privilege, enable the 11617 * DTRACE_COND_USERMODE implicit predicate. 11618 */ 11619 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11620 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11621 ecb->dte_cond |= DTRACE_COND_USERMODE; 11622 } 11623 11624 if (dtrace_ecb_create_cache != NULL) { 11625 /* 11626 * If we have a cached ecb, we'll use its action list instead 11627 * of creating our own (saving both time and space). 
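 * (The cached ECB's action list is shared -- the head action's refcount
 * is bumped rather than the list copied -- and its size, needed-space
 * and alignment figures are inherited wholesale.)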
11628 */ 11629 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11630 dtrace_action_t *act = cached->dte_action; 11631 11632 if (act != NULL) { 11633 ASSERT(act->dta_refcnt > 0); 11634 act->dta_refcnt++; 11635 ecb->dte_action = act; 11636 ecb->dte_action_last = cached->dte_action_last; 11637 ecb->dte_needed = cached->dte_needed; 11638 ecb->dte_size = cached->dte_size; 11639 ecb->dte_alignment = cached->dte_alignment; 11640 } 11641 11642 return (ecb); 11643 } 11644 11645 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11646 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11647 dtrace_ecb_destroy(ecb); 11648 return (NULL); 11649 } 11650 } 11651 11652 dtrace_ecb_resize(ecb); 11653 11654 return (dtrace_ecb_create_cache = ecb); 11655} 11656 11657static int 11658dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11659{ 11660 dtrace_ecb_t *ecb; 11661 dtrace_enabling_t *enab = arg; 11662 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11663 11664 ASSERT(state != NULL); 11665 11666 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11667 /* 11668 * This probe was created in a generation for which this 11669 * enabling has previously created ECBs; we don't want to 11670 * enable it again, so just kick out. 11671 */ 11672 return (DTRACE_MATCH_NEXT); 11673 } 11674 11675 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11676 return (DTRACE_MATCH_DONE); 11677 11678 dtrace_ecb_enable(ecb); 11679 return (DTRACE_MATCH_NEXT); 11680} 11681 11682static dtrace_ecb_t * 11683dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11684{ 11685 dtrace_ecb_t *ecb; 11686 11687 ASSERT(MUTEX_HELD(&dtrace_lock)); 11688 11689 if (id == 0 || id > state->dts_necbs) 11690 return (NULL); 11691 11692 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11693 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11694 11695 return (state->dts_ecbs[id - 1]); 11696} 11697 11698static dtrace_aggregation_t * 11699dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 11700{ 11701 dtrace_aggregation_t *agg; 11702 11703 ASSERT(MUTEX_HELD(&dtrace_lock)); 11704 11705 if (id == 0 || id > state->dts_naggregations) 11706 return (NULL); 11707 11708 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 11709 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 11710 agg->dtag_id == id); 11711 11712 return (state->dts_aggregations[id - 1]); 11713} 11714 11715/* 11716 * DTrace Buffer Functions 11717 * 11718 * The following functions manipulate DTrace buffers. Most of these functions 11719 * are called in the context of establishing or processing consumer state; 11720 * exceptions are explicitly noted. 11721 */ 11722 11723/* 11724 * Note: called from cross call context. This function switches the two 11725 * buffers on a given CPU. The atomicity of this operation is assured by 11726 * disabling interrupts while the actual switch takes place; the disabling of 11727 * interrupts serializes the execution with any execution of dtrace_probe() on 11728 * the same CPU. 
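 *
 * In outline: the active (dtb_tomax) and inactive (dtb_xamot) buffers
 * trade places, the active buffer's accounting (offset, drops, errors,
 * flags) is snapshotted into the dtb_xamot_* fields for the consumer,
 * and the active counters are reset for the new interval.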
11729 */ 11730static void 11731dtrace_buffer_switch(dtrace_buffer_t *buf) 11732{ 11733 caddr_t tomax = buf->dtb_tomax; 11734 caddr_t xamot = buf->dtb_xamot; 11735 dtrace_icookie_t cookie; 11736 hrtime_t now; 11737 11738 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11739 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 11740 11741 cookie = dtrace_interrupt_disable(); 11742 now = dtrace_gethrtime(); 11743 buf->dtb_tomax = xamot; 11744 buf->dtb_xamot = tomax; 11745 buf->dtb_xamot_drops = buf->dtb_drops; 11746 buf->dtb_xamot_offset = buf->dtb_offset; 11747 buf->dtb_xamot_errors = buf->dtb_errors; 11748 buf->dtb_xamot_flags = buf->dtb_flags; 11749 buf->dtb_offset = 0; 11750 buf->dtb_drops = 0; 11751 buf->dtb_errors = 0; 11752 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 11753 buf->dtb_interval = now - buf->dtb_switched; 11754 buf->dtb_switched = now; 11755 dtrace_interrupt_enable(cookie); 11756} 11757 11758/* 11759 * Note: called from cross call context. This function activates a buffer 11760 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 11761 * is guaranteed by the disabling of interrupts. 11762 */ 11763static void 11764dtrace_buffer_activate(dtrace_state_t *state) 11765{ 11766 dtrace_buffer_t *buf; 11767 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 11768 11769 buf = &state->dts_buffer[curcpu]; 11770 11771 if (buf->dtb_tomax != NULL) { 11772 /* 11773 * We might like to assert that the buffer is marked inactive, 11774 * but this isn't necessarily true: the buffer for the CPU 11775 * that processes the BEGIN probe has its buffer activated 11776 * manually. In this case, we take the (harmless) action 11777 * re-clearing the bit INACTIVE bit. 11778 */ 11779 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 11780 } 11781 11782 dtrace_interrupt_enable(cookie); 11783} 11784 11785static int 11786dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 11787 processorid_t cpu, int *factor) 11788{ 11789#if defined(sun) 11790 cpu_t *cp; 11791#endif 11792 dtrace_buffer_t *buf; 11793 int allocated = 0, desired = 0; 11794 11795#if defined(sun) 11796 ASSERT(MUTEX_HELD(&cpu_lock)); 11797 ASSERT(MUTEX_HELD(&dtrace_lock)); 11798 11799 *factor = 1; 11800 11801 if (size > dtrace_nonroot_maxsize && 11802 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 11803 return (EFBIG); 11804 11805 cp = cpu_list; 11806 11807 do { 11808 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 11809 continue; 11810 11811 buf = &bufs[cp->cpu_id]; 11812 11813 /* 11814 * If there is already a buffer allocated for this CPU, it 11815 * is only possible that this is a DR event. 
In this case, 11816 */ 11817 if (buf->dtb_tomax != NULL) { 11818 ASSERT(buf->dtb_size == size); 11819 continue; 11820 } 11821 11822 ASSERT(buf->dtb_xamot == NULL); 11823 11824 if ((buf->dtb_tomax = kmem_zalloc(size, 11825 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11826 goto err; 11827 11828 buf->dtb_size = size; 11829 buf->dtb_flags = flags; 11830 buf->dtb_offset = 0; 11831 buf->dtb_drops = 0; 11832 11833 if (flags & DTRACEBUF_NOSWITCH) 11834 continue; 11835 11836 if ((buf->dtb_xamot = kmem_zalloc(size, 11837 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11838 goto err; 11839 } while ((cp = cp->cpu_next) != cpu_list); 11840 11841 return (0); 11842 11843err: 11844 cp = cpu_list; 11845 11846 do { 11847 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 11848 continue; 11849 11850 buf = &bufs[cp->cpu_id]; 11851 desired += 2; 11852 11853 if (buf->dtb_xamot != NULL) { 11854 ASSERT(buf->dtb_tomax != NULL); 11855 ASSERT(buf->dtb_size == size); 11856 kmem_free(buf->dtb_xamot, size); 11857 allocated++; 11858 } 11859 11860 if (buf->dtb_tomax != NULL) { 11861 ASSERT(buf->dtb_size == size); 11862 kmem_free(buf->dtb_tomax, size); 11863 allocated++; 11864 } 11865 11866 buf->dtb_tomax = NULL; 11867 buf->dtb_xamot = NULL; 11868 buf->dtb_size = 0; 11869 } while ((cp = cp->cpu_next) != cpu_list); 11870#else 11871 int i; 11872 11873 *factor = 1; 11874#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 11875 /* 11876 * FreeBSD isn't good at limiting the amount of memory we 11877 * ask to malloc, so let's place a limit here before trying 11878 * to do something that might well end in tears at bedtime. 11879 */ 11880 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 11881 return (ENOMEM); 11882#endif 11883 11884 ASSERT(MUTEX_HELD(&dtrace_lock)); 11885 CPU_FOREACH(i) { 11886 if (cpu != DTRACE_CPUALL && cpu != i) 11887 continue; 11888 11889 buf = &bufs[i]; 11890 11891 /* 11892 * If there is already a buffer allocated for this CPU, it 11893 * is only possible that this is a DR event. In this case, 11894 * the buffer size must match our specified size. 11895 */ 11896 if (buf->dtb_tomax != NULL) { 11897 ASSERT(buf->dtb_size == size); 11898 continue; 11899 } 11900 11901 ASSERT(buf->dtb_xamot == NULL); 11902 11903 if ((buf->dtb_tomax = kmem_zalloc(size, 11904 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11905 goto err; 11906 11907 buf->dtb_size = size; 11908 buf->dtb_flags = flags; 11909 buf->dtb_offset = 0; 11910 buf->dtb_drops = 0; 11911 11912 if (flags & DTRACEBUF_NOSWITCH) 11913 continue; 11914 11915 if ((buf->dtb_xamot = kmem_zalloc(size, 11916 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11917 goto err; 11918 } 11919 11920 return (0); 11921 11922err: 11923 /* 11924 * Error allocating memory, so free the buffers that were 11925 * allocated before the failed allocation. 11926 */ 11927 CPU_FOREACH(i) { 11928 if (cpu != DTRACE_CPUALL && cpu != i) 11929 continue; 11930 11931 buf = &bufs[i]; 11932 desired += 2; 11933 11934 if (buf->dtb_xamot != NULL) { 11935 ASSERT(buf->dtb_tomax != NULL); 11936 ASSERT(buf->dtb_size == size); 11937 kmem_free(buf->dtb_xamot, size); 11938 allocated++; 11939 } 11940 11941 if (buf->dtb_tomax != NULL) { 11942 ASSERT(buf->dtb_size == size); 11943 kmem_free(buf->dtb_tomax, size); 11944 allocated++; 11945 } 11946 11947 buf->dtb_tomax = NULL; 11948 buf->dtb_xamot = NULL; 11949 buf->dtb_size = 0; 11950 11951 } 11952#endif 11953 *factor = desired / (allocated > 0 ? allocated : 1); 11954 11955 return (ENOMEM); 11956} 11957 11958/* 11959 * Note: called from probe context. 
This function just increments the drop 11960 * count on a buffer. It has been made a function to allow for the 11961 * possibility of understanding the source of mysterious drop counts. (A 11962 * problem for which one may be particularly disappointed that DTrace cannot 11963 * be used to understand DTrace.) 11964 */ 11965static void 11966dtrace_buffer_drop(dtrace_buffer_t *buf) 11967{ 11968 buf->dtb_drops++; 11969} 11970 11971/* 11972 * Note: called from probe context. This function is called to reserve space 11973 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 11974 * mstate. Returns the new offset in the buffer, or a negative value if an 11975 * error has occurred. 11976 */ 11977static intptr_t 11978dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 11979 dtrace_state_t *state, dtrace_mstate_t *mstate) 11980{ 11981 intptr_t offs = buf->dtb_offset, soffs; 11982 intptr_t woffs; 11983 caddr_t tomax; 11984 size_t total; 11985 11986 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 11987 return (-1); 11988 11989 if ((tomax = buf->dtb_tomax) == NULL) { 11990 dtrace_buffer_drop(buf); 11991 return (-1); 11992 } 11993 11994 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 11995 while (offs & (align - 1)) { 11996 /* 11997 * Assert that our alignment is off by a number which 11998 * is itself sizeof (uint32_t) aligned. 11999 */ 12000 ASSERT(!((align - (offs & (align - 1))) & 12001 (sizeof (uint32_t) - 1))); 12002 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12003 offs += sizeof (uint32_t); 12004 } 12005 12006 if ((soffs = offs + needed) > buf->dtb_size) { 12007 dtrace_buffer_drop(buf); 12008 return (-1); 12009 } 12010 12011 if (mstate == NULL) 12012 return (offs); 12013 12014 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12015 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12016 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12017 12018 return (offs); 12019 } 12020 12021 if (buf->dtb_flags & DTRACEBUF_FILL) { 12022 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12023 (buf->dtb_flags & DTRACEBUF_FULL)) 12024 return (-1); 12025 goto out; 12026 } 12027 12028 total = needed + (offs & (align - 1)); 12029 12030 /* 12031 * For a ring buffer, life is quite a bit more complicated. Before 12032 * we can store any padding, we need to adjust our wrapping offset. 12033 * (If we've never before wrapped or we're not about to, no adjustment 12034 * is required.) 12035 */ 12036 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12037 offs + total > buf->dtb_size) { 12038 woffs = buf->dtb_xamot_offset; 12039 12040 if (offs + total > buf->dtb_size) { 12041 /* 12042 * We can't fit in the end of the buffer. First, a 12043 * sanity check that we can fit in the buffer at all. 12044 */ 12045 if (total > buf->dtb_size) { 12046 dtrace_buffer_drop(buf); 12047 return (-1); 12048 } 12049 12050 /* 12051 * We're going to be storing at the top of the buffer, 12052 * so now we need to deal with the wrapped offset. We 12053 * only reset our wrapped offset to 0 if it is 12054 * currently greater than the current offset. If it 12055 * is less than the current offset, it is because a 12056 * previous allocation induced a wrap -- but the 12057 * allocation didn't subsequently take the space due 12058 * to an error or false predicate evaluation. In this 12059 * case, we'll just leave the wrapped offset alone: if 12060 * the wrapped offset hasn't been advanced far enough 12061 * for this allocation, it will be adjusted in the 12062 * lower loop. 
12063 */ 12064 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12065 if (woffs >= offs) 12066 woffs = 0; 12067 } else { 12068 woffs = 0; 12069 } 12070 12071 /* 12072 * Now we know that we're going to be storing to the 12073 * top of the buffer and that there is room for us 12074 * there. We need to clear the buffer from the current 12075 * offset to the end (there may be old gunk there). 12076 */ 12077 while (offs < buf->dtb_size) 12078 tomax[offs++] = 0; 12079 12080 /* 12081 * We need to set our offset to zero. And because we 12082 * are wrapping, we need to set the bit indicating as 12083 * much. We can also adjust our needed space back 12084 * down to the space required by the ECB -- we know 12085 * that the top of the buffer is aligned. 12086 */ 12087 offs = 0; 12088 total = needed; 12089 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12090 } else { 12091 /* 12092 * There is room for us in the buffer, so we simply 12093 * need to check the wrapped offset. 12094 */ 12095 if (woffs < offs) { 12096 /* 12097 * The wrapped offset is less than the offset. 12098 * This can happen if we allocated buffer space 12099 * that induced a wrap, but then we didn't 12100 * subsequently take the space due to an error 12101 * or false predicate evaluation. This is 12102 * okay; we know that _this_ allocation isn't 12103 * going to induce a wrap. We still can't 12104 * reset the wrapped offset to be zero, 12105 * however: the space may have been trashed in 12106 * the previous failed probe attempt. But at 12107 * least the wrapped offset doesn't need to 12108 * be adjusted at all... 12109 */ 12110 goto out; 12111 } 12112 } 12113 12114 while (offs + total > woffs) { 12115 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12116 size_t size; 12117 12118 if (epid == DTRACE_EPIDNONE) { 12119 size = sizeof (uint32_t); 12120 } else { 12121 ASSERT3U(epid, <=, state->dts_necbs); 12122 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12123 12124 size = state->dts_ecbs[epid - 1]->dte_size; 12125 } 12126 12127 ASSERT(woffs + size <= buf->dtb_size); 12128 ASSERT(size != 0); 12129 12130 if (woffs + size == buf->dtb_size) { 12131 /* 12132 * We've reached the end of the buffer; we want 12133 * to set the wrapped offset to 0 and break 12134 * out. However, if the offs is 0, then we're 12135 * in a strange edge-condition: the amount of 12136 * space that we want to reserve plus the size 12137 * of the record that we're overwriting is 12138 * greater than the size of the buffer. This 12139 * is problematic because if we reserve the 12140 * space but subsequently don't consume it (due 12141 * to a failed predicate or error) the wrapped 12142 * offset will be 0 -- yet the EPID at offset 0 12143 * will not be committed. This situation is 12144 * relatively easy to deal with: if we're in 12145 * this case, the buffer is indistinguishable 12146 * from one that hasn't wrapped; we need only 12147 * finish the job by clearing the wrapped bit, 12148 * explicitly setting the offset to be 0, and 12149 * zero'ing out the old data in the buffer. 12150 */ 12151 if (offs == 0) { 12152 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12153 buf->dtb_offset = 0; 12154 woffs = total; 12155 12156 while (woffs < buf->dtb_size) 12157 tomax[woffs++] = 0; 12158 } 12159 12160 woffs = 0; 12161 break; 12162 } 12163 12164 woffs += size; 12165 } 12166 12167 /* 12168 * We have a wrapped offset. It may be that the wrapped offset 12169 * has become zero -- that's okay. 
12170 */ 12171 buf->dtb_xamot_offset = woffs; 12172 } 12173 12174out: 12175 /* 12176 * Now we can plow the buffer with any necessary padding. 12177 */ 12178 while (offs & (align - 1)) { 12179 /* 12180 * Assert that our alignment is off by a number which 12181 * is itself sizeof (uint32_t) aligned. 12182 */ 12183 ASSERT(!((align - (offs & (align - 1))) & 12184 (sizeof (uint32_t) - 1))); 12185 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12186 offs += sizeof (uint32_t); 12187 } 12188 12189 if (buf->dtb_flags & DTRACEBUF_FILL) { 12190 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12191 buf->dtb_flags |= DTRACEBUF_FULL; 12192 return (-1); 12193 } 12194 } 12195 12196 if (mstate == NULL) 12197 return (offs); 12198 12199 /* 12200 * For ring buffers and fill buffers, the scratch space is always 12201 * the inactive buffer. 12202 */ 12203 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12204 mstate->dtms_scratch_size = buf->dtb_size; 12205 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12206 12207 return (offs); 12208} 12209 12210static void 12211dtrace_buffer_polish(dtrace_buffer_t *buf) 12212{ 12213 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12214 ASSERT(MUTEX_HELD(&dtrace_lock)); 12215 12216 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12217 return; 12218 12219 /* 12220 * We need to polish the ring buffer. There are three cases: 12221 * 12222 * - The first (and presumably most common) is that there is no gap 12223 * between the buffer offset and the wrapped offset. In this case, 12224 * there is nothing in the buffer that isn't valid data; we can 12225 * mark the buffer as polished and return. 12226 * 12227 * - The second (less common than the first but still more common 12228 * than the third) is that there is a gap between the buffer offset 12229 * and the wrapped offset, and the wrapped offset is larger than the 12230 * buffer offset. This can happen because of an alignment issue, or 12231 * can happen because of a call to dtrace_buffer_reserve() that 12232 * didn't subsequently consume the buffer space. In this case, 12233 * we need to zero the data from the buffer offset to the wrapped 12234 * offset. 12235 * 12236 * - The third (and least common) is that there is a gap between the 12237 * buffer offset and the wrapped offset, but the wrapped offset is 12238 * _less_ than the buffer offset. This can only happen because a 12239 * call to dtrace_buffer_reserve() induced a wrap, but the space 12240 * was not subsequently consumed. In this case, we need to zero the 12241 * space from the offset to the end of the buffer _and_ from the 12242 * top of the buffer to the wrapped offset. 12243 */ 12244 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12245 bzero(buf->dtb_tomax + buf->dtb_offset, 12246 buf->dtb_xamot_offset - buf->dtb_offset); 12247 } 12248 12249 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12250 bzero(buf->dtb_tomax + buf->dtb_offset, 12251 buf->dtb_size - buf->dtb_offset); 12252 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12253 } 12254} 12255 12256/* 12257 * This routine determines if data generated at the specified time has likely 12258 * been entirely consumed at user-level. This routine is called to determine 12259 * if an ECB on a defunct probe (but for an active enabling) can be safely 12260 * disabled and destroyed. 
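 *
 * (Roughly: a ring buffer is never considered consumed, and a switching
 * buffer is considered consumed only once buffer switches have carried
 * everything generated at the specified time out to the consumer.)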
12261 */ 12262static int 12263dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12264{ 12265 int i; 12266 12267 for (i = 0; i < NCPU; i++) { 12268 dtrace_buffer_t *buf = &bufs[i]; 12269 12270 if (buf->dtb_size == 0) 12271 continue; 12272 12273 if (buf->dtb_flags & DTRACEBUF_RING) 12274 return (0); 12275 12276 if (!buf->dtb_switched && buf->dtb_offset != 0) 12277 return (0); 12278 12279 if (buf->dtb_switched - buf->dtb_interval < when) 12280 return (0); 12281 } 12282 12283 return (1); 12284} 12285 12286static void 12287dtrace_buffer_free(dtrace_buffer_t *bufs) 12288{ 12289 int i; 12290 12291 for (i = 0; i < NCPU; i++) { 12292 dtrace_buffer_t *buf = &bufs[i]; 12293 12294 if (buf->dtb_tomax == NULL) { 12295 ASSERT(buf->dtb_xamot == NULL); 12296 ASSERT(buf->dtb_size == 0); 12297 continue; 12298 } 12299 12300 if (buf->dtb_xamot != NULL) { 12301 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12302 kmem_free(buf->dtb_xamot, buf->dtb_size); 12303 } 12304 12305 kmem_free(buf->dtb_tomax, buf->dtb_size); 12306 buf->dtb_size = 0; 12307 buf->dtb_tomax = NULL; 12308 buf->dtb_xamot = NULL; 12309 } 12310} 12311 12312/* 12313 * DTrace Enabling Functions 12314 */ 12315static dtrace_enabling_t * 12316dtrace_enabling_create(dtrace_vstate_t *vstate) 12317{ 12318 dtrace_enabling_t *enab; 12319 12320 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12321 enab->dten_vstate = vstate; 12322 12323 return (enab); 12324} 12325 12326static void 12327dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12328{ 12329 dtrace_ecbdesc_t **ndesc; 12330 size_t osize, nsize; 12331 12332 /* 12333 * We can't add to enablings after we've enabled them, or after we've 12334 * retained them. 12335 */ 12336 ASSERT(enab->dten_probegen == 0); 12337 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12338 12339 if (enab->dten_ndesc < enab->dten_maxdesc) { 12340 enab->dten_desc[enab->dten_ndesc++] = ecb; 12341 return; 12342 } 12343 12344 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12345 12346 if (enab->dten_maxdesc == 0) { 12347 enab->dten_maxdesc = 1; 12348 } else { 12349 enab->dten_maxdesc <<= 1; 12350 } 12351 12352 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 12353 12354 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12355 ndesc = kmem_zalloc(nsize, KM_SLEEP); 12356 bcopy(enab->dten_desc, ndesc, osize); 12357 if (enab->dten_desc != NULL) 12358 kmem_free(enab->dten_desc, osize); 12359 12360 enab->dten_desc = ndesc; 12361 enab->dten_desc[enab->dten_ndesc++] = ecb; 12362} 12363 12364static void 12365dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 12366 dtrace_probedesc_t *pd) 12367{ 12368 dtrace_ecbdesc_t *new; 12369 dtrace_predicate_t *pred; 12370 dtrace_actdesc_t *act; 12371 12372 /* 12373 * We're going to create a new ECB description that matches the 12374 * specified ECB in every way, but has the specified probe description. 
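 * Note that the predicate and the actions are not copied: we take an
 * additional hold on each below, so the new description shares them with
 * the ECB description that it mirrors.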
12375 */ 12376 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12377 12378 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 12379 dtrace_predicate_hold(pred); 12380 12381 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 12382 dtrace_actdesc_hold(act); 12383 12384 new->dted_action = ecb->dted_action; 12385 new->dted_pred = ecb->dted_pred; 12386 new->dted_probe = *pd; 12387 new->dted_uarg = ecb->dted_uarg; 12388 12389 dtrace_enabling_add(enab, new); 12390} 12391 12392static void 12393dtrace_enabling_dump(dtrace_enabling_t *enab) 12394{ 12395 int i; 12396 12397 for (i = 0; i < enab->dten_ndesc; i++) { 12398 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 12399 12400 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 12401 desc->dtpd_provider, desc->dtpd_mod, 12402 desc->dtpd_func, desc->dtpd_name); 12403 } 12404} 12405 12406static void 12407dtrace_enabling_destroy(dtrace_enabling_t *enab) 12408{ 12409 int i; 12410 dtrace_ecbdesc_t *ep; 12411 dtrace_vstate_t *vstate = enab->dten_vstate; 12412 12413 ASSERT(MUTEX_HELD(&dtrace_lock)); 12414 12415 for (i = 0; i < enab->dten_ndesc; i++) { 12416 dtrace_actdesc_t *act, *next; 12417 dtrace_predicate_t *pred; 12418 12419 ep = enab->dten_desc[i]; 12420 12421 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 12422 dtrace_predicate_release(pred, vstate); 12423 12424 for (act = ep->dted_action; act != NULL; act = next) { 12425 next = act->dtad_next; 12426 dtrace_actdesc_release(act, vstate); 12427 } 12428 12429 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12430 } 12431 12432 if (enab->dten_desc != NULL) 12433 kmem_free(enab->dten_desc, 12434 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 12435 12436 /* 12437 * If this was a retained enabling, decrement the dts_nretained count 12438 * and take it off of the dtrace_retained list. 12439 */ 12440 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12441 dtrace_retained == enab) { 12442 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12443 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12444 enab->dten_vstate->dtvs_state->dts_nretained--; 12445 dtrace_retained_gen++; 12446 } 12447 12448 if (enab->dten_prev == NULL) { 12449 if (dtrace_retained == enab) { 12450 dtrace_retained = enab->dten_next; 12451 12452 if (dtrace_retained != NULL) 12453 dtrace_retained->dten_prev = NULL; 12454 } 12455 } else { 12456 ASSERT(enab != dtrace_retained); 12457 ASSERT(dtrace_retained != NULL); 12458 enab->dten_prev->dten_next = enab->dten_next; 12459 } 12460 12461 if (enab->dten_next != NULL) { 12462 ASSERT(dtrace_retained != NULL); 12463 enab->dten_next->dten_prev = enab->dten_prev; 12464 } 12465 12466 kmem_free(enab, sizeof (dtrace_enabling_t)); 12467} 12468 12469static int 12470dtrace_enabling_retain(dtrace_enabling_t *enab) 12471{ 12472 dtrace_state_t *state; 12473 12474 ASSERT(MUTEX_HELD(&dtrace_lock)); 12475 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12476 ASSERT(enab->dten_vstate != NULL); 12477 12478 state = enab->dten_vstate->dtvs_state; 12479 ASSERT(state != NULL); 12480 12481 /* 12482 * We only allow each state to retain dtrace_retain_max enablings. 
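 * Attempts to retain an enabling beyond that limit fail with ENOSPC;
 * dtrace_retain_max is one of the system-wide DTrace tunables.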
12483 */ 12484 if (state->dts_nretained >= dtrace_retain_max) 12485 return (ENOSPC); 12486 12487 state->dts_nretained++; 12488 dtrace_retained_gen++; 12489 12490 if (dtrace_retained == NULL) { 12491 dtrace_retained = enab; 12492 return (0); 12493 } 12494 12495 enab->dten_next = dtrace_retained; 12496 dtrace_retained->dten_prev = enab; 12497 dtrace_retained = enab; 12498 12499 return (0); 12500} 12501 12502static int 12503dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12504 dtrace_probedesc_t *create) 12505{ 12506 dtrace_enabling_t *new, *enab; 12507 int found = 0, err = ENOENT; 12508 12509 ASSERT(MUTEX_HELD(&dtrace_lock)); 12510 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12511 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12512 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12513 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12514 12515 new = dtrace_enabling_create(&state->dts_vstate); 12516 12517 /* 12518 * Iterate over all retained enablings, looking for enablings that 12519 * match the specified state. 12520 */ 12521 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12522 int i; 12523 12524 /* 12525 * dtvs_state can only be NULL for helper enablings -- and 12526 * helper enablings can't be retained. 12527 */ 12528 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12529 12530 if (enab->dten_vstate->dtvs_state != state) 12531 continue; 12532 12533 /* 12534 * Now iterate over each probe description; we're looking for 12535 * an exact match to the specified probe description. 12536 */ 12537 for (i = 0; i < enab->dten_ndesc; i++) { 12538 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12539 dtrace_probedesc_t *pd = &ep->dted_probe; 12540 12541 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12542 continue; 12543 12544 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12545 continue; 12546 12547 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12548 continue; 12549 12550 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12551 continue; 12552 12553 /* 12554 * We have a winning probe! Add it to our growing 12555 * enabling. 12556 */ 12557 found = 1; 12558 dtrace_enabling_addlike(new, ep, create); 12559 } 12560 } 12561 12562 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 12563 dtrace_enabling_destroy(new); 12564 return (err); 12565 } 12566 12567 return (0); 12568} 12569 12570static void 12571dtrace_enabling_retract(dtrace_state_t *state) 12572{ 12573 dtrace_enabling_t *enab, *next; 12574 12575 ASSERT(MUTEX_HELD(&dtrace_lock)); 12576 12577 /* 12578 * Iterate over all retained enablings, destroy the enablings retained 12579 * for the specified state. 12580 */ 12581 for (enab = dtrace_retained; enab != NULL; enab = next) { 12582 next = enab->dten_next; 12583 12584 /* 12585 * dtvs_state can only be NULL for helper enablings -- and 12586 * helper enablings can't be retained. 
12587 */ 12588 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12589 12590 if (enab->dten_vstate->dtvs_state == state) { 12591 ASSERT(state->dts_nretained > 0); 12592 dtrace_enabling_destroy(enab); 12593 } 12594 } 12595 12596 ASSERT(state->dts_nretained == 0); 12597} 12598 12599static int 12600dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 12601{ 12602 int i = 0; 12603 int matched = 0; 12604 12605 ASSERT(MUTEX_HELD(&cpu_lock)); 12606 ASSERT(MUTEX_HELD(&dtrace_lock)); 12607 12608 for (i = 0; i < enab->dten_ndesc; i++) { 12609 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12610 12611 enab->dten_current = ep; 12612 enab->dten_error = 0; 12613 12614 matched += dtrace_probe_enable(&ep->dted_probe, enab); 12615 12616 if (enab->dten_error != 0) { 12617 /* 12618 * If we get an error half-way through enabling the 12619 * probes, we kick out -- perhaps with some number of 12620 * them enabled. Leaving enabled probes enabled may 12621 * be slightly confusing for user-level, but we expect 12622 * that no one will attempt to actually drive on in 12623 * the face of such errors. If this is an anonymous 12624 * enabling (indicated with a NULL nmatched pointer), 12625 * we cmn_err() a message. We aren't expecting to 12626 * get such an error -- to the extent that it can exist at all, 12627 * it would be a result of corrupted DOF in the driver 12628 * properties. 12629 */ 12630 if (nmatched == NULL) { 12631 cmn_err(CE_WARN, "dtrace_enabling_match() " 12632 "error on %p: %d", (void *)ep, 12633 enab->dten_error); 12634 } 12635 12636 return (enab->dten_error); 12637 } 12638 } 12639 12640 enab->dten_probegen = dtrace_probegen; 12641 if (nmatched != NULL) 12642 *nmatched = matched; 12643 12644 return (0); 12645} 12646 12647static void 12648dtrace_enabling_matchall(void) 12649{ 12650 dtrace_enabling_t *enab; 12651 12652 mutex_enter(&cpu_lock); 12653 mutex_enter(&dtrace_lock); 12654 12655 /* 12656 * Iterate over all retained enablings to see if any probes match 12657 * against them. We only perform this operation on enablings for which 12658 * we have sufficient permissions by virtue of being in the global zone 12659 * or in the same zone as the DTrace client. Because we can be called 12660 * after dtrace_detach() has been called, we cannot assert that there 12661 * are retained enablings. We can safely load from dtrace_retained, 12662 * however: the taskq_destroy() at the end of dtrace_detach() will 12663 * block pending our completion. 12664 */ 12665 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12666#if defined(sun) 12667 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 12668 12669 if (INGLOBALZONE(curproc) || 12670 cr != NULL && getzoneid() == crgetzoneid(cr)) 12671#endif 12672 (void) dtrace_enabling_match(enab, NULL); 12673 } 12674 12675 mutex_exit(&dtrace_lock); 12676 mutex_exit(&cpu_lock); 12677} 12678 12679/* 12680 * If an enabling is to be enabled without having matched probes (that is, if 12681 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 12682 * enabling must be _primed_ by creating an ECB for every ECB description. 12683 * This must be done to assure that we know the number of speculations, the 12684 * number of aggregations, the minimum buffer size needed, etc. before we 12685 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 12686 * enabling any probes, we create ECBs for every ECB description, but with a 12687 * NULL probe -- which is exactly what this function does.
12688 */ 12689static void 12690dtrace_enabling_prime(dtrace_state_t *state) 12691{ 12692 dtrace_enabling_t *enab; 12693 int i; 12694 12695 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12696 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12697 12698 if (enab->dten_vstate->dtvs_state != state) 12699 continue; 12700 12701 /* 12702 * We don't want to prime an enabling more than once, lest 12703 * we allow a malicious user to induce resource exhaustion. 12704 * (The ECBs that result from priming an enabling aren't 12705 * leaked -- but they also aren't deallocated until the 12706 * consumer state is destroyed.) 12707 */ 12708 if (enab->dten_primed) 12709 continue; 12710 12711 for (i = 0; i < enab->dten_ndesc; i++) { 12712 enab->dten_current = enab->dten_desc[i]; 12713 (void) dtrace_probe_enable(NULL, enab); 12714 } 12715 12716 enab->dten_primed = 1; 12717 } 12718} 12719 12720/* 12721 * Called to indicate that probes should be provided due to retained 12722 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 12723 * must take an initial lap through the enabling calling the dtps_provide() 12724 * entry point explicitly to allow for autocreated probes. 12725 */ 12726static void 12727dtrace_enabling_provide(dtrace_provider_t *prv) 12728{ 12729 int i, all = 0; 12730 dtrace_probedesc_t desc; 12731 dtrace_genid_t gen; 12732 12733 ASSERT(MUTEX_HELD(&dtrace_lock)); 12734 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 12735 12736 if (prv == NULL) { 12737 all = 1; 12738 prv = dtrace_provider; 12739 } 12740 12741 do { 12742 dtrace_enabling_t *enab; 12743 void *parg = prv->dtpv_arg; 12744 12745retry: 12746 gen = dtrace_retained_gen; 12747 for (enab = dtrace_retained; enab != NULL; 12748 enab = enab->dten_next) { 12749 for (i = 0; i < enab->dten_ndesc; i++) { 12750 desc = enab->dten_desc[i]->dted_probe; 12751 mutex_exit(&dtrace_lock); 12752 prv->dtpv_pops.dtps_provide(parg, &desc); 12753 mutex_enter(&dtrace_lock); 12754 /* 12755 * Process the retained enablings again if 12756 * they have changed while we weren't holding 12757 * dtrace_lock. 12758 */ 12759 if (gen != dtrace_retained_gen) 12760 goto retry; 12761 } 12762 } 12763 } while (all && (prv = prv->dtpv_next) != NULL); 12764 12765 mutex_exit(&dtrace_lock); 12766 dtrace_probe_provide(NULL, all ? NULL : prv); 12767 mutex_enter(&dtrace_lock); 12768} 12769 12770/* 12771 * Called to reap ECBs that are attached to probes from defunct providers. 12772 */ 12773static void 12774dtrace_enabling_reap(void) 12775{ 12776 dtrace_provider_t *prov; 12777 dtrace_probe_t *probe; 12778 dtrace_ecb_t *ecb; 12779 hrtime_t when; 12780 int i; 12781 12782 mutex_enter(&cpu_lock); 12783 mutex_enter(&dtrace_lock); 12784 12785 for (i = 0; i < dtrace_nprobes; i++) { 12786 if ((probe = dtrace_probes[i]) == NULL) 12787 continue; 12788 12789 if (probe->dtpr_ecb == NULL) 12790 continue; 12791 12792 prov = probe->dtpr_provider; 12793 12794 if ((when = prov->dtpv_defunct) == 0) 12795 continue; 12796 12797 /* 12798 * We have ECBs on a defunct provider: we want to reap these 12799 * ECBs to allow the provider to unregister. The destruction 12800 * of these ECBs must be done carefully: if we destroy the ECB 12801 * and the consumer later wishes to consume an EPID that 12802 * corresponds to the destroyed ECB (and if the EPID metadata 12803 * has not been previously consumed), the consumer will abort 12804 * processing on the unknown EPID. 
To reduce (but not, sadly, 12805 * eliminate) the possibility of this, we will only destroy an 12806 * ECB for a defunct provider if, for the state that 12807 * corresponds to the ECB: 12808 * 12809 * (a) There is no speculative tracing (which can effectively 12810 * cache an EPID for an arbitrary amount of time). 12811 * 12812 * (b) The principal buffers have been switched twice since the 12813 * provider became defunct. 12814 * 12815 * (c) The aggregation buffers are of zero size or have been 12816 * switched twice since the provider became defunct. 12817 * 12818 * We use dts_speculates to determine (a) and call a function 12819 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 12820 * that as soon as we've been unable to destroy one of the ECBs 12821 * associated with the probe, we quit trying -- reaping is only 12822 * fruitful in as much as we can destroy all ECBs associated 12823 * with the defunct provider's probes. 12824 */ 12825 while ((ecb = probe->dtpr_ecb) != NULL) { 12826 dtrace_state_t *state = ecb->dte_state; 12827 dtrace_buffer_t *buf = state->dts_buffer; 12828 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 12829 12830 if (state->dts_speculates) 12831 break; 12832 12833 if (!dtrace_buffer_consumed(buf, when)) 12834 break; 12835 12836 if (!dtrace_buffer_consumed(aggbuf, when)) 12837 break; 12838 12839 dtrace_ecb_disable(ecb); 12840 ASSERT(probe->dtpr_ecb != ecb); 12841 dtrace_ecb_destroy(ecb); 12842 } 12843 } 12844 12845 mutex_exit(&dtrace_lock); 12846 mutex_exit(&cpu_lock); 12847} 12848 12849/* 12850 * DTrace DOF Functions 12851 */ 12852/*ARGSUSED*/ 12853static void 12854dtrace_dof_error(dof_hdr_t *dof, const char *str) 12855{ 12856 if (dtrace_err_verbose) 12857 cmn_err(CE_WARN, "failed to process DOF: %s", str); 12858 12859#ifdef DTRACE_ERRDEBUG 12860 dtrace_errdebug(str); 12861#endif 12862} 12863 12864/* 12865 * Create DOF out of a currently enabled state. Right now, we only create 12866 * DOF containing the run-time options -- but this could be expanded to create 12867 * complete DOF representing the enabled state. 12868 */ 12869static dof_hdr_t * 12870dtrace_dof_create(dtrace_state_t *state) 12871{ 12872 dof_hdr_t *dof; 12873 dof_sec_t *sec; 12874 dof_optdesc_t *opt; 12875 int i, len = sizeof (dof_hdr_t) + 12876 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 12877 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 12878 12879 ASSERT(MUTEX_HELD(&dtrace_lock)); 12880 12881 dof = kmem_zalloc(len, KM_SLEEP); 12882 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 12883 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 12884 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 12885 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 12886 12887 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 12888 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 12889 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 12890 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 12891 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 12892 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 12893 12894 dof->dofh_flags = 0; 12895 dof->dofh_hdrsize = sizeof (dof_hdr_t); 12896 dof->dofh_secsize = sizeof (dof_sec_t); 12897 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 12898 dof->dofh_secoff = sizeof (dof_hdr_t); 12899 dof->dofh_loadsz = len; 12900 dof->dofh_filesz = len; 12901 dof->dofh_pad = 0; 12902 12903 /* 12904 * Fill in the option section header... 
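 *
 * The resulting DOF is laid out as:
 *
 *	dof_hdr_t | dof_sec_t (DOF_SECT_OPTDESC, padded to a uint64_t
 *	boundary) | dof_optdesc_t[DTRACEOPT_MAX]
 *
 * which accounts exactly for the 'len' computed at allocation time.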
12905 */ 12906 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 12907 sec->dofs_type = DOF_SECT_OPTDESC; 12908 sec->dofs_align = sizeof (uint64_t); 12909 sec->dofs_flags = DOF_SECF_LOAD; 12910 sec->dofs_entsize = sizeof (dof_optdesc_t); 12911 12912 opt = (dof_optdesc_t *)((uintptr_t)sec + 12913 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 12914 12915 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 12916 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 12917 12918 for (i = 0; i < DTRACEOPT_MAX; i++) { 12919 opt[i].dofo_option = i; 12920 opt[i].dofo_strtab = DOF_SECIDX_NONE; 12921 opt[i].dofo_value = state->dts_options[i]; 12922 } 12923 12924 return (dof); 12925} 12926 12927static dof_hdr_t * 12928dtrace_dof_copyin(uintptr_t uarg, int *errp) 12929{ 12930 dof_hdr_t hdr, *dof; 12931 12932 ASSERT(!MUTEX_HELD(&dtrace_lock)); 12933 12934 /* 12935 * First, we're going to copyin() the sizeof (dof_hdr_t). 12936 */ 12937 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 12938 dtrace_dof_error(NULL, "failed to copyin DOF header"); 12939 *errp = EFAULT; 12940 return (NULL); 12941 } 12942 12943 /* 12944 * Now we'll allocate the entire DOF and copy it in -- provided 12945 * that the length isn't outrageous. 12946 */ 12947 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 12948 dtrace_dof_error(&hdr, "load size exceeds maximum"); 12949 *errp = E2BIG; 12950 return (NULL); 12951 } 12952 12953 if (hdr.dofh_loadsz < sizeof (hdr)) { 12954 dtrace_dof_error(&hdr, "invalid load size"); 12955 *errp = EINVAL; 12956 return (NULL); 12957 } 12958 12959 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 12960 12961 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 12962 dof->dofh_loadsz != hdr.dofh_loadsz) { 12963 kmem_free(dof, hdr.dofh_loadsz); 12964 *errp = EFAULT; 12965 return (NULL); 12966 } 12967 12968 return (dof); 12969} 12970 12971#if !defined(sun) 12972static __inline uchar_t 12973dtrace_dof_char(char c) { 12974 switch (c) { 12975 case '0': 12976 case '1': 12977 case '2': 12978 case '3': 12979 case '4': 12980 case '5': 12981 case '6': 12982 case '7': 12983 case '8': 12984 case '9': 12985 return (c - '0'); 12986 case 'A': 12987 case 'B': 12988 case 'C': 12989 case 'D': 12990 case 'E': 12991 case 'F': 12992 return (c - 'A' + 10); 12993 case 'a': 12994 case 'b': 12995 case 'c': 12996 case 'd': 12997 case 'e': 12998 case 'f': 12999 return (c - 'a' + 10); 13000 } 13001 /* Should not reach here. */ 13002 return (0); 13003} 13004#endif 13005 13006static dof_hdr_t * 13007dtrace_dof_property(const char *name) 13008{ 13009 uchar_t *buf; 13010 uint64_t loadsz; 13011 unsigned int len, i; 13012 dof_hdr_t *dof; 13013 13014#if defined(sun) 13015 /* 13016 * Unfortunately, array of values in .conf files are always (and 13017 * only) interpreted to be integer arrays. We must read our DOF 13018 * as an integer array, and then squeeze it into a byte array. 
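 * Each element of the integer array holds a single byte of DOF, so the
 * loop below narrows every int back down to a uchar_t in place before the
 * result is interpreted as a dof_hdr_t.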
13019 */ 13020 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 13021 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 13022 return (NULL); 13023 13024 for (i = 0; i < len; i++) 13025 buf[i] = (uchar_t)(((int *)buf)[i]); 13026 13027 if (len < sizeof (dof_hdr_t)) { 13028 ddi_prop_free(buf); 13029 dtrace_dof_error(NULL, "truncated header"); 13030 return (NULL); 13031 } 13032 13033 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 13034 ddi_prop_free(buf); 13035 dtrace_dof_error(NULL, "truncated DOF"); 13036 return (NULL); 13037 } 13038 13039 if (loadsz >= dtrace_dof_maxsize) { 13040 ddi_prop_free(buf); 13041 dtrace_dof_error(NULL, "oversized DOF"); 13042 return (NULL); 13043 } 13044 13045 dof = kmem_alloc(loadsz, KM_SLEEP); 13046 bcopy(buf, dof, loadsz); 13047 ddi_prop_free(buf); 13048#else 13049 char *p; 13050 char *p_env; 13051 13052 if ((p_env = getenv(name)) == NULL) 13053 return (NULL); 13054 13055 len = strlen(p_env) / 2; 13056 13057 buf = kmem_alloc(len, KM_SLEEP); 13058 13059 dof = (dof_hdr_t *) buf; 13060 13061 p = p_env; 13062 13063 for (i = 0; i < len; i++) { 13064 buf[i] = (dtrace_dof_char(p[0]) << 4) | 13065 dtrace_dof_char(p[1]); 13066 p += 2; 13067 } 13068 13069 freeenv(p_env); 13070 13071 if (len < sizeof (dof_hdr_t)) { 13072 kmem_free(buf, 0); 13073 dtrace_dof_error(NULL, "truncated header"); 13074 return (NULL); 13075 } 13076 13077 if (len < (loadsz = dof->dofh_loadsz)) { 13078 kmem_free(buf, 0); 13079 dtrace_dof_error(NULL, "truncated DOF"); 13080 return (NULL); 13081 } 13082 13083 if (loadsz >= dtrace_dof_maxsize) { 13084 kmem_free(buf, 0); 13085 dtrace_dof_error(NULL, "oversized DOF"); 13086 return (NULL); 13087 } 13088#endif 13089 13090 return (dof); 13091} 13092 13093static void 13094dtrace_dof_destroy(dof_hdr_t *dof) 13095{ 13096 kmem_free(dof, dof->dofh_loadsz); 13097} 13098 13099/* 13100 * Return the dof_sec_t pointer corresponding to a given section index. If the 13101 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 13102 * a type other than DOF_SECT_NONE is specified, the header is checked against 13103 * this type and NULL is returned if the types do not match. 
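 * The referenced section must also have the DOF_SECF_LOAD flag set;
 * references to unloadable sections are treated as errors as well.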
13104 */ 13105static dof_sec_t * 13106dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13107{ 13108 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13109 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13110 13111 if (i >= dof->dofh_secnum) { 13112 dtrace_dof_error(dof, "referenced section index is invalid"); 13113 return (NULL); 13114 } 13115 13116 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13117 dtrace_dof_error(dof, "referenced section is not loadable"); 13118 return (NULL); 13119 } 13120 13121 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13122 dtrace_dof_error(dof, "referenced section is the wrong type"); 13123 return (NULL); 13124 } 13125 13126 return (sec); 13127} 13128 13129static dtrace_probedesc_t * 13130dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13131{ 13132 dof_probedesc_t *probe; 13133 dof_sec_t *strtab; 13134 uintptr_t daddr = (uintptr_t)dof; 13135 uintptr_t str; 13136 size_t size; 13137 13138 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13139 dtrace_dof_error(dof, "invalid probe section"); 13140 return (NULL); 13141 } 13142 13143 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13144 dtrace_dof_error(dof, "bad alignment in probe description"); 13145 return (NULL); 13146 } 13147 13148 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13149 dtrace_dof_error(dof, "truncated probe description"); 13150 return (NULL); 13151 } 13152 13153 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13154 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13155 13156 if (strtab == NULL) 13157 return (NULL); 13158 13159 str = daddr + strtab->dofs_offset; 13160 size = strtab->dofs_size; 13161 13162 if (probe->dofp_provider >= strtab->dofs_size) { 13163 dtrace_dof_error(dof, "corrupt probe provider"); 13164 return (NULL); 13165 } 13166 13167 (void) strncpy(desc->dtpd_provider, 13168 (char *)(str + probe->dofp_provider), 13169 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13170 13171 if (probe->dofp_mod >= strtab->dofs_size) { 13172 dtrace_dof_error(dof, "corrupt probe module"); 13173 return (NULL); 13174 } 13175 13176 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13177 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13178 13179 if (probe->dofp_func >= strtab->dofs_size) { 13180 dtrace_dof_error(dof, "corrupt probe function"); 13181 return (NULL); 13182 } 13183 13184 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13185 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13186 13187 if (probe->dofp_name >= strtab->dofs_size) { 13188 dtrace_dof_error(dof, "corrupt probe name"); 13189 return (NULL); 13190 } 13191 13192 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13193 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13194 13195 return (desc); 13196} 13197 13198static dtrace_difo_t * 13199dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13200 cred_t *cr) 13201{ 13202 dtrace_difo_t *dp; 13203 size_t ttl = 0; 13204 dof_difohdr_t *dofd; 13205 uintptr_t daddr = (uintptr_t)dof; 13206 size_t max = dtrace_difo_maxsize; 13207 int i, l, n; 13208 13209 static const struct { 13210 int section; 13211 int bufoffs; 13212 int lenoffs; 13213 int entsize; 13214 int align; 13215 const char *msg; 13216 } difo[] = { 13217 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13218 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13219 sizeof (dif_instr_t), "multiple DIF sections" }, 13220 13221 { 
DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13222 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13223 sizeof (uint64_t), "multiple integer tables" }, 13224 13225 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13226 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13227 sizeof (char), "multiple string tables" }, 13228 13229 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13230 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13231 sizeof (uint_t), "multiple variable tables" }, 13232 13233 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13234 }; 13235 13236 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13237 dtrace_dof_error(dof, "invalid DIFO header section"); 13238 return (NULL); 13239 } 13240 13241 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13242 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13243 return (NULL); 13244 } 13245 13246 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13247 sec->dofs_size % sizeof (dof_secidx_t)) { 13248 dtrace_dof_error(dof, "bad size in DIFO header"); 13249 return (NULL); 13250 } 13251 13252 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13253 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13254 13255 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13256 dp->dtdo_rtype = dofd->dofd_rtype; 13257 13258 for (l = 0; l < n; l++) { 13259 dof_sec_t *subsec; 13260 void **bufp; 13261 uint32_t *lenp; 13262 13263 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13264 dofd->dofd_links[l])) == NULL) 13265 goto err; /* invalid section link */ 13266 13267 if (ttl + subsec->dofs_size > max) { 13268 dtrace_dof_error(dof, "exceeds maximum size"); 13269 goto err; 13270 } 13271 13272 ttl += subsec->dofs_size; 13273 13274 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13275 if (subsec->dofs_type != difo[i].section) 13276 continue; 13277 13278 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13279 dtrace_dof_error(dof, "section not loaded"); 13280 goto err; 13281 } 13282 13283 if (subsec->dofs_align != difo[i].align) { 13284 dtrace_dof_error(dof, "bad alignment"); 13285 goto err; 13286 } 13287 13288 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13289 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13290 13291 if (*bufp != NULL) { 13292 dtrace_dof_error(dof, difo[i].msg); 13293 goto err; 13294 } 13295 13296 if (difo[i].entsize != subsec->dofs_entsize) { 13297 dtrace_dof_error(dof, "entry size mismatch"); 13298 goto err; 13299 } 13300 13301 if (subsec->dofs_entsize != 0 && 13302 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13303 dtrace_dof_error(dof, "corrupt entry size"); 13304 goto err; 13305 } 13306 13307 *lenp = subsec->dofs_size; 13308 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13309 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13310 *bufp, subsec->dofs_size); 13311 13312 if (subsec->dofs_entsize != 0) 13313 *lenp /= subsec->dofs_entsize; 13314 13315 break; 13316 } 13317 13318 /* 13319 * If we encounter a loadable DIFO sub-section that is not 13320 * known to us, assume this is a broken program and fail. 13321 */ 13322 if (difo[i].section == DOF_SECT_NONE && 13323 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13324 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13325 goto err; 13326 } 13327 } 13328 13329 if (dp->dtdo_buf == NULL) { 13330 /* 13331 * We can't have a DIF object without DIF text. 
13332 */ 13333 dtrace_dof_error(dof, "missing DIF text"); 13334 goto err; 13335 } 13336 13337 /* 13338 * Before we validate the DIF object, run through the variable table 13339 * looking for string variables -- if any of them have a size of zero, 13340 * we'll set their size to the system-wide default string size. Note that 13341 * this should _not_ happen if the "strsize" option has been set -- 13342 * in this case, the compiler should have set the size to reflect the 13343 * setting of the option. 13344 */ 13345 for (i = 0; i < dp->dtdo_varlen; i++) { 13346 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 13347 dtrace_diftype_t *t = &v->dtdv_type; 13348 13349 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 13350 continue; 13351 13352 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 13353 t->dtdt_size = dtrace_strsize_default; 13354 } 13355 13356 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 13357 goto err; 13358 13359 dtrace_difo_init(dp, vstate); 13360 return (dp); 13361 13362err: 13363 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 13364 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 13365 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 13366 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 13367 13368 kmem_free(dp, sizeof (dtrace_difo_t)); 13369 return (NULL); 13370} 13371 13372static dtrace_predicate_t * 13373dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13374 cred_t *cr) 13375{ 13376 dtrace_difo_t *dp; 13377 13378 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 13379 return (NULL); 13380 13381 return (dtrace_predicate_create(dp)); 13382} 13383 13384static dtrace_actdesc_t * 13385dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13386 cred_t *cr) 13387{ 13388 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 13389 dof_actdesc_t *desc; 13390 dof_sec_t *difosec; 13391 size_t offs; 13392 uintptr_t daddr = (uintptr_t)dof; 13393 uint64_t arg; 13394 dtrace_actkind_t kind; 13395 13396 if (sec->dofs_type != DOF_SECT_ACTDESC) { 13397 dtrace_dof_error(dof, "invalid action section"); 13398 return (NULL); 13399 } 13400 13401 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 13402 dtrace_dof_error(dof, "truncated action description"); 13403 return (NULL); 13404 } 13405 13406 if (sec->dofs_align != sizeof (uint64_t)) { 13407 dtrace_dof_error(dof, "bad alignment in action description"); 13408 return (NULL); 13409 } 13410 13411 if (sec->dofs_size < sec->dofs_entsize) { 13412 dtrace_dof_error(dof, "section entry size exceeds total size"); 13413 return (NULL); 13414 } 13415 13416 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 13417 dtrace_dof_error(dof, "bad entry size in action description"); 13418 return (NULL); 13419 } 13420 13421 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 13422 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 13423 return (NULL); 13424 } 13425 13426 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 13427 desc = (dof_actdesc_t *)(daddr + 13428 (uintptr_t)sec->dofs_offset + offs); 13429 kind = (dtrace_actkind_t)desc->dofa_kind; 13430 13431 if ((DTRACEACT_ISPRINTFLIKE(kind) && 13432 (kind != DTRACEACT_PRINTA || 13433 desc->dofa_strtab != DOF_SECIDX_NONE)) || 13434 (kind == DTRACEACT_DIFEXPR && 13435 desc->dofa_strtab != DOF_SECIDX_NONE)) { 13436 dof_sec_t *strtab; 13437 char *str, *fmt; 13438 uint64_t i; 13439 13440 /* 13441 * The argument to these actions is an index into the 13442 * DOF
string table. For printf()-like actions, this 13443 * is the format string. For print(), this is the 13444 * CTF type of the expression result. 13445 */ 13446 if ((strtab = dtrace_dof_sect(dof, 13447 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13448 goto err; 13449 13450 str = (char *)((uintptr_t)dof + 13451 (uintptr_t)strtab->dofs_offset); 13452 13453 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13454 if (str[i] == '\0') 13455 break; 13456 } 13457 13458 if (i >= strtab->dofs_size) { 13459 dtrace_dof_error(dof, "bogus format string"); 13460 goto err; 13461 } 13462 13463 if (i == desc->dofa_arg) { 13464 dtrace_dof_error(dof, "empty format string"); 13465 goto err; 13466 } 13467 13468 i -= desc->dofa_arg; 13469 fmt = kmem_alloc(i + 1, KM_SLEEP); 13470 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13471 arg = (uint64_t)(uintptr_t)fmt; 13472 } else { 13473 if (kind == DTRACEACT_PRINTA) { 13474 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13475 arg = 0; 13476 } else { 13477 arg = desc->dofa_arg; 13478 } 13479 } 13480 13481 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13482 desc->dofa_uarg, arg); 13483 13484 if (last != NULL) { 13485 last->dtad_next = act; 13486 } else { 13487 first = act; 13488 } 13489 13490 last = act; 13491 13492 if (desc->dofa_difo == DOF_SECIDX_NONE) 13493 continue; 13494 13495 if ((difosec = dtrace_dof_sect(dof, 13496 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13497 goto err; 13498 13499 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13500 13501 if (act->dtad_difo == NULL) 13502 goto err; 13503 } 13504 13505 ASSERT(first != NULL); 13506 return (first); 13507 13508err: 13509 for (act = first; act != NULL; act = next) { 13510 next = act->dtad_next; 13511 dtrace_actdesc_release(act, vstate); 13512 } 13513 13514 return (NULL); 13515} 13516 13517static dtrace_ecbdesc_t * 13518dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13519 cred_t *cr) 13520{ 13521 dtrace_ecbdesc_t *ep; 13522 dof_ecbdesc_t *ecb; 13523 dtrace_probedesc_t *desc; 13524 dtrace_predicate_t *pred = NULL; 13525 13526 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13527 dtrace_dof_error(dof, "truncated ECB description"); 13528 return (NULL); 13529 } 13530 13531 if (sec->dofs_align != sizeof (uint64_t)) { 13532 dtrace_dof_error(dof, "bad alignment in ECB description"); 13533 return (NULL); 13534 } 13535 13536 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13537 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13538 13539 if (sec == NULL) 13540 return (NULL); 13541 13542 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13543 ep->dted_uarg = ecb->dofe_uarg; 13544 desc = &ep->dted_probe; 13545 13546 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13547 goto err; 13548 13549 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13550 if ((sec = dtrace_dof_sect(dof, 13551 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13552 goto err; 13553 13554 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13555 goto err; 13556 13557 ep->dted_pred.dtpdd_predicate = pred; 13558 } 13559 13560 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13561 if ((sec = dtrace_dof_sect(dof, 13562 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 13563 goto err; 13564 13565 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13566 13567 if (ep->dted_action == NULL) 13568 goto err; 13569 } 13570 13571 return (ep); 13572 13573err: 13574 if (pred != NULL) 13575 dtrace_predicate_release(pred, vstate); 13576 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 13577 return (NULL); 13578} 13579 13580/* 13581 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13582 * specified DOF. At present, this amounts to simply adding 'ubase' to the 13583 * site of any user SETX relocations to account for load object base address. 13584 * In the future, if we need other relocations, this function can be extended. 13585 */ 13586static int 13587dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 13588{ 13589 uintptr_t daddr = (uintptr_t)dof; 13590 dof_relohdr_t *dofr = 13591 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13592 dof_sec_t *ss, *rs, *ts; 13593 dof_relodesc_t *r; 13594 uint_t i, n; 13595 13596 if (sec->dofs_size < sizeof (dof_relohdr_t) || 13597 sec->dofs_align != sizeof (dof_secidx_t)) { 13598 dtrace_dof_error(dof, "invalid relocation header"); 13599 return (-1); 13600 } 13601 13602 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 13603 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 13604 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 13605 13606 if (ss == NULL || rs == NULL || ts == NULL) 13607 return (-1); /* dtrace_dof_error() has been called already */ 13608 13609 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 13610 rs->dofs_align != sizeof (uint64_t)) { 13611 dtrace_dof_error(dof, "invalid relocation section"); 13612 return (-1); 13613 } 13614 13615 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 13616 n = rs->dofs_size / rs->dofs_entsize; 13617 13618 for (i = 0; i < n; i++) { 13619 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 13620 13621 switch (r->dofr_type) { 13622 case DOF_RELO_NONE: 13623 break; 13624 case DOF_RELO_SETX: 13625 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 13626 sizeof (uint64_t) > ts->dofs_size) { 13627 dtrace_dof_error(dof, "bad relocation offset"); 13628 return (-1); 13629 } 13630 13631 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 13632 dtrace_dof_error(dof, "misaligned setx relo"); 13633 return (-1); 13634 } 13635 13636 *(uint64_t *)taddr += ubase; 13637 break; 13638 default: 13639 dtrace_dof_error(dof, "invalid relocation type"); 13640 return (-1); 13641 } 13642 13643 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 13644 } 13645 13646 return (0); 13647} 13648 13649/* 13650 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 13651 * header: it should be at the front of a memory region that is at least 13652 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 13653 * size. It need not be validated in any other way. 13654 */ 13655static int 13656dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 13657 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 13658{ 13659 uint64_t len = dof->dofh_loadsz, seclen; 13660 uintptr_t daddr = (uintptr_t)dof; 13661 dtrace_ecbdesc_t *ep; 13662 dtrace_enabling_t *enab; 13663 uint_t i; 13664 13665 ASSERT(MUTEX_HELD(&dtrace_lock)); 13666 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 13667 13668 /* 13669 * Check the DOF header identification bytes. In addition to checking 13670 * valid settings, we also verify that unused bits/bytes are zeroed so 13671 * we can use them later without fear of regressing existing binaries. 
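 * (The checks below cover the magic string, the data model, the byte
 * encoding, the DOF and DIF versions, the integer and tuple register
 * counts, and the pad bytes of the identification array; the header flag
 * bits and the section header geometry are validated immediately after.)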
13672 */ 13673 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 13674 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 13675 dtrace_dof_error(dof, "DOF magic string mismatch"); 13676 return (-1); 13677 } 13678 13679 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 13680 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 13681 dtrace_dof_error(dof, "DOF has invalid data model"); 13682 return (-1); 13683 } 13684 13685 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 13686 dtrace_dof_error(dof, "DOF encoding mismatch"); 13687 return (-1); 13688 } 13689 13690 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13691 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 13692 dtrace_dof_error(dof, "DOF version mismatch"); 13693 return (-1); 13694 } 13695 13696 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 13697 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 13698 return (-1); 13699 } 13700 13701 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 13702 dtrace_dof_error(dof, "DOF uses too many integer registers"); 13703 return (-1); 13704 } 13705 13706 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 13707 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 13708 return (-1); 13709 } 13710 13711 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 13712 if (dof->dofh_ident[i] != 0) { 13713 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 13714 return (-1); 13715 } 13716 } 13717 13718 if (dof->dofh_flags & ~DOF_FL_VALID) { 13719 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 13720 return (-1); 13721 } 13722 13723 if (dof->dofh_secsize == 0) { 13724 dtrace_dof_error(dof, "zero section header size"); 13725 return (-1); 13726 } 13727 13728 /* 13729 * Check that the section headers don't exceed the amount of DOF 13730 * data. Note that we cast the section size and number of sections 13731 * to uint64_t's to prevent possible overflow in the multiplication. 13732 */ 13733 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 13734 13735 if (dof->dofh_secoff > len || seclen > len || 13736 dof->dofh_secoff + seclen > len) { 13737 dtrace_dof_error(dof, "truncated section headers"); 13738 return (-1); 13739 } 13740 13741 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 13742 dtrace_dof_error(dof, "misaligned section headers"); 13743 return (-1); 13744 } 13745 13746 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 13747 dtrace_dof_error(dof, "misaligned section size"); 13748 return (-1); 13749 } 13750 13751 /* 13752 * Take an initial pass through the section headers to be sure that 13753 * the headers don't have stray offsets. If the 'noprobes' flag is 13754 * set, do not permit sections relating to providers, probes, or args. 
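 * Beyond that, every loadable section must have a power-of-two alignment,
 * an offset aligned accordingly, an offset and size that fall within the
 * DOF, and -- in the case of string tables -- a terminating NUL byte.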
13755 */ 13756 for (i = 0; i < dof->dofh_secnum; i++) { 13757 dof_sec_t *sec = (dof_sec_t *)(daddr + 13758 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13759 13760 if (noprobes) { 13761 switch (sec->dofs_type) { 13762 case DOF_SECT_PROVIDER: 13763 case DOF_SECT_PROBES: 13764 case DOF_SECT_PRARGS: 13765 case DOF_SECT_PROFFS: 13766 dtrace_dof_error(dof, "illegal sections " 13767 "for enabling"); 13768 return (-1); 13769 } 13770 } 13771 13772 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 13773 !(sec->dofs_flags & DOF_SECF_LOAD)) { 13774 dtrace_dof_error(dof, "loadable section with load " 13775 "flag unset"); 13776 return (-1); 13777 } 13778 13779 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 13780 continue; /* just ignore non-loadable sections */ 13781 13782 if (sec->dofs_align & (sec->dofs_align - 1)) { 13783 dtrace_dof_error(dof, "bad section alignment"); 13784 return (-1); 13785 } 13786 13787 if (sec->dofs_offset & (sec->dofs_align - 1)) { 13788 dtrace_dof_error(dof, "misaligned section"); 13789 return (-1); 13790 } 13791 13792 if (sec->dofs_offset > len || sec->dofs_size > len || 13793 sec->dofs_offset + sec->dofs_size > len) { 13794 dtrace_dof_error(dof, "corrupt section header"); 13795 return (-1); 13796 } 13797 13798 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 13799 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 13800 dtrace_dof_error(dof, "non-terminating string table"); 13801 return (-1); 13802 } 13803 } 13804 13805 /* 13806 * Take a second pass through the sections and locate and perform any 13807 * relocations that are present. We do this after the first pass to 13808 * be sure that all sections have had their headers validated. 13809 */ 13810 for (i = 0; i < dof->dofh_secnum; i++) { 13811 dof_sec_t *sec = (dof_sec_t *)(daddr + 13812 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13813 13814 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 13815 continue; /* skip sections that are not loadable */ 13816 13817 switch (sec->dofs_type) { 13818 case DOF_SECT_URELHDR: 13819 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 13820 return (-1); 13821 break; 13822 } 13823 } 13824 13825 if ((enab = *enabp) == NULL) 13826 enab = *enabp = dtrace_enabling_create(vstate); 13827 13828 for (i = 0; i < dof->dofh_secnum; i++) { 13829 dof_sec_t *sec = (dof_sec_t *)(daddr + 13830 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13831 13832 if (sec->dofs_type != DOF_SECT_ECBDESC) 13833 continue; 13834 13835 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 13836 dtrace_enabling_destroy(enab); 13837 *enabp = NULL; 13838 return (-1); 13839 } 13840 13841 dtrace_enabling_add(enab, ep); 13842 } 13843 13844 return (0); 13845} 13846 13847/* 13848 * Process DOF for any options. This routine assumes that the DOF has been 13849 * at least processed by dtrace_dof_slurp(). 
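 * Each DOF_SECT_OPTDESC entry found here is handed to dtrace_state_option();
 * entries that reference a string table or carry an unset value are
 * rejected with EINVAL.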
13850 */ 13851static int 13852dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 13853{ 13854 int i, rval; 13855 uint32_t entsize; 13856 size_t offs; 13857 dof_optdesc_t *desc; 13858 13859 for (i = 0; i < dof->dofh_secnum; i++) { 13860 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 13861 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13862 13863 if (sec->dofs_type != DOF_SECT_OPTDESC) 13864 continue; 13865 13866 if (sec->dofs_align != sizeof (uint64_t)) { 13867 dtrace_dof_error(dof, "bad alignment in " 13868 "option description"); 13869 return (EINVAL); 13870 } 13871 13872 if ((entsize = sec->dofs_entsize) == 0) { 13873 dtrace_dof_error(dof, "zeroed option entry size"); 13874 return (EINVAL); 13875 } 13876 13877 if (entsize < sizeof (dof_optdesc_t)) { 13878 dtrace_dof_error(dof, "bad option entry size"); 13879 return (EINVAL); 13880 } 13881 13882 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 13883 desc = (dof_optdesc_t *)((uintptr_t)dof + 13884 (uintptr_t)sec->dofs_offset + offs); 13885 13886 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 13887 dtrace_dof_error(dof, "non-zero option string"); 13888 return (EINVAL); 13889 } 13890 13891 if (desc->dofo_value == DTRACEOPT_UNSET) { 13892 dtrace_dof_error(dof, "unset option"); 13893 return (EINVAL); 13894 } 13895 13896 if ((rval = dtrace_state_option(state, 13897 desc->dofo_option, desc->dofo_value)) != 0) { 13898 dtrace_dof_error(dof, "rejected option"); 13899 return (rval); 13900 } 13901 } 13902 } 13903 13904 return (0); 13905} 13906 13907/* 13908 * DTrace Consumer State Functions 13909 */ 13910static int 13911dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 13912{ 13913 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 13914 void *base; 13915 uintptr_t limit; 13916 dtrace_dynvar_t *dvar, *next, *start; 13917 int i; 13918 13919 ASSERT(MUTEX_HELD(&dtrace_lock)); 13920 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 13921 13922 bzero(dstate, sizeof (dtrace_dstate_t)); 13923 13924 if ((dstate->dtds_chunksize = chunksize) == 0) 13925 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 13926 13927 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 13928 size = min; 13929 13930 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 13931 return (ENOMEM); 13932 13933 dstate->dtds_size = size; 13934 dstate->dtds_base = base; 13935 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 13936 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 13937 13938 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 13939 13940 if (hashsize != 1 && (hashsize & 1)) 13941 hashsize--; 13942 13943 dstate->dtds_hashsize = hashsize; 13944 dstate->dtds_hash = dstate->dtds_base; 13945 13946 /* 13947 * Set all of our hash buckets to point to the single sink, and (if 13948 * it hasn't already been set), set the sink's hash value to be the 13949 * sink sentinel value. The sink is needed for dynamic variable 13950 * lookups to know that they have iterated over an entire, valid hash 13951 * chain. 13952 */ 13953 for (i = 0; i < hashsize; i++) 13954 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 13955 13956 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 13957 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 13958 13959 /* 13960 * Determine number of active CPUs. Divide free list evenly among 13961 * active CPUs. 
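 * The per-CPU share (maxper) is rounded down to a whole number of chunks;
 * if that share comes to zero, the entire free list is given to the first
 * CPU, and the last CPU always absorbs whatever space is left over.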
13962 */ 13963 start = (dtrace_dynvar_t *) 13964 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 13965 limit = (uintptr_t)base + size; 13966 13967 maxper = (limit - (uintptr_t)start) / NCPU; 13968 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 13969 13970#if !defined(sun) 13971 CPU_FOREACH(i) { 13972#else 13973 for (i = 0; i < NCPU; i++) { 13974#endif 13975 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 13976 13977 /* 13978 * If we don't even have enough chunks to make it once through 13979 * NCPUs, we're just going to allocate everything to the first 13980 * CPU. And if we're on the last CPU, we're going to allocate 13981 * whatever is left over. In either case, we set the limit to 13982 * be the limit of the dynamic variable space. 13983 */ 13984 if (maxper == 0 || i == NCPU - 1) { 13985 limit = (uintptr_t)base + size; 13986 start = NULL; 13987 } else { 13988 limit = (uintptr_t)start + maxper; 13989 start = (dtrace_dynvar_t *)limit; 13990 } 13991 13992 ASSERT(limit <= (uintptr_t)base + size); 13993 13994 for (;;) { 13995 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 13996 dstate->dtds_chunksize); 13997 13998 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 13999 break; 14000 14001 dvar->dtdv_next = next; 14002 dvar = next; 14003 } 14004 14005 if (maxper == 0) 14006 break; 14007 } 14008 14009 return (0); 14010} 14011 14012static void 14013dtrace_dstate_fini(dtrace_dstate_t *dstate) 14014{ 14015 ASSERT(MUTEX_HELD(&cpu_lock)); 14016 14017 if (dstate->dtds_base == NULL) 14018 return; 14019 14020 kmem_free(dstate->dtds_base, dstate->dtds_size); 14021 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14022} 14023 14024static void 14025dtrace_vstate_fini(dtrace_vstate_t *vstate) 14026{ 14027 /* 14028 * Logical XOR, where are you? 14029 */ 14030 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14031 14032 if (vstate->dtvs_nglobals > 0) { 14033 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14034 sizeof (dtrace_statvar_t *)); 14035 } 14036 14037 if (vstate->dtvs_ntlocals > 0) { 14038 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14039 sizeof (dtrace_difv_t)); 14040 } 14041 14042 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14043 14044 if (vstate->dtvs_nlocals > 0) { 14045 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14046 sizeof (dtrace_statvar_t *)); 14047 } 14048} 14049 14050#if defined(sun) 14051static void 14052dtrace_state_clean(dtrace_state_t *state) 14053{ 14054 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14055 return; 14056 14057 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14058 dtrace_speculation_clean(state); 14059} 14060 14061static void 14062dtrace_state_deadman(dtrace_state_t *state) 14063{ 14064 hrtime_t now; 14065 14066 dtrace_sync(); 14067 14068 now = dtrace_gethrtime(); 14069 14070 if (state != dtrace_anon.dta_state && 14071 now - state->dts_laststatus >= dtrace_deadman_user) 14072 return; 14073 14074 /* 14075 * We must be sure that dts_alive never appears to be less than the 14076 * value upon entry to dtrace_state_deadman(), and because we lack a 14077 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14078 * store INT64_MAX to it, followed by a memory barrier, followed by 14079 * the new value. This assures that dts_alive never appears to be 14080 * less than its true value, regardless of the order in which the 14081 * stores to the underlying storage are issued. 
14082 */ 14083 state->dts_alive = INT64_MAX; 14084 dtrace_membar_producer(); 14085 state->dts_alive = now; 14086} 14087#else 14088static void 14089dtrace_state_clean(void *arg) 14090{ 14091 dtrace_state_t *state = arg; 14092 dtrace_optval_t *opt = state->dts_options; 14093 14094 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14095 return; 14096 14097 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14098 dtrace_speculation_clean(state); 14099 14100 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14101 dtrace_state_clean, state); 14102} 14103 14104static void 14105dtrace_state_deadman(void *arg) 14106{ 14107 dtrace_state_t *state = arg; 14108 hrtime_t now; 14109 14110 dtrace_sync(); 14111 14112 dtrace_debug_output(); 14113 14114 now = dtrace_gethrtime(); 14115 14116 if (state != dtrace_anon.dta_state && 14117 now - state->dts_laststatus >= dtrace_deadman_user) 14118 return; 14119 14120 /* 14121 * We must be sure that dts_alive never appears to be less than the 14122 * value upon entry to dtrace_state_deadman(), and because we lack a 14123 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14124 * store INT64_MAX to it, followed by a memory barrier, followed by 14125 * the new value. This assures that dts_alive never appears to be 14126 * less than its true value, regardless of the order in which the 14127 * stores to the underlying storage are issued. 14128 */ 14129 state->dts_alive = INT64_MAX; 14130 dtrace_membar_producer(); 14131 state->dts_alive = now; 14132 14133 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14134 dtrace_state_deadman, state); 14135} 14136#endif 14137 14138static dtrace_state_t * 14139#if defined(sun) 14140dtrace_state_create(dev_t *devp, cred_t *cr) 14141#else 14142dtrace_state_create(struct cdev *dev) 14143#endif 14144{ 14145#if defined(sun) 14146 minor_t minor; 14147 major_t major; 14148#else 14149 cred_t *cr = NULL; 14150 int m = 0; 14151#endif 14152 char c[30]; 14153 dtrace_state_t *state; 14154 dtrace_optval_t *opt; 14155 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14156 14157 ASSERT(MUTEX_HELD(&dtrace_lock)); 14158 ASSERT(MUTEX_HELD(&cpu_lock)); 14159 14160#if defined(sun) 14161 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14162 VM_BESTFIT | VM_SLEEP); 14163 14164 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14165 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14166 return (NULL); 14167 } 14168 14169 state = ddi_get_soft_state(dtrace_softstate, minor); 14170#else 14171 if (dev != NULL) { 14172 cr = dev->si_cred; 14173 m = dev2unit(dev); 14174 } 14175 14176 /* Allocate memory for the state. */ 14177 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14178#endif 14179 14180 state->dts_epid = DTRACE_EPIDNONE + 1; 14181 14182 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14183#if defined(sun) 14184 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14185 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14186 14187 if (devp != NULL) { 14188 major = getemajor(*devp); 14189 } else { 14190 major = ddi_driver_major(dtrace_devi); 14191 } 14192 14193 state->dts_dev = makedevice(major, minor); 14194 14195 if (devp != NULL) 14196 *devp = state->dts_dev; 14197#else 14198 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14199 state->dts_dev = dev; 14200#endif 14201 14202 /* 14203 * We allocate NCPU buffers. 
On the one hand, this can be quite 14204 * a bit of memory per instance (nearly 36K on a Starcat). On the 14205 * other hand, it saves an additional memory reference in the probe 14206 * path. 14207 */ 14208 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14209 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14210 14211#if defined(sun) 14212 state->dts_cleaner = CYCLIC_NONE; 14213 state->dts_deadman = CYCLIC_NONE; 14214#else 14215 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 14216 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 14217#endif 14218 state->dts_vstate.dtvs_state = state; 14219 14220 for (i = 0; i < DTRACEOPT_MAX; i++) 14221 state->dts_options[i] = DTRACEOPT_UNSET; 14222 14223 /* 14224 * Set the default options. 14225 */ 14226 opt = state->dts_options; 14227 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14228 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14229 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14230 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14231 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14232 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14233 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14234 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14235 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14236 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14237 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14238 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14239 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14240 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14241 14242 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14243 14244 /* 14245 * Depending on the user credentials, we set flag bits which alter probe 14246 * visibility or the amount of destructiveness allowed. In the case of 14247 * actual anonymous tracing, or the possession of all privileges, all of 14248 * the normal checks are bypassed. 14249 */ 14250 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14251 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14252 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14253 } else { 14254 /* 14255 * Set up the credentials for this instantiation. We take a 14256 * hold on the credential to prevent it from disappearing on 14257 * us; this in turn prevents the zone_t referenced by this 14258 * credential from disappearing. This means that we can 14259 * examine the credential and the zone from probe context. 14260 */ 14261 crhold(cr); 14262 state->dts_cred.dcr_cred = cr; 14263 14264 /* 14265 * CRA_PROC means "we have *some* privilege for dtrace" and 14266 * unlocks the use of variables like pid, zonename, etc. 14267 */ 14268 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 14269 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14270 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 14271 } 14272 14273 /* 14274 * dtrace_user allows use of syscall and profile providers. 14275 * If the user also has proc_owner and/or proc_zone, we 14276 * extend the scope to include additional visibility and 14277 * destructive power. 
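 *
 * In outline, the checks that follow grant (a summary of the code
 * below, not a complete statement of the privilege model):
 *
 *	dtrace_user + proc_owner:  DTRACE_CRV_ALLPROC visibility and
 *				   DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
 *	dtrace_user + proc_zone:   DTRACE_CRV_ALLZONE visibility and
 *				   DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE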
14278 */ 14279 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 14280 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 14281 state->dts_cred.dcr_visible |= 14282 DTRACE_CRV_ALLPROC; 14283 14284 state->dts_cred.dcr_action |= 14285 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14286 } 14287 14288 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 14289 state->dts_cred.dcr_visible |= 14290 DTRACE_CRV_ALLZONE; 14291 14292 state->dts_cred.dcr_action |= 14293 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14294 } 14295 14296 /* 14297 * If we have all privs in whatever zone this is, 14298 * we can do destructive things to processes which 14299 * have altered credentials. 14300 */ 14301#if defined(sun) 14302 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14303 cr->cr_zone->zone_privset)) { 14304 state->dts_cred.dcr_action |= 14305 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14306 } 14307#endif 14308 } 14309 14310 /* 14311 * Holding the dtrace_kernel privilege also implies that 14312 * the user has the dtrace_user privilege from a visibility 14313 * perspective. But without further privileges, some 14314 * destructive actions are not available. 14315 */ 14316 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 14317 /* 14318 * Make all probes in all zones visible. However, 14319 * this doesn't mean that all actions become available 14320 * to all zones. 14321 */ 14322 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 14323 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 14324 14325 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 14326 DTRACE_CRA_PROC; 14327 /* 14328 * Holding proc_owner means that destructive actions 14329 * for *this* zone are allowed. 14330 */ 14331 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14332 state->dts_cred.dcr_action |= 14333 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14334 14335 /* 14336 * Holding proc_zone means that destructive actions 14337 * for this user/group ID in all zones is allowed. 14338 */ 14339 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14340 state->dts_cred.dcr_action |= 14341 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14342 14343#if defined(sun) 14344 /* 14345 * If we have all privs in whatever zone this is, 14346 * we can do destructive things to processes which 14347 * have altered credentials. 14348 */ 14349 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14350 cr->cr_zone->zone_privset)) { 14351 state->dts_cred.dcr_action |= 14352 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14353 } 14354#endif 14355 } 14356 14357 /* 14358 * Holding the dtrace_proc privilege gives control over fasttrap 14359 * and pid providers. We need to grant wider destructive 14360 * privileges in the event that the user has proc_owner and/or 14361 * proc_zone. 
14362 */ 14363 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14364 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14365 state->dts_cred.dcr_action |= 14366 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14367 14368 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14369 state->dts_cred.dcr_action |= 14370 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14371 } 14372 } 14373 14374 return (state); 14375} 14376 14377static int 14378dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 14379{ 14380 dtrace_optval_t *opt = state->dts_options, size; 14381 processorid_t cpu = 0;; 14382 int flags = 0, rval, factor, divisor = 1; 14383 14384 ASSERT(MUTEX_HELD(&dtrace_lock)); 14385 ASSERT(MUTEX_HELD(&cpu_lock)); 14386 ASSERT(which < DTRACEOPT_MAX); 14387 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 14388 (state == dtrace_anon.dta_state && 14389 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 14390 14391 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 14392 return (0); 14393 14394 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 14395 cpu = opt[DTRACEOPT_CPU]; 14396 14397 if (which == DTRACEOPT_SPECSIZE) 14398 flags |= DTRACEBUF_NOSWITCH; 14399 14400 if (which == DTRACEOPT_BUFSIZE) { 14401 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 14402 flags |= DTRACEBUF_RING; 14403 14404 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 14405 flags |= DTRACEBUF_FILL; 14406 14407 if (state != dtrace_anon.dta_state || 14408 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14409 flags |= DTRACEBUF_INACTIVE; 14410 } 14411 14412 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { 14413 /* 14414 * The size must be 8-byte aligned. If the size is not 8-byte 14415 * aligned, drop it down by the difference. 14416 */ 14417 if (size & (sizeof (uint64_t) - 1)) 14418 size -= size & (sizeof (uint64_t) - 1); 14419 14420 if (size < state->dts_reserve) { 14421 /* 14422 * Buffers always must be large enough to accommodate 14423 * their prereserved space. We return E2BIG instead 14424 * of ENOMEM in this case to allow for user-level 14425 * software to differentiate the cases. 
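 *
 * If the allocation itself fails with ENOMEM and the "bufresize"
 * policy is auto, the code below retries at size / divisor, where
 * divisor is the smallest power of two (at least 2) that is not less
 * than the factor reported by dtrace_buffer_alloc().  As a purely
 * hypothetical example, a 64MB request failing with a factor of 3
 * would be retried at 16MB, then 4MB, and so on, until an attempt
 * succeeds or the size falls below sizeof (uint64_t).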
14426 */ 14427 return (E2BIG); 14428 } 14429 14430 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); 14431 14432 if (rval != ENOMEM) { 14433 opt[which] = size; 14434 return (rval); 14435 } 14436 14437 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14438 return (rval); 14439 14440 for (divisor = 2; divisor < factor; divisor <<= 1) 14441 continue; 14442 } 14443 14444 return (ENOMEM); 14445} 14446 14447static int 14448dtrace_state_buffers(dtrace_state_t *state) 14449{ 14450 dtrace_speculation_t *spec = state->dts_speculations; 14451 int rval, i; 14452 14453 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 14454 DTRACEOPT_BUFSIZE)) != 0) 14455 return (rval); 14456 14457 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 14458 DTRACEOPT_AGGSIZE)) != 0) 14459 return (rval); 14460 14461 for (i = 0; i < state->dts_nspeculations; i++) { 14462 if ((rval = dtrace_state_buffer(state, 14463 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 14464 return (rval); 14465 } 14466 14467 return (0); 14468} 14469 14470static void 14471dtrace_state_prereserve(dtrace_state_t *state) 14472{ 14473 dtrace_ecb_t *ecb; 14474 dtrace_probe_t *probe; 14475 14476 state->dts_reserve = 0; 14477 14478 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 14479 return; 14480 14481 /* 14482 * If our buffer policy is a "fill" buffer policy, we need to set the 14483 * prereserved space to be the space required by the END probes. 14484 */ 14485 probe = dtrace_probes[dtrace_probeid_end - 1]; 14486 ASSERT(probe != NULL); 14487 14488 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 14489 if (ecb->dte_state != state) 14490 continue; 14491 14492 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 14493 } 14494} 14495 14496static int 14497dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 14498{ 14499 dtrace_optval_t *opt = state->dts_options, sz, nspec; 14500 dtrace_speculation_t *spec; 14501 dtrace_buffer_t *buf; 14502#if defined(sun) 14503 cyc_handler_t hdlr; 14504 cyc_time_t when; 14505#endif 14506 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 14507 dtrace_icookie_t cookie; 14508 14509 mutex_enter(&cpu_lock); 14510 mutex_enter(&dtrace_lock); 14511 14512 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14513 rval = EBUSY; 14514 goto out; 14515 } 14516 14517 /* 14518 * Before we can perform any checks, we must prime all of the 14519 * retained enablings that correspond to this state. 14520 */ 14521 dtrace_enabling_prime(state); 14522 14523 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 14524 rval = EACCES; 14525 goto out; 14526 } 14527 14528 dtrace_state_prereserve(state); 14529 14530 /* 14531 * Now we want to do is try to allocate our speculations. 14532 * We do not automatically resize the number of speculations; if 14533 * this fails, we will fail the operation. 
14534 */ 14535 nspec = opt[DTRACEOPT_NSPEC]; 14536 ASSERT(nspec != DTRACEOPT_UNSET); 14537 14538 if (nspec > INT_MAX) { 14539 rval = ENOMEM; 14540 goto out; 14541 } 14542 14543 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14544 KM_NOSLEEP | KM_NORMALPRI); 14545 14546 if (spec == NULL) { 14547 rval = ENOMEM; 14548 goto out; 14549 } 14550 14551 state->dts_speculations = spec; 14552 state->dts_nspeculations = (int)nspec; 14553 14554 for (i = 0; i < nspec; i++) { 14555 if ((buf = kmem_zalloc(bufsize, 14556 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 14557 rval = ENOMEM; 14558 goto err; 14559 } 14560 14561 spec[i].dtsp_buffer = buf; 14562 } 14563 14564 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 14565 if (dtrace_anon.dta_state == NULL) { 14566 rval = ENOENT; 14567 goto out; 14568 } 14569 14570 if (state->dts_necbs != 0) { 14571 rval = EALREADY; 14572 goto out; 14573 } 14574 14575 state->dts_anon = dtrace_anon_grab(); 14576 ASSERT(state->dts_anon != NULL); 14577 state = state->dts_anon; 14578 14579 /* 14580 * We want "grabanon" to be set in the grabbed state, so we'll 14581 * copy that option value from the grabbing state into the 14582 * grabbed state. 14583 */ 14584 state->dts_options[DTRACEOPT_GRABANON] = 14585 opt[DTRACEOPT_GRABANON]; 14586 14587 *cpu = dtrace_anon.dta_beganon; 14588 14589 /* 14590 * If the anonymous state is active (as it almost certainly 14591 * is if the anonymous enabling ultimately matched anything), 14592 * we don't allow any further option processing -- but we 14593 * don't return failure. 14594 */ 14595 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 14596 goto out; 14597 } 14598 14599 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 14600 opt[DTRACEOPT_AGGSIZE] != 0) { 14601 if (state->dts_aggregations == NULL) { 14602 /* 14603 * We're not going to create an aggregation buffer 14604 * because we don't have any ECBs that contain 14605 * aggregations -- set this option to 0. 14606 */ 14607 opt[DTRACEOPT_AGGSIZE] = 0; 14608 } else { 14609 /* 14610 * If we have an aggregation buffer, we must also have 14611 * a buffer to use as scratch. 14612 */ 14613 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 14614 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 14615 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 14616 } 14617 } 14618 } 14619 14620 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 14621 opt[DTRACEOPT_SPECSIZE] != 0) { 14622 if (!state->dts_speculates) { 14623 /* 14624 * We're not going to create speculation buffers 14625 * because we don't have any ECBs that actually 14626 * speculate -- set the speculation size to 0. 14627 */ 14628 opt[DTRACEOPT_SPECSIZE] = 0; 14629 } 14630 } 14631 14632 /* 14633 * The bare minimum size for any buffer that we're actually going to 14634 * do anything to is sizeof (uint64_t). 14635 */ 14636 sz = sizeof (uint64_t); 14637 14638 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 14639 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 14640 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 14641 /* 14642 * A buffer size has been explicitly set to 0 (or to a size 14643 * that will be adjusted to 0) and we need the space -- we 14644 * need to return failure. We return ENOSPC to differentiate 14645 * it from failing to allocate a buffer due to failure to meet 14646 * the reserve (for which we return E2BIG). 
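 *
 * To summarize the failure modes in this path:  ENOSPC means a
 * needed buffer was explicitly sized to zero, E2BIG means a buffer
 * could not be made large enough to cover its prereserved space,
 * and ENOMEM means that allocation failed even after any automatic
 * downsizing.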
14647 */ 14648 rval = ENOSPC; 14649 goto out; 14650 } 14651 14652 if ((rval = dtrace_state_buffers(state)) != 0) 14653 goto err; 14654 14655 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 14656 sz = dtrace_dstate_defsize; 14657 14658 do { 14659 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 14660 14661 if (rval == 0) 14662 break; 14663 14664 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14665 goto err; 14666 } while (sz >>= 1); 14667 14668 opt[DTRACEOPT_DYNVARSIZE] = sz; 14669 14670 if (rval != 0) 14671 goto err; 14672 14673 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 14674 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 14675 14676 if (opt[DTRACEOPT_CLEANRATE] == 0) 14677 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14678 14679 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 14680 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 14681 14682 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 14683 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14684 14685 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 14686#if defined(sun) 14687 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 14688 hdlr.cyh_arg = state; 14689 hdlr.cyh_level = CY_LOW_LEVEL; 14690 14691 when.cyt_when = 0; 14692 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 14693 14694 state->dts_cleaner = cyclic_add(&hdlr, &when); 14695 14696 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 14697 hdlr.cyh_arg = state; 14698 hdlr.cyh_level = CY_LOW_LEVEL; 14699 14700 when.cyt_when = 0; 14701 when.cyt_interval = dtrace_deadman_interval; 14702 14703 state->dts_deadman = cyclic_add(&hdlr, &when); 14704#else 14705 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14706 dtrace_state_clean, state); 14707 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14708 dtrace_state_deadman, state); 14709#endif 14710 14711 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 14712 14713#if defined(sun) 14714 if (state->dts_getf != 0 && 14715 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 14716 /* 14717 * We don't have kernel privs but we have at least one call 14718 * to getf(); we need to bump our zone's count, and (if 14719 * this is the first enabling to have an unprivileged call 14720 * to getf()) we need to hook into closef(). 14721 */ 14722 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 14723 14724 if (dtrace_getf++ == 0) { 14725 ASSERT(dtrace_closef == NULL); 14726 dtrace_closef = dtrace_getf_barrier; 14727 } 14728 } 14729#endif 14730 14731 /* 14732 * Now it's time to actually fire the BEGIN probe. We need to disable 14733 * interrupts here both to record the CPU on which we fired the BEGIN 14734 * probe (the data from this CPU will be processed first at user 14735 * level) and to manually activate the buffer for this CPU. 14736 */ 14737 cookie = dtrace_interrupt_disable(); 14738 *cpu = curcpu; 14739 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 14740 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 14741 14742 dtrace_probe(dtrace_probeid_begin, 14743 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 14744 dtrace_interrupt_enable(cookie); 14745 /* 14746 * We may have had an exit action from a BEGIN probe; only change our 14747 * state to ACTIVE if we're still in WARMUP. 
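 * (An exit() action in a BEGIN clause moves the activity to DRAINING,
 * which is why the assertion below allows either state.)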
14748 */ 14749 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 14750 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 14751 14752 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 14753 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 14754 14755 /* 14756 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 14757 * want each CPU to transition its principal buffer out of the 14758 * INACTIVE state. Doing this assures that no CPU will suddenly begin 14759 * processing an ECB halfway down a probe's ECB chain; all CPUs will 14760 * atomically transition from processing none of a state's ECBs to 14761 * processing all of them. 14762 */ 14763 dtrace_xcall(DTRACE_CPUALL, 14764 (dtrace_xcall_t)dtrace_buffer_activate, state); 14765 goto out; 14766 14767err: 14768 dtrace_buffer_free(state->dts_buffer); 14769 dtrace_buffer_free(state->dts_aggbuffer); 14770 14771 if ((nspec = state->dts_nspeculations) == 0) { 14772 ASSERT(state->dts_speculations == NULL); 14773 goto out; 14774 } 14775 14776 spec = state->dts_speculations; 14777 ASSERT(spec != NULL); 14778 14779 for (i = 0; i < state->dts_nspeculations; i++) { 14780 if ((buf = spec[i].dtsp_buffer) == NULL) 14781 break; 14782 14783 dtrace_buffer_free(buf); 14784 kmem_free(buf, bufsize); 14785 } 14786 14787 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 14788 state->dts_nspeculations = 0; 14789 state->dts_speculations = NULL; 14790 14791out: 14792 mutex_exit(&dtrace_lock); 14793 mutex_exit(&cpu_lock); 14794 14795 return (rval); 14796} 14797 14798static int 14799dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 14800{ 14801 dtrace_icookie_t cookie; 14802 14803 ASSERT(MUTEX_HELD(&dtrace_lock)); 14804 14805 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 14806 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 14807 return (EINVAL); 14808 14809 /* 14810 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 14811 * to be sure that every CPU has seen it. See below for the details 14812 * on why this is done. 14813 */ 14814 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 14815 dtrace_sync(); 14816 14817 /* 14818 * By this point, it is impossible for any CPU to be still processing 14819 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 14820 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 14821 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 14822 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 14823 * iff we're in the END probe. 14824 */ 14825 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 14826 dtrace_sync(); 14827 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 14828 14829 /* 14830 * Finally, we can release the reserve and call the END probe. We 14831 * disable interrupts across calling the END probe to allow us to 14832 * return the CPU on which we actually called the END probe. This 14833 * allows user-land to be sure that this CPU's principal buffer is 14834 * processed last. 
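 *
 * In outline, the full stop sequence is therefore:
 *
 *	activity = DRAINING;  dtrace_sync();	-- no CPU is still ACTIVE
 *	activity = COOLDOWN;  dtrace_sync();	-- COOLDOWN iff in END
 *	reserve = 0; fire END with interrupts disabled, recording the CPU
 *	activity = STOPPED;   dtrace_sync();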
14835 */ 14836 state->dts_reserve = 0; 14837 14838 cookie = dtrace_interrupt_disable(); 14839 *cpu = curcpu; 14840 dtrace_probe(dtrace_probeid_end, 14841 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 14842 dtrace_interrupt_enable(cookie); 14843 14844 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 14845 dtrace_sync(); 14846 14847#if defined(sun) 14848 if (state->dts_getf != 0 && 14849 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 14850 /* 14851 * We don't have kernel privs but we have at least one call 14852 * to getf(); we need to lower our zone's count, and (if 14853 * this is the last enabling to have an unprivileged call 14854 * to getf()) we need to clear the closef() hook. 14855 */ 14856 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); 14857 ASSERT(dtrace_closef == dtrace_getf_barrier); 14858 ASSERT(dtrace_getf > 0); 14859 14860 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; 14861 14862 if (--dtrace_getf == 0) 14863 dtrace_closef = NULL; 14864 } 14865#endif 14866 14867 return (0); 14868} 14869 14870static int 14871dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 14872 dtrace_optval_t val) 14873{ 14874 ASSERT(MUTEX_HELD(&dtrace_lock)); 14875 14876 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 14877 return (EBUSY); 14878 14879 if (option >= DTRACEOPT_MAX) 14880 return (EINVAL); 14881 14882 if (option != DTRACEOPT_CPU && val < 0) 14883 return (EINVAL); 14884 14885 switch (option) { 14886 case DTRACEOPT_DESTRUCTIVE: 14887 if (dtrace_destructive_disallow) 14888 return (EACCES); 14889 14890 state->dts_cred.dcr_destructive = 1; 14891 break; 14892 14893 case DTRACEOPT_BUFSIZE: 14894 case DTRACEOPT_DYNVARSIZE: 14895 case DTRACEOPT_AGGSIZE: 14896 case DTRACEOPT_SPECSIZE: 14897 case DTRACEOPT_STRSIZE: 14898 if (val < 0) 14899 return (EINVAL); 14900 14901 if (val >= LONG_MAX) { 14902 /* 14903 * If this is an otherwise negative value, set it to 14904 * the highest multiple of 128m less than LONG_MAX. 14905 * Technically, we're adjusting the size without 14906 * regard to the buffer resizing policy, but in fact, 14907 * this has no effect -- if we set the buffer size to 14908 * ~LONG_MAX and the buffer policy is ultimately set to 14909 * be "manual", the buffer allocation is guaranteed to 14910 * fail, if only because the allocation requires two 14911 * buffers. (We set the the size to the highest 14912 * multiple of 128m because it ensures that the size 14913 * will remain a multiple of a megabyte when 14914 * repeatedly halved -- all the way down to 15m.) 14915 */ 14916 val = LONG_MAX - (1 << 27) + 1; 14917 } 14918 } 14919 14920 state->dts_options[option] = val; 14921 14922 return (0); 14923} 14924 14925static void 14926dtrace_state_destroy(dtrace_state_t *state) 14927{ 14928 dtrace_ecb_t *ecb; 14929 dtrace_vstate_t *vstate = &state->dts_vstate; 14930#if defined(sun) 14931 minor_t minor = getminor(state->dts_dev); 14932#endif 14933 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 14934 dtrace_speculation_t *spec = state->dts_speculations; 14935 int nspec = state->dts_nspeculations; 14936 uint32_t match; 14937 14938 ASSERT(MUTEX_HELD(&dtrace_lock)); 14939 ASSERT(MUTEX_HELD(&cpu_lock)); 14940 14941 /* 14942 * First, retract any retained enablings for this state. 
14943 */ 14944 dtrace_enabling_retract(state); 14945 ASSERT(state->dts_nretained == 0); 14946 14947 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 14948 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 14949 /* 14950 * We have managed to come into dtrace_state_destroy() on a 14951 * hot enabling -- almost certainly because of a disorderly 14952 * shutdown of a consumer. (That is, a consumer that is 14953 * exiting without having called dtrace_stop().) In this case, 14954 * we're going to set our activity to be KILLED, and then 14955 * issue a sync to be sure that everyone is out of probe 14956 * context before we start blowing away ECBs. 14957 */ 14958 state->dts_activity = DTRACE_ACTIVITY_KILLED; 14959 dtrace_sync(); 14960 } 14961 14962 /* 14963 * Release the credential hold we took in dtrace_state_create(). 14964 */ 14965 if (state->dts_cred.dcr_cred != NULL) 14966 crfree(state->dts_cred.dcr_cred); 14967 14968 /* 14969 * Now we can safely disable and destroy any enabled probes. Because 14970 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 14971 * (especially if they're all enabled), we take two passes through the 14972 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 14973 * in the second we disable whatever is left over. 14974 */ 14975 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 14976 for (i = 0; i < state->dts_necbs; i++) { 14977 if ((ecb = state->dts_ecbs[i]) == NULL) 14978 continue; 14979 14980 if (match && ecb->dte_probe != NULL) { 14981 dtrace_probe_t *probe = ecb->dte_probe; 14982 dtrace_provider_t *prov = probe->dtpr_provider; 14983 14984 if (!(prov->dtpv_priv.dtpp_flags & match)) 14985 continue; 14986 } 14987 14988 dtrace_ecb_disable(ecb); 14989 dtrace_ecb_destroy(ecb); 14990 } 14991 14992 if (!match) 14993 break; 14994 } 14995 14996 /* 14997 * Before we free the buffers, perform one more sync to assure that 14998 * every CPU is out of probe context. 
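 * (A CPU that entered dtrace_probe() before the ECBs were destroyed
 * above may still be referencing this state's buffers; because probe
 * context runs with interrupts disabled, the cross-call in
 * dtrace_sync() cannot complete until any such probe has finished.)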
14999 */ 15000 dtrace_sync(); 15001 15002 dtrace_buffer_free(state->dts_buffer); 15003 dtrace_buffer_free(state->dts_aggbuffer); 15004 15005 for (i = 0; i < nspec; i++) 15006 dtrace_buffer_free(spec[i].dtsp_buffer); 15007 15008#if defined(sun) 15009 if (state->dts_cleaner != CYCLIC_NONE) 15010 cyclic_remove(state->dts_cleaner); 15011 15012 if (state->dts_deadman != CYCLIC_NONE) 15013 cyclic_remove(state->dts_deadman); 15014#else 15015 callout_stop(&state->dts_cleaner); 15016 callout_drain(&state->dts_cleaner); 15017 callout_stop(&state->dts_deadman); 15018 callout_drain(&state->dts_deadman); 15019#endif 15020 15021 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15022 dtrace_vstate_fini(vstate); 15023 if (state->dts_ecbs != NULL) 15024 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15025 15026 if (state->dts_aggregations != NULL) { 15027#ifdef DEBUG 15028 for (i = 0; i < state->dts_naggregations; i++) 15029 ASSERT(state->dts_aggregations[i] == NULL); 15030#endif 15031 ASSERT(state->dts_naggregations > 0); 15032 kmem_free(state->dts_aggregations, 15033 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15034 } 15035 15036 kmem_free(state->dts_buffer, bufsize); 15037 kmem_free(state->dts_aggbuffer, bufsize); 15038 15039 for (i = 0; i < nspec; i++) 15040 kmem_free(spec[i].dtsp_buffer, bufsize); 15041 15042 if (spec != NULL) 15043 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15044 15045 dtrace_format_destroy(state); 15046 15047 if (state->dts_aggid_arena != NULL) { 15048#if defined(sun) 15049 vmem_destroy(state->dts_aggid_arena); 15050#else 15051 delete_unrhdr(state->dts_aggid_arena); 15052#endif 15053 state->dts_aggid_arena = NULL; 15054 } 15055#if defined(sun) 15056 ddi_soft_state_free(dtrace_softstate, minor); 15057 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15058#endif 15059} 15060 15061/* 15062 * DTrace Anonymous Enabling Functions 15063 */ 15064static dtrace_state_t * 15065dtrace_anon_grab(void) 15066{ 15067 dtrace_state_t *state; 15068 15069 ASSERT(MUTEX_HELD(&dtrace_lock)); 15070 15071 if ((state = dtrace_anon.dta_state) == NULL) { 15072 ASSERT(dtrace_anon.dta_enabling == NULL); 15073 return (NULL); 15074 } 15075 15076 ASSERT(dtrace_anon.dta_enabling != NULL); 15077 ASSERT(dtrace_retained != NULL); 15078 15079 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15080 dtrace_anon.dta_enabling = NULL; 15081 dtrace_anon.dta_state = NULL; 15082 15083 return (state); 15084} 15085 15086static void 15087dtrace_anon_property(void) 15088{ 15089 int i, rv; 15090 dtrace_state_t *state; 15091 dof_hdr_t *dof; 15092 char c[32]; /* enough for "dof-data-" + digits */ 15093 15094 ASSERT(MUTEX_HELD(&dtrace_lock)); 15095 ASSERT(MUTEX_HELD(&cpu_lock)); 15096 15097 for (i = 0; ; i++) { 15098 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15099 15100 dtrace_err_verbose = 1; 15101 15102 if ((dof = dtrace_dof_property(c)) == NULL) { 15103 dtrace_err_verbose = 0; 15104 break; 15105 } 15106 15107#if defined(sun) 15108 /* 15109 * We want to create anonymous state, so we need to transition 15110 * the kernel debugger to indicate that DTrace is active. If 15111 * this fails (e.g. because the debugger has modified text in 15112 * some way), we won't continue with the processing. 
15113 */ 15114 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15115 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15116 "enabling ignored."); 15117 dtrace_dof_destroy(dof); 15118 break; 15119 } 15120#endif 15121 15122 /* 15123 * If we haven't allocated an anonymous state, we'll do so now. 15124 */ 15125 if ((state = dtrace_anon.dta_state) == NULL) { 15126#if defined(sun) 15127 state = dtrace_state_create(NULL, NULL); 15128#else 15129 state = dtrace_state_create(NULL); 15130#endif 15131 dtrace_anon.dta_state = state; 15132 15133 if (state == NULL) { 15134 /* 15135 * This basically shouldn't happen: the only 15136 * failure mode from dtrace_state_create() is a 15137 * failure of ddi_soft_state_zalloc() that 15138 * itself should never happen. Still, the 15139 * interface allows for a failure mode, and 15140 * we want to fail as gracefully as possible: 15141 * we'll emit an error message and cease 15142 * processing anonymous state in this case. 15143 */ 15144 cmn_err(CE_WARN, "failed to create " 15145 "anonymous state"); 15146 dtrace_dof_destroy(dof); 15147 break; 15148 } 15149 } 15150 15151 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15152 &dtrace_anon.dta_enabling, 0, B_TRUE); 15153 15154 if (rv == 0) 15155 rv = dtrace_dof_options(dof, state); 15156 15157 dtrace_err_verbose = 0; 15158 dtrace_dof_destroy(dof); 15159 15160 if (rv != 0) { 15161 /* 15162 * This is malformed DOF; chuck any anonymous state 15163 * that we created. 15164 */ 15165 ASSERT(dtrace_anon.dta_enabling == NULL); 15166 dtrace_state_destroy(state); 15167 dtrace_anon.dta_state = NULL; 15168 break; 15169 } 15170 15171 ASSERT(dtrace_anon.dta_enabling != NULL); 15172 } 15173 15174 if (dtrace_anon.dta_enabling != NULL) { 15175 int rval; 15176 15177 /* 15178 * dtrace_enabling_retain() can only fail because we are 15179 * trying to retain more enablings than are allowed -- but 15180 * we only have one anonymous enabling, and we are guaranteed 15181 * to be allowed at least one retained enabling; we assert 15182 * that dtrace_enabling_retain() returns success. 15183 */ 15184 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15185 ASSERT(rval == 0); 15186 15187 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15188 } 15189} 15190 15191/* 15192 * DTrace Helper Functions 15193 */ 15194static void 15195dtrace_helper_trace(dtrace_helper_action_t *helper, 15196 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15197{ 15198 uint32_t size, next, nnext, i; 15199 dtrace_helptrace_t *ent; 15200 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15201 15202 if (!dtrace_helptrace_enabled) 15203 return; 15204 15205 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15206 15207 /* 15208 * What would a tracing framework be without its own tracing 15209 * framework? (Well, a hell of a lot simpler, for starters...) 15210 */ 15211 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15212 sizeof (uint64_t) - sizeof (uint64_t); 15213 15214 /* 15215 * Iterate until we can allocate a slot in the trace buffer. 15216 */ 15217 do { 15218 next = dtrace_helptrace_next; 15219 15220 if (next + size < dtrace_helptrace_bufsize) { 15221 nnext = next + size; 15222 } else { 15223 nnext = size; 15224 } 15225 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15226 15227 /* 15228 * We have our slot; fill it in. 
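 *
 * The loop above is a small lock-free ring-buffer reservation; the
 * same pattern in isolation looks roughly like this (a simplified
 * sketch with hypothetical names, not the code here):
 *
 *	do {
 *		old = ring_next;
 *		new = (old + size < ring_size) ? old + size : size;
 *	} while (dtrace_cas32(&ring_next, old, new) != old);
 *	slot = (new == size) ? 0 : old;		-- wrapped to offset 0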
15229 */ 15230 if (nnext == size) 15231 next = 0; 15232 15233 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 15234 ent->dtht_helper = helper; 15235 ent->dtht_where = where; 15236 ent->dtht_nlocals = vstate->dtvs_nlocals; 15237 15238 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15239 mstate->dtms_fltoffs : -1; 15240 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15241 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15242 15243 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15244 dtrace_statvar_t *svar; 15245 15246 if ((svar = vstate->dtvs_locals[i]) == NULL) 15247 continue; 15248 15249 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15250 ent->dtht_locals[i] = 15251 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15252 } 15253} 15254 15255static uint64_t 15256dtrace_helper(int which, dtrace_mstate_t *mstate, 15257 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15258{ 15259 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15260 uint64_t sarg0 = mstate->dtms_arg[0]; 15261 uint64_t sarg1 = mstate->dtms_arg[1]; 15262 uint64_t rval = 0; 15263 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15264 dtrace_helper_action_t *helper; 15265 dtrace_vstate_t *vstate; 15266 dtrace_difo_t *pred; 15267 int i, trace = dtrace_helptrace_enabled; 15268 15269 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15270 15271 if (helpers == NULL) 15272 return (0); 15273 15274 if ((helper = helpers->dthps_actions[which]) == NULL) 15275 return (0); 15276 15277 vstate = &helpers->dthps_vstate; 15278 mstate->dtms_arg[0] = arg0; 15279 mstate->dtms_arg[1] = arg1; 15280 15281 /* 15282 * Now iterate over each helper. If its predicate evaluates to 'true', 15283 * we'll call the corresponding actions. Note that the below calls 15284 * to dtrace_dif_emulate() may set faults in machine state. This is 15285 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15286 * the stored DIF offset with its own (which is the desired behavior). 15287 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15288 * from machine state; this is okay, too. 15289 */ 15290 for (; helper != NULL; helper = helper->dtha_next) { 15291 if ((pred = helper->dtha_predicate) != NULL) { 15292 if (trace) 15293 dtrace_helper_trace(helper, mstate, vstate, 0); 15294 15295 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15296 goto next; 15297 15298 if (*flags & CPU_DTRACE_FAULT) 15299 goto err; 15300 } 15301 15302 for (i = 0; i < helper->dtha_nactions; i++) { 15303 if (trace) 15304 dtrace_helper_trace(helper, 15305 mstate, vstate, i + 1); 15306 15307 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15308 mstate, vstate, state); 15309 15310 if (*flags & CPU_DTRACE_FAULT) 15311 goto err; 15312 } 15313 15314next: 15315 if (trace) 15316 dtrace_helper_trace(helper, mstate, vstate, 15317 DTRACE_HELPTRACE_NEXT); 15318 } 15319 15320 if (trace) 15321 dtrace_helper_trace(helper, mstate, vstate, 15322 DTRACE_HELPTRACE_DONE); 15323 15324 /* 15325 * Restore the arg0 that we saved upon entry. 15326 */ 15327 mstate->dtms_arg[0] = sarg0; 15328 mstate->dtms_arg[1] = sarg1; 15329 15330 return (rval); 15331 15332err: 15333 if (trace) 15334 dtrace_helper_trace(helper, mstate, vstate, 15335 DTRACE_HELPTRACE_ERR); 15336 15337 /* 15338 * Restore the arg0 that we saved upon entry. 
15339 */ 15340 mstate->dtms_arg[0] = sarg0; 15341 mstate->dtms_arg[1] = sarg1; 15342 15343 return (0); 15344} 15345 15346static void 15347dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 15348 dtrace_vstate_t *vstate) 15349{ 15350 int i; 15351 15352 if (helper->dtha_predicate != NULL) 15353 dtrace_difo_release(helper->dtha_predicate, vstate); 15354 15355 for (i = 0; i < helper->dtha_nactions; i++) { 15356 ASSERT(helper->dtha_actions[i] != NULL); 15357 dtrace_difo_release(helper->dtha_actions[i], vstate); 15358 } 15359 15360 kmem_free(helper->dtha_actions, 15361 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 15362 kmem_free(helper, sizeof (dtrace_helper_action_t)); 15363} 15364 15365static int 15366dtrace_helper_destroygen(int gen) 15367{ 15368 proc_t *p = curproc; 15369 dtrace_helpers_t *help = p->p_dtrace_helpers; 15370 dtrace_vstate_t *vstate; 15371 int i; 15372 15373 ASSERT(MUTEX_HELD(&dtrace_lock)); 15374 15375 if (help == NULL || gen > help->dthps_generation) 15376 return (EINVAL); 15377 15378 vstate = &help->dthps_vstate; 15379 15380 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15381 dtrace_helper_action_t *last = NULL, *h, *next; 15382 15383 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15384 next = h->dtha_next; 15385 15386 if (h->dtha_generation == gen) { 15387 if (last != NULL) { 15388 last->dtha_next = next; 15389 } else { 15390 help->dthps_actions[i] = next; 15391 } 15392 15393 dtrace_helper_action_destroy(h, vstate); 15394 } else { 15395 last = h; 15396 } 15397 } 15398 } 15399 15400 /* 15401 * Interate until we've cleared out all helper providers with the 15402 * given generation number. 15403 */ 15404 for (;;) { 15405 dtrace_helper_provider_t *prov; 15406 15407 /* 15408 * Look for a helper provider with the right generation. We 15409 * have to start back at the beginning of the list each time 15410 * because we drop dtrace_lock. It's unlikely that we'll make 15411 * more than two passes. 15412 */ 15413 for (i = 0; i < help->dthps_nprovs; i++) { 15414 prov = help->dthps_provs[i]; 15415 15416 if (prov->dthp_generation == gen) 15417 break; 15418 } 15419 15420 /* 15421 * If there were no matches, we're done. 15422 */ 15423 if (i == help->dthps_nprovs) 15424 break; 15425 15426 /* 15427 * Move the last helper provider into this slot. 15428 */ 15429 help->dthps_nprovs--; 15430 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 15431 help->dthps_provs[help->dthps_nprovs] = NULL; 15432 15433 mutex_exit(&dtrace_lock); 15434 15435 /* 15436 * If we have a meta provider, remove this helper provider. 
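 * dtrace_lock was dropped above because dtrace_meta_lock must be
 * acquired first (as dtrace_helper_provider_register() does below);
 * taking it here while still holding dtrace_lock would invert that
 * ordering.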
15437 */ 15438 mutex_enter(&dtrace_meta_lock); 15439 if (dtrace_meta_pid != NULL) { 15440 ASSERT(dtrace_deferred_pid == NULL); 15441 dtrace_helper_provider_remove(&prov->dthp_prov, 15442 p->p_pid); 15443 } 15444 mutex_exit(&dtrace_meta_lock); 15445 15446 dtrace_helper_provider_destroy(prov); 15447 15448 mutex_enter(&dtrace_lock); 15449 } 15450 15451 return (0); 15452} 15453 15454static int 15455dtrace_helper_validate(dtrace_helper_action_t *helper) 15456{ 15457 int err = 0, i; 15458 dtrace_difo_t *dp; 15459 15460 if ((dp = helper->dtha_predicate) != NULL) 15461 err += dtrace_difo_validate_helper(dp); 15462 15463 for (i = 0; i < helper->dtha_nactions; i++) 15464 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 15465 15466 return (err == 0); 15467} 15468 15469static int 15470dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 15471{ 15472 dtrace_helpers_t *help; 15473 dtrace_helper_action_t *helper, *last; 15474 dtrace_actdesc_t *act; 15475 dtrace_vstate_t *vstate; 15476 dtrace_predicate_t *pred; 15477 int count = 0, nactions = 0, i; 15478 15479 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 15480 return (EINVAL); 15481 15482 help = curproc->p_dtrace_helpers; 15483 last = help->dthps_actions[which]; 15484 vstate = &help->dthps_vstate; 15485 15486 for (count = 0; last != NULL; last = last->dtha_next) { 15487 count++; 15488 if (last->dtha_next == NULL) 15489 break; 15490 } 15491 15492 /* 15493 * If we already have dtrace_helper_actions_max helper actions for this 15494 * helper action type, we'll refuse to add a new one. 15495 */ 15496 if (count >= dtrace_helper_actions_max) 15497 return (ENOSPC); 15498 15499 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 15500 helper->dtha_generation = help->dthps_generation; 15501 15502 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 15503 ASSERT(pred->dtp_difo != NULL); 15504 dtrace_difo_hold(pred->dtp_difo); 15505 helper->dtha_predicate = pred->dtp_difo; 15506 } 15507 15508 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 15509 if (act->dtad_kind != DTRACEACT_DIFEXPR) 15510 goto err; 15511 15512 if (act->dtad_difo == NULL) 15513 goto err; 15514 15515 nactions++; 15516 } 15517 15518 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 15519 (helper->dtha_nactions = nactions), KM_SLEEP); 15520 15521 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 15522 dtrace_difo_hold(act->dtad_difo); 15523 helper->dtha_actions[i++] = act->dtad_difo; 15524 } 15525 15526 if (!dtrace_helper_validate(helper)) 15527 goto err; 15528 15529 if (last == NULL) { 15530 help->dthps_actions[which] = helper; 15531 } else { 15532 last->dtha_next = helper; 15533 } 15534 15535 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 15536 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 15537 dtrace_helptrace_next = 0; 15538 } 15539 15540 return (0); 15541err: 15542 dtrace_helper_action_destroy(helper, vstate); 15543 return (EINVAL); 15544} 15545 15546static void 15547dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 15548 dof_helper_t *dofhp) 15549{ 15550 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 15551 15552 mutex_enter(&dtrace_meta_lock); 15553 mutex_enter(&dtrace_lock); 15554 15555 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 15556 /* 15557 * If the dtrace module is loaded but not attached, or if 15558 * there aren't isn't a meta provider registered to deal with 15559 * these provider descriptions, we need to postpone creating 15560 * the actual providers until later. 
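 * (The deferred list is drained if and when a meta provider later
 * registers, at which point these descriptions are passed along to
 * it.)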
15561 */ 15562 15563 if (help->dthps_next == NULL && help->dthps_prev == NULL && 15564 dtrace_deferred_pid != help) { 15565 help->dthps_deferred = 1; 15566 help->dthps_pid = p->p_pid; 15567 help->dthps_next = dtrace_deferred_pid; 15568 help->dthps_prev = NULL; 15569 if (dtrace_deferred_pid != NULL) 15570 dtrace_deferred_pid->dthps_prev = help; 15571 dtrace_deferred_pid = help; 15572 } 15573 15574 mutex_exit(&dtrace_lock); 15575 15576 } else if (dofhp != NULL) { 15577 /* 15578 * If the dtrace module is loaded and we have a particular 15579 * helper provider description, pass that off to the 15580 * meta provider. 15581 */ 15582 15583 mutex_exit(&dtrace_lock); 15584 15585 dtrace_helper_provide(dofhp, p->p_pid); 15586 15587 } else { 15588 /* 15589 * Otherwise, just pass all the helper provider descriptions 15590 * off to the meta provider. 15591 */ 15592 15593 int i; 15594 mutex_exit(&dtrace_lock); 15595 15596 for (i = 0; i < help->dthps_nprovs; i++) { 15597 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 15598 p->p_pid); 15599 } 15600 } 15601 15602 mutex_exit(&dtrace_meta_lock); 15603} 15604 15605static int 15606dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 15607{ 15608 dtrace_helpers_t *help; 15609 dtrace_helper_provider_t *hprov, **tmp_provs; 15610 uint_t tmp_maxprovs, i; 15611 15612 ASSERT(MUTEX_HELD(&dtrace_lock)); 15613 15614 help = curproc->p_dtrace_helpers; 15615 ASSERT(help != NULL); 15616 15617 /* 15618 * If we already have dtrace_helper_providers_max helper providers, 15619 * we're refuse to add a new one. 15620 */ 15621 if (help->dthps_nprovs >= dtrace_helper_providers_max) 15622 return (ENOSPC); 15623 15624 /* 15625 * Check to make sure this isn't a duplicate. 15626 */ 15627 for (i = 0; i < help->dthps_nprovs; i++) { 15628 if (dofhp->dofhp_dof == 15629 help->dthps_provs[i]->dthp_prov.dofhp_dof) 15630 return (EALREADY); 15631 } 15632 15633 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 15634 hprov->dthp_prov = *dofhp; 15635 hprov->dthp_ref = 1; 15636 hprov->dthp_generation = gen; 15637 15638 /* 15639 * Allocate a bigger table for helper providers if it's already full. 
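 * The table grows geometrically:  it starts at two entries and doubles
 * (2, 4, 8, ...) up to dtrace_helper_providers_max; the old array, if
 * any, is copied into the new one and then freed.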
15640 */ 15641 if (help->dthps_maxprovs == help->dthps_nprovs) { 15642 tmp_maxprovs = help->dthps_maxprovs; 15643 tmp_provs = help->dthps_provs; 15644 15645 if (help->dthps_maxprovs == 0) 15646 help->dthps_maxprovs = 2; 15647 else 15648 help->dthps_maxprovs *= 2; 15649 if (help->dthps_maxprovs > dtrace_helper_providers_max) 15650 help->dthps_maxprovs = dtrace_helper_providers_max; 15651 15652 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 15653 15654 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 15655 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15656 15657 if (tmp_provs != NULL) { 15658 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 15659 sizeof (dtrace_helper_provider_t *)); 15660 kmem_free(tmp_provs, tmp_maxprovs * 15661 sizeof (dtrace_helper_provider_t *)); 15662 } 15663 } 15664 15665 help->dthps_provs[help->dthps_nprovs] = hprov; 15666 help->dthps_nprovs++; 15667 15668 return (0); 15669} 15670 15671static void 15672dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 15673{ 15674 mutex_enter(&dtrace_lock); 15675 15676 if (--hprov->dthp_ref == 0) { 15677 dof_hdr_t *dof; 15678 mutex_exit(&dtrace_lock); 15679 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 15680 dtrace_dof_destroy(dof); 15681 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 15682 } else { 15683 mutex_exit(&dtrace_lock); 15684 } 15685} 15686 15687static int 15688dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 15689{ 15690 uintptr_t daddr = (uintptr_t)dof; 15691 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 15692 dof_provider_t *provider; 15693 dof_probe_t *probe; 15694 uint8_t *arg; 15695 char *strtab, *typestr; 15696 dof_stridx_t typeidx; 15697 size_t typesz; 15698 uint_t nprobes, j, k; 15699 15700 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 15701 15702 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 15703 dtrace_dof_error(dof, "misaligned section offset"); 15704 return (-1); 15705 } 15706 15707 /* 15708 * The section needs to be large enough to contain the DOF provider 15709 * structure appropriate for the given version. 15710 */ 15711 if (sec->dofs_size < 15712 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
15713 offsetof(dof_provider_t, dofpv_prenoffs) : 15714 sizeof (dof_provider_t))) { 15715 dtrace_dof_error(dof, "provider section too small"); 15716 return (-1); 15717 } 15718 15719 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 15720 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 15721 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 15722 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 15723 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 15724 15725 if (str_sec == NULL || prb_sec == NULL || 15726 arg_sec == NULL || off_sec == NULL) 15727 return (-1); 15728 15729 enoff_sec = NULL; 15730 15731 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 15732 provider->dofpv_prenoffs != DOF_SECT_NONE && 15733 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 15734 provider->dofpv_prenoffs)) == NULL) 15735 return (-1); 15736 15737 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 15738 15739 if (provider->dofpv_name >= str_sec->dofs_size || 15740 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 15741 dtrace_dof_error(dof, "invalid provider name"); 15742 return (-1); 15743 } 15744 15745 if (prb_sec->dofs_entsize == 0 || 15746 prb_sec->dofs_entsize > prb_sec->dofs_size) { 15747 dtrace_dof_error(dof, "invalid entry size"); 15748 return (-1); 15749 } 15750 15751 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 15752 dtrace_dof_error(dof, "misaligned entry size"); 15753 return (-1); 15754 } 15755 15756 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 15757 dtrace_dof_error(dof, "invalid entry size"); 15758 return (-1); 15759 } 15760 15761 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 15762 dtrace_dof_error(dof, "misaligned section offset"); 15763 return (-1); 15764 } 15765 15766 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 15767 dtrace_dof_error(dof, "invalid entry size"); 15768 return (-1); 15769 } 15770 15771 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 15772 15773 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 15774 15775 /* 15776 * Take a pass through the probes to check for errors. 15777 */ 15778 for (j = 0; j < nprobes; j++) { 15779 probe = (dof_probe_t *)(uintptr_t)(daddr + 15780 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 15781 15782 if (probe->dofpr_func >= str_sec->dofs_size) { 15783 dtrace_dof_error(dof, "invalid function name"); 15784 return (-1); 15785 } 15786 15787 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 15788 dtrace_dof_error(dof, "function name too long"); 15789 return (-1); 15790 } 15791 15792 if (probe->dofpr_name >= str_sec->dofs_size || 15793 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 15794 dtrace_dof_error(dof, "invalid probe name"); 15795 return (-1); 15796 } 15797 15798 /* 15799 * The offset count must not wrap the index, and the offsets 15800 * must also not overflow the section's data. 15801 */ 15802 if (probe->dofpr_offidx + probe->dofpr_noffs < 15803 probe->dofpr_offidx || 15804 (probe->dofpr_offidx + probe->dofpr_noffs) * 15805 off_sec->dofs_entsize > off_sec->dofs_size) { 15806 dtrace_dof_error(dof, "invalid probe offset"); 15807 return (-1); 15808 } 15809 15810 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 15811 /* 15812 * If there's no is-enabled offset section, make sure 15813 * there aren't any is-enabled offsets. Otherwise 15814 * perform the same checks as for probe offsets 15815 * (immediately above). 
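 * (Is-enabled probes, and thus the is-enabled offset section, were
 * added after DOF version 1, which is why version-1 DOF is exempt
 * from these checks.)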
15816 */ 15817 if (enoff_sec == NULL) { 15818 if (probe->dofpr_enoffidx != 0 || 15819 probe->dofpr_nenoffs != 0) { 15820 dtrace_dof_error(dof, "is-enabled " 15821 "offsets with null section"); 15822 return (-1); 15823 } 15824 } else if (probe->dofpr_enoffidx + 15825 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 15826 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 15827 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 15828 dtrace_dof_error(dof, "invalid is-enabled " 15829 "offset"); 15830 return (-1); 15831 } 15832 15833 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 15834 dtrace_dof_error(dof, "zero probe and " 15835 "is-enabled offsets"); 15836 return (-1); 15837 } 15838 } else if (probe->dofpr_noffs == 0) { 15839 dtrace_dof_error(dof, "zero probe offsets"); 15840 return (-1); 15841 } 15842 15843 if (probe->dofpr_argidx + probe->dofpr_xargc < 15844 probe->dofpr_argidx || 15845 (probe->dofpr_argidx + probe->dofpr_xargc) * 15846 arg_sec->dofs_entsize > arg_sec->dofs_size) { 15847 dtrace_dof_error(dof, "invalid args"); 15848 return (-1); 15849 } 15850 15851 typeidx = probe->dofpr_nargv; 15852 typestr = strtab + probe->dofpr_nargv; 15853 for (k = 0; k < probe->dofpr_nargc; k++) { 15854 if (typeidx >= str_sec->dofs_size) { 15855 dtrace_dof_error(dof, "bad " 15856 "native argument type"); 15857 return (-1); 15858 } 15859 15860 typesz = strlen(typestr) + 1; 15861 if (typesz > DTRACE_ARGTYPELEN) { 15862 dtrace_dof_error(dof, "native " 15863 "argument type too long"); 15864 return (-1); 15865 } 15866 typeidx += typesz; 15867 typestr += typesz; 15868 } 15869 15870 typeidx = probe->dofpr_xargv; 15871 typestr = strtab + probe->dofpr_xargv; 15872 for (k = 0; k < probe->dofpr_xargc; k++) { 15873 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 15874 dtrace_dof_error(dof, "bad " 15875 "native argument index"); 15876 return (-1); 15877 } 15878 15879 if (typeidx >= str_sec->dofs_size) { 15880 dtrace_dof_error(dof, "bad " 15881 "translated argument type"); 15882 return (-1); 15883 } 15884 15885 typesz = strlen(typestr) + 1; 15886 if (typesz > DTRACE_ARGTYPELEN) { 15887 dtrace_dof_error(dof, "translated argument " 15888 "type too long"); 15889 return (-1); 15890 } 15891 15892 typeidx += typesz; 15893 typestr += typesz; 15894 } 15895 } 15896 15897 return (0); 15898} 15899 15900static int 15901dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 15902{ 15903 dtrace_helpers_t *help; 15904 dtrace_vstate_t *vstate; 15905 dtrace_enabling_t *enab = NULL; 15906 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 15907 uintptr_t daddr = (uintptr_t)dof; 15908 15909 ASSERT(MUTEX_HELD(&dtrace_lock)); 15910 15911 if ((help = curproc->p_dtrace_helpers) == NULL) 15912 help = dtrace_helpers_create(curproc); 15913 15914 vstate = &help->dthps_vstate; 15915 15916 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 15917 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 15918 dtrace_dof_destroy(dof); 15919 return (rv); 15920 } 15921 15922 /* 15923 * Look for helper providers and validate their descriptions. 
15924 */ 15925 if (dhp != NULL) { 15926 for (i = 0; i < dof->dofh_secnum; i++) { 15927 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 15928 dof->dofh_secoff + i * dof->dofh_secsize); 15929 15930 if (sec->dofs_type != DOF_SECT_PROVIDER) 15931 continue; 15932 15933 if (dtrace_helper_provider_validate(dof, sec) != 0) { 15934 dtrace_enabling_destroy(enab); 15935 dtrace_dof_destroy(dof); 15936 return (-1); 15937 } 15938 15939 nprovs++; 15940 } 15941 } 15942 15943 /* 15944 * Now we need to walk through the ECB descriptions in the enabling. 15945 */ 15946 for (i = 0; i < enab->dten_ndesc; i++) { 15947 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 15948 dtrace_probedesc_t *desc = &ep->dted_probe; 15949 15950 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 15951 continue; 15952 15953 if (strcmp(desc->dtpd_mod, "helper") != 0) 15954 continue; 15955 15956 if (strcmp(desc->dtpd_func, "ustack") != 0) 15957 continue; 15958 15959 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 15960 ep)) != 0) { 15961 /* 15962 * Adding this helper action failed -- we are now going 15963 * to rip out the entire generation and return failure. 15964 */ 15965 (void) dtrace_helper_destroygen(help->dthps_generation); 15966 dtrace_enabling_destroy(enab); 15967 dtrace_dof_destroy(dof); 15968 return (-1); 15969 } 15970 15971 nhelpers++; 15972 } 15973 15974 if (nhelpers < enab->dten_ndesc) 15975 dtrace_dof_error(dof, "unmatched helpers"); 15976 15977 gen = help->dthps_generation++; 15978 dtrace_enabling_destroy(enab); 15979 15980 if (dhp != NULL && nprovs > 0) { 15981 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 15982 if (dtrace_helper_provider_add(dhp, gen) == 0) { 15983 mutex_exit(&dtrace_lock); 15984 dtrace_helper_provider_register(curproc, help, dhp); 15985 mutex_enter(&dtrace_lock); 15986 15987 destroy = 0; 15988 } 15989 } 15990 15991 if (destroy) 15992 dtrace_dof_destroy(dof); 15993 15994 return (gen); 15995} 15996 15997static dtrace_helpers_t * 15998dtrace_helpers_create(proc_t *p) 15999{ 16000 dtrace_helpers_t *help; 16001 16002 ASSERT(MUTEX_HELD(&dtrace_lock)); 16003 ASSERT(p->p_dtrace_helpers == NULL); 16004 16005 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 16006 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 16007 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 16008 16009 p->p_dtrace_helpers = help; 16010 dtrace_helpers++; 16011 16012 return (help); 16013} 16014 16015#if defined(sun) 16016static 16017#endif 16018void 16019dtrace_helpers_destroy(proc_t *p) 16020{ 16021 dtrace_helpers_t *help; 16022 dtrace_vstate_t *vstate; 16023#if defined(sun) 16024 proc_t *p = curproc; 16025#endif 16026 int i; 16027 16028 mutex_enter(&dtrace_lock); 16029 16030 ASSERT(p->p_dtrace_helpers != NULL); 16031 ASSERT(dtrace_helpers > 0); 16032 16033 help = p->p_dtrace_helpers; 16034 vstate = &help->dthps_vstate; 16035 16036 /* 16037 * We're now going to lose the help from this process. 16038 */ 16039 p->p_dtrace_helpers = NULL; 16040 dtrace_sync(); 16041 16042 /* 16043 * Destory the helper actions. 16044 */ 16045 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16046 dtrace_helper_action_t *h, *next; 16047 16048 for (h = help->dthps_actions[i]; h != NULL; h = next) { 16049 next = h->dtha_next; 16050 dtrace_helper_action_destroy(h, vstate); 16051 h = next; 16052 } 16053 } 16054 16055 mutex_exit(&dtrace_lock); 16056 16057 /* 16058 * Destroy the helper providers. 
16059 */ 16060 if (help->dthps_maxprovs > 0) { 16061 mutex_enter(&dtrace_meta_lock); 16062 if (dtrace_meta_pid != NULL) { 16063 ASSERT(dtrace_deferred_pid == NULL); 16064 16065 for (i = 0; i < help->dthps_nprovs; i++) { 16066 dtrace_helper_provider_remove( 16067 &help->dthps_provs[i]->dthp_prov, p->p_pid); 16068 } 16069 } else { 16070 mutex_enter(&dtrace_lock); 16071 ASSERT(help->dthps_deferred == 0 || 16072 help->dthps_next != NULL || 16073 help->dthps_prev != NULL || 16074 help == dtrace_deferred_pid); 16075 16076 /* 16077 * Remove the helper from the deferred list. 16078 */ 16079 if (help->dthps_next != NULL) 16080 help->dthps_next->dthps_prev = help->dthps_prev; 16081 if (help->dthps_prev != NULL) 16082 help->dthps_prev->dthps_next = help->dthps_next; 16083 if (dtrace_deferred_pid == help) { 16084 dtrace_deferred_pid = help->dthps_next; 16085 ASSERT(help->dthps_prev == NULL); 16086 } 16087 16088 mutex_exit(&dtrace_lock); 16089 } 16090 16091 mutex_exit(&dtrace_meta_lock); 16092 16093 for (i = 0; i < help->dthps_nprovs; i++) { 16094 dtrace_helper_provider_destroy(help->dthps_provs[i]); 16095 } 16096 16097 kmem_free(help->dthps_provs, help->dthps_maxprovs * 16098 sizeof (dtrace_helper_provider_t *)); 16099 } 16100 16101 mutex_enter(&dtrace_lock); 16102 16103 dtrace_vstate_fini(&help->dthps_vstate); 16104 kmem_free(help->dthps_actions, 16105 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 16106 kmem_free(help, sizeof (dtrace_helpers_t)); 16107 16108 --dtrace_helpers; 16109 mutex_exit(&dtrace_lock); 16110} 16111 16112#if defined(sun) 16113static 16114#endif 16115void 16116dtrace_helpers_duplicate(proc_t *from, proc_t *to) 16117{ 16118 dtrace_helpers_t *help, *newhelp; 16119 dtrace_helper_action_t *helper, *new, *last; 16120 dtrace_difo_t *dp; 16121 dtrace_vstate_t *vstate; 16122 int i, j, sz, hasprovs = 0; 16123 16124 mutex_enter(&dtrace_lock); 16125 ASSERT(from->p_dtrace_helpers != NULL); 16126 ASSERT(dtrace_helpers > 0); 16127 16128 help = from->p_dtrace_helpers; 16129 newhelp = dtrace_helpers_create(to); 16130 ASSERT(to->p_dtrace_helpers != NULL); 16131 16132 newhelp->dthps_generation = help->dthps_generation; 16133 vstate = &newhelp->dthps_vstate; 16134 16135 /* 16136 * Duplicate the helper actions. 16137 */ 16138 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16139 if ((helper = help->dthps_actions[i]) == NULL) 16140 continue; 16141 16142 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 16143 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 16144 KM_SLEEP); 16145 new->dtha_generation = helper->dtha_generation; 16146 16147 if ((dp = helper->dtha_predicate) != NULL) { 16148 dp = dtrace_difo_duplicate(dp, vstate); 16149 new->dtha_predicate = dp; 16150 } 16151 16152 new->dtha_nactions = helper->dtha_nactions; 16153 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 16154 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 16155 16156 for (j = 0; j < new->dtha_nactions; j++) { 16157 dtrace_difo_t *dp = helper->dtha_actions[j]; 16158 16159 ASSERT(dp != NULL); 16160 dp = dtrace_difo_duplicate(dp, vstate); 16161 new->dtha_actions[j] = dp; 16162 } 16163 16164 if (last != NULL) { 16165 last->dtha_next = new; 16166 } else { 16167 newhelp->dthps_actions[i] = new; 16168 } 16169 16170 last = new; 16171 } 16172 } 16173 16174 /* 16175 * Duplicate the helper providers and register them with the 16176 * DTrace framework. 
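 * Unlike the helper actions above, the providers themselves are not
 * deep-copied:  parent and child share each dtrace_helper_provider_t
 * by bumping dthp_ref, and the underlying DOF is freed only when the
 * last reference goes away in dtrace_helper_provider_destroy().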
16177 */ 16178 if (help->dthps_nprovs > 0) { 16179 newhelp->dthps_nprovs = help->dthps_nprovs; 16180 newhelp->dthps_maxprovs = help->dthps_nprovs; 16181 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 16182 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16183 for (i = 0; i < newhelp->dthps_nprovs; i++) { 16184 newhelp->dthps_provs[i] = help->dthps_provs[i]; 16185 newhelp->dthps_provs[i]->dthp_ref++; 16186 } 16187 16188 hasprovs = 1; 16189 } 16190 16191 mutex_exit(&dtrace_lock); 16192 16193 if (hasprovs) 16194 dtrace_helper_provider_register(to, newhelp, NULL); 16195} 16196 16197/* 16198 * DTrace Hook Functions 16199 */ 16200static void 16201dtrace_module_loaded(modctl_t *ctl) 16202{ 16203 dtrace_provider_t *prv; 16204 16205 mutex_enter(&dtrace_provider_lock); 16206#if defined(sun) 16207 mutex_enter(&mod_lock); 16208#endif 16209 16210#if defined(sun) 16211 ASSERT(ctl->mod_busy); 16212#endif 16213 16214 /* 16215 * We're going to call each provider's per-module provide operation 16216 * specifying only this module. 16217 */ 16218 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 16219 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 16220 16221#if defined(sun) 16222 mutex_exit(&mod_lock); 16223#endif 16224 mutex_exit(&dtrace_provider_lock); 16225 16226 /* 16227 * If we have any retained enablings, we need to match against them. 16228 * Enabling probes requires that cpu_lock be held, and we cannot hold 16229 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 16230 * module. (In particular, this happens when loading scheduling 16231 * classes.) So if we have any retained enablings, we need to dispatch 16232 * our task queue to do the match for us. 16233 */ 16234 mutex_enter(&dtrace_lock); 16235 16236 if (dtrace_retained == NULL) { 16237 mutex_exit(&dtrace_lock); 16238 return; 16239 } 16240 16241 (void) taskq_dispatch(dtrace_taskq, 16242 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 16243 16244 mutex_exit(&dtrace_lock); 16245 16246 /* 16247 * And now, for a little heuristic sleaze: in general, we want to 16248 * match modules as soon as they load. However, we cannot guarantee 16249 * this, because it would lead us to the lock ordering violation 16250 * outlined above. The common case, of course, is that cpu_lock is 16251 * _not_ held -- so we delay here for a clock tick, hoping that that's 16252 * long enough for the task queue to do its work. If it's not, it's 16253 * not a serious problem -- it just means that the module that we 16254 * just loaded may not be immediately instrumentable. 16255 */ 16256 delay(1); 16257} 16258 16259static void 16260#if defined(sun) 16261dtrace_module_unloaded(modctl_t *ctl) 16262#else 16263dtrace_module_unloaded(modctl_t *ctl, int *error) 16264#endif 16265{ 16266 dtrace_probe_t template, *probe, *first, *next; 16267 dtrace_provider_t *prov; 16268#if !defined(sun) 16269 char modname[DTRACE_MODNAMELEN]; 16270 size_t len; 16271#endif 16272 16273#if defined(sun) 16274 template.dtpr_mod = ctl->mod_modname; 16275#else 16276 /* Handle the fact that ctl->filename may end in ".ko".
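 Probe module names are registered without that suffix, so strip it before building the lookup template.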
*/ 16277 strlcpy(modname, ctl->filename, sizeof(modname)); 16278 len = strlen(ctl->filename); 16279 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16280 modname[len - 3] = '\0'; 16281 template.dtpr_mod = modname; 16282#endif 16283 16284 mutex_enter(&dtrace_provider_lock); 16285#if defined(sun) 16286 mutex_enter(&mod_lock); 16287#endif 16288 mutex_enter(&dtrace_lock); 16289 16290#if !defined(sun) 16291 if (ctl->nenabled > 0) { 16292 /* Don't allow unloads if a probe is enabled. */ 16293 mutex_exit(&dtrace_provider_lock); 16294 mutex_exit(&dtrace_lock); 16295 *error = -1; 16296 printf( 16297 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16298 return; 16299 } 16300#endif 16301 16302 if (dtrace_bymod == NULL) { 16303 /* 16304 * The DTrace module is loaded (obviously) but not attached; 16305 * we don't have any work to do. 16306 */ 16307 mutex_exit(&dtrace_provider_lock); 16308#if defined(sun) 16309 mutex_exit(&mod_lock); 16310#endif 16311 mutex_exit(&dtrace_lock); 16312 return; 16313 } 16314 16315 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16316 probe != NULL; probe = probe->dtpr_nextmod) { 16317 if (probe->dtpr_ecb != NULL) { 16318 mutex_exit(&dtrace_provider_lock); 16319#if defined(sun) 16320 mutex_exit(&mod_lock); 16321#endif 16322 mutex_exit(&dtrace_lock); 16323 16324 /* 16325 * This shouldn't _actually_ be possible -- we're 16326 * unloading a module that has an enabled probe in it. 16327 * (It's normally up to the provider to make sure that 16328 * this can't happen.) However, because dtps_enable() 16329 * doesn't have a failure mode, there can be an 16330 * enable/unload race. Upshot: we don't want to 16331 * assert, but we're not going to disable the 16332 * probe, either. 16333 */ 16334 if (dtrace_err_verbose) { 16335#if defined(sun) 16336 cmn_err(CE_WARN, "unloaded module '%s' had " 16337 "enabled probes", ctl->mod_modname); 16338#else 16339 cmn_err(CE_WARN, "unloaded module '%s' had " 16340 "enabled probes", modname); 16341#endif 16342 } 16343 16344 return; 16345 } 16346 } 16347 16348 probe = first; 16349 16350 for (first = NULL; probe != NULL; probe = next) { 16351 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16352 16353 dtrace_probes[probe->dtpr_id - 1] = NULL; 16354 16355 next = probe->dtpr_nextmod; 16356 dtrace_hash_remove(dtrace_bymod, probe); 16357 dtrace_hash_remove(dtrace_byfunc, probe); 16358 dtrace_hash_remove(dtrace_byname, probe); 16359 16360 if (first == NULL) { 16361 first = probe; 16362 probe->dtpr_nextmod = NULL; 16363 } else { 16364 probe->dtpr_nextmod = first; 16365 first = probe; 16366 } 16367 } 16368 16369 /* 16370 * We've removed all of the module's probes from the hash chains and 16371 * from the probe array. Now issue a dtrace_sync() to be sure that 16372 * everyone has cleared out from any probe array processing. 
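 * (dtrace_sync() cross-calls every CPU; because probe context runs with
 * interrupts disabled, the sync cannot complete while any CPU is still
 * in probe context referencing the entries we just removed.)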
16373 */ 16374 dtrace_sync(); 16375 16376 for (probe = first; probe != NULL; probe = first) { 16377 first = probe->dtpr_nextmod; 16378 prov = probe->dtpr_provider; 16379 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16380 probe->dtpr_arg); 16381 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16382 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16383 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16384#if defined(sun) 16385 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16386#else 16387 free_unr(dtrace_arena, probe->dtpr_id); 16388#endif 16389 kmem_free(probe, sizeof (dtrace_probe_t)); 16390 } 16391 16392 mutex_exit(&dtrace_lock); 16393#if defined(sun) 16394 mutex_exit(&mod_lock); 16395#endif 16396 mutex_exit(&dtrace_provider_lock); 16397} 16398 16399#if !defined(sun) 16400static void 16401dtrace_kld_load(void *arg __unused, linker_file_t lf) 16402{ 16403 16404 dtrace_module_loaded(lf); 16405} 16406 16407static void 16408dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16409{ 16410 16411 if (*error != 0) 16412 /* We already have an error, so don't do anything. */ 16413 return; 16414 dtrace_module_unloaded(lf, error); 16415} 16416#endif 16417 16418#if defined(sun) 16419static void 16420dtrace_suspend(void) 16421{ 16422 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16423} 16424 16425static void 16426dtrace_resume(void) 16427{ 16428 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16429} 16430#endif 16431 16432static int 16433dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16434{ 16435 ASSERT(MUTEX_HELD(&cpu_lock)); 16436 mutex_enter(&dtrace_lock); 16437 16438 switch (what) { 16439 case CPU_CONFIG: { 16440 dtrace_state_t *state; 16441 dtrace_optval_t *opt, rs, c; 16442 16443 /* 16444 * For now, we only allocate a new buffer for anonymous state. 16445 */ 16446 if ((state = dtrace_anon.dta_state) == NULL) 16447 break; 16448 16449 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16450 break; 16451 16452 opt = state->dts_options; 16453 c = opt[DTRACEOPT_CPU]; 16454 16455 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16456 break; 16457 16458 /* 16459 * Regardless of what the actual policy is, we're going to 16460 * temporarily set our resize policy to be manual. We're 16461 * also going to temporarily set our CPU option to denote 16462 * the newly configured CPU. 16463 */ 16464 rs = opt[DTRACEOPT_BUFRESIZE]; 16465 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16466 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16467 16468 (void) dtrace_state_buffers(state); 16469 16470 opt[DTRACEOPT_BUFRESIZE] = rs; 16471 opt[DTRACEOPT_CPU] = c; 16472 16473 break; 16474 } 16475 16476 case CPU_UNCONFIG: 16477 /* 16478 * We don't free the buffer in the CPU_UNCONFIG case. (The 16479 * buffer will be freed when the consumer exits.) 
16480 */ 16481 break; 16482 16483 default: 16484 break; 16485 } 16486 16487 mutex_exit(&dtrace_lock); 16488 return (0); 16489} 16490 16491#if defined(sun) 16492static void 16493dtrace_cpu_setup_initial(processorid_t cpu) 16494{ 16495 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16496} 16497#endif 16498 16499static void 16500dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16501{ 16502 if (dtrace_toxranges >= dtrace_toxranges_max) { 16503 int osize, nsize; 16504 dtrace_toxrange_t *range; 16505 16506 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16507 16508 if (osize == 0) { 16509 ASSERT(dtrace_toxrange == NULL); 16510 ASSERT(dtrace_toxranges_max == 0); 16511 dtrace_toxranges_max = 1; 16512 } else { 16513 dtrace_toxranges_max <<= 1; 16514 } 16515 16516 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16517 range = kmem_zalloc(nsize, KM_SLEEP); 16518 16519 if (dtrace_toxrange != NULL) { 16520 ASSERT(osize != 0); 16521 bcopy(dtrace_toxrange, range, osize); 16522 kmem_free(dtrace_toxrange, osize); 16523 } 16524 16525 dtrace_toxrange = range; 16526 } 16527 16528 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16529 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16530 16531 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16532 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16533 dtrace_toxranges++; 16534} 16535 16536static void 16537dtrace_getf_barrier() 16538{ 16539#if defined(sun) 16540 /* 16541 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 16542 * that contain calls to getf(), this routine will be called on every 16543 * closef() before either the underlying vnode is released or the 16544 * file_t itself is freed. By the time we are here, it is essential 16545 * that the file_t can no longer be accessed from a call to getf() 16546 * in probe context -- that assures that a dtrace_sync() can be used 16547 * to clear out any enablings referring to the old structures. 
16548 */ 16549 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 16550 kcred->cr_zone->zone_dtrace_getf != 0) 16551 dtrace_sync(); 16552#endif 16553} 16554 16555/* 16556 * DTrace Driver Cookbook Functions 16557 */ 16558#if defined(sun) 16559/*ARGSUSED*/ 16560static int 16561dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 16562{ 16563 dtrace_provider_id_t id; 16564 dtrace_state_t *state = NULL; 16565 dtrace_enabling_t *enab; 16566 16567 mutex_enter(&cpu_lock); 16568 mutex_enter(&dtrace_provider_lock); 16569 mutex_enter(&dtrace_lock); 16570 16571 if (ddi_soft_state_init(&dtrace_softstate, 16572 sizeof (dtrace_state_t), 0) != 0) { 16573 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 16574 mutex_exit(&cpu_lock); 16575 mutex_exit(&dtrace_provider_lock); 16576 mutex_exit(&dtrace_lock); 16577 return (DDI_FAILURE); 16578 } 16579 16580 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 16581 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 16582 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 16583 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 16584 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 16585 ddi_remove_minor_node(devi, NULL); 16586 ddi_soft_state_fini(&dtrace_softstate); 16587 mutex_exit(&cpu_lock); 16588 mutex_exit(&dtrace_provider_lock); 16589 mutex_exit(&dtrace_lock); 16590 return (DDI_FAILURE); 16591 } 16592 16593 ddi_report_dev(devi); 16594 dtrace_devi = devi; 16595 16596 dtrace_modload = dtrace_module_loaded; 16597 dtrace_modunload = dtrace_module_unloaded; 16598 dtrace_cpu_init = dtrace_cpu_setup_initial; 16599 dtrace_helpers_cleanup = dtrace_helpers_destroy; 16600 dtrace_helpers_fork = dtrace_helpers_duplicate; 16601 dtrace_cpustart_init = dtrace_suspend; 16602 dtrace_cpustart_fini = dtrace_resume; 16603 dtrace_debugger_init = dtrace_suspend; 16604 dtrace_debugger_fini = dtrace_resume; 16605 16606 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16607 16608 ASSERT(MUTEX_HELD(&cpu_lock)); 16609 16610 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 16611 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 16612 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 16613 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 16614 VM_SLEEP | VMC_IDENTIFIER); 16615 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 16616 1, INT_MAX, 0); 16617 16618 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 16619 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 16620 NULL, NULL, NULL, NULL, NULL, 0); 16621 16622 ASSERT(MUTEX_HELD(&cpu_lock)); 16623 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 16624 offsetof(dtrace_probe_t, dtpr_nextmod), 16625 offsetof(dtrace_probe_t, dtpr_prevmod)); 16626 16627 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 16628 offsetof(dtrace_probe_t, dtpr_nextfunc), 16629 offsetof(dtrace_probe_t, dtpr_prevfunc)); 16630 16631 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 16632 offsetof(dtrace_probe_t, dtpr_nextname), 16633 offsetof(dtrace_probe_t, dtpr_prevname)); 16634 16635 if (dtrace_retain_max < 1) { 16636 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 16637 "setting to 1", dtrace_retain_max); 16638 dtrace_retain_max = 1; 16639 } 16640 16641 /* 16642 * Now discover our toxic ranges. 
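 * (A toxic range is a region of kernel address space that must never be
 * dereferenced from probe context -- device registers, unmapped holes
 * and the like; DIF loads that fall within one are flagged as bad
 * addresses rather than performed.)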
16643 */ 16644 dtrace_toxic_ranges(dtrace_toxrange_add); 16645 16646 /* 16647 * Before we register ourselves as a provider to our own framework, 16648 * we would like to assert that dtrace_provider is NULL -- but that's 16649 * not true if we were loaded as a dependency of a DTrace provider. 16650 * Once we've registered, we can assert that dtrace_provider is our 16651 * pseudo provider. 16652 */ 16653 (void) dtrace_register("dtrace", &dtrace_provider_attr, 16654 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 16655 16656 ASSERT(dtrace_provider != NULL); 16657 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 16658 16659 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 16660 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 16661 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 16662 dtrace_provider, NULL, NULL, "END", 0, NULL); 16663 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 16664 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 16665 16666 dtrace_anon_property(); 16667 mutex_exit(&cpu_lock); 16668 16669 /* 16670 * If DTrace helper tracing is enabled, we need to allocate the 16671 * trace buffer and initialize the values. 16672 */ 16673 if (dtrace_helptrace_enabled) { 16674 ASSERT(dtrace_helptrace_buffer == NULL); 16675 dtrace_helptrace_buffer = 16676 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 16677 dtrace_helptrace_next = 0; 16678 } 16679 16680 /* 16681 * If there are already providers, we must ask them to provide their 16682 * probes, and then match any anonymous enabling against them. Note 16683 * that there should be no other retained enablings at this time: 16684 * the only retained enablings at this time should be the anonymous 16685 * enabling. 16686 */ 16687 if (dtrace_anon.dta_enabling != NULL) { 16688 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 16689 16690 dtrace_enabling_provide(NULL); 16691 state = dtrace_anon.dta_state; 16692 16693 /* 16694 * We couldn't hold cpu_lock across the above call to 16695 * dtrace_enabling_provide(), but we must hold it to actually 16696 * enable the probes. We have to drop all of our locks, pick 16697 * up cpu_lock, and regain our locks before matching the 16698 * retained anonymous enabling. 16699 */ 16700 mutex_exit(&dtrace_lock); 16701 mutex_exit(&dtrace_provider_lock); 16702 16703 mutex_enter(&cpu_lock); 16704 mutex_enter(&dtrace_provider_lock); 16705 mutex_enter(&dtrace_lock); 16706 16707 if ((enab = dtrace_anon.dta_enabling) != NULL) 16708 (void) dtrace_enabling_match(enab, NULL); 16709 16710 mutex_exit(&cpu_lock); 16711 } 16712 16713 mutex_exit(&dtrace_lock); 16714 mutex_exit(&dtrace_provider_lock); 16715 16716 if (state != NULL) { 16717 /* 16718 * If we created any anonymous state, set it going now. 
16719 */ 16720 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 16721 } 16722 16723 return (DDI_SUCCESS); 16724} 16725#endif 16726 16727#if !defined(sun) 16728#if __FreeBSD_version >= 800039 16729static void dtrace_dtr(void *); 16730#endif 16731#endif 16732 16733/*ARGSUSED*/ 16734static int 16735#if defined(sun) 16736dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 16737#else 16738dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 16739#endif 16740{ 16741 dtrace_state_t *state; 16742 uint32_t priv; 16743 uid_t uid; 16744 zoneid_t zoneid; 16745 16746#if defined(sun) 16747 if (getminor(*devp) == DTRACEMNRN_HELPER) 16748 return (0); 16749 16750 /* 16751 * If this wasn't an open with the "helper" minor, then it must be 16752 * the "dtrace" minor. 16753 */ 16754 if (getminor(*devp) == DTRACEMNRN_DTRACE) 16755 return (ENXIO); 16756#else 16757 cred_t *cred_p = NULL; 16758 16759#if __FreeBSD_version < 800039 16760 /* 16761 * The first minor device is the one that is cloned so there is 16762 * nothing more to do here. 16763 */ 16764 if (dev2unit(dev) == 0) 16765 return 0; 16766 16767 /* 16768 * Devices are cloned, so if the DTrace state has already 16769 * been allocated, that means this device belongs to a 16770 * different client. Each client should open '/dev/dtrace' 16771 * to get a cloned device. 16772 */ 16773 if (dev->si_drv1 != NULL) 16774 return (EBUSY); 16775#endif 16776 16777 cred_p = dev->si_cred; 16778#endif 16779 16780 /* 16781 * If no DTRACE_PRIV_* bits are set in the credential, then the 16782 * caller lacks sufficient permission to do anything with DTrace. 16783 */ 16784 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 16785 if (priv == DTRACE_PRIV_NONE) { 16786#if !defined(sun) 16787#if __FreeBSD_version < 800039 16788 /* Destroy the cloned device. */ 16789 destroy_dev(dev); 16790#endif 16791#endif 16792 16793 return (EACCES); 16794 } 16795 16796 /* 16797 * Ask all providers to provide all their probes. 16798 */ 16799 mutex_enter(&dtrace_provider_lock); 16800 dtrace_probe_provide(NULL, NULL); 16801 mutex_exit(&dtrace_provider_lock); 16802 16803 mutex_enter(&cpu_lock); 16804 mutex_enter(&dtrace_lock); 16805 dtrace_opens++; 16806 dtrace_membar_producer(); 16807 16808#if defined(sun) 16809 /* 16810 * If the kernel debugger is active (that is, if the kernel debugger 16811 * modified text in some way), we won't allow the open. 16812 */ 16813 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 16814 dtrace_opens--; 16815 mutex_exit(&cpu_lock); 16816 mutex_exit(&dtrace_lock); 16817 return (EBUSY); 16818 } 16819 16820 state = dtrace_state_create(devp, cred_p); 16821#else 16822 state = dtrace_state_create(dev); 16823#if __FreeBSD_version < 800039 16824 dev->si_drv1 = state; 16825#else 16826 devfs_set_cdevpriv(state, dtrace_dtr); 16827#endif 16828#endif 16829 16830 mutex_exit(&cpu_lock); 16831 16832 if (state == NULL) { 16833#if defined(sun) 16834 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 16835 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16836#else 16837 --dtrace_opens; 16838#endif 16839 mutex_exit(&dtrace_lock); 16840#if !defined(sun) 16841#if __FreeBSD_version < 800039 16842 /* Destroy the cloned device. 
*/ 16843 destroy_dev(dev); 16844#endif 16845#endif 16846 return (EAGAIN); 16847 } 16848 16849 mutex_exit(&dtrace_lock); 16850 16851 return (0); 16852} 16853 16854/*ARGSUSED*/ 16855#if defined(sun) 16856static int 16857dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 16858#elif __FreeBSD_version < 800039 16859static int 16860dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 16861#else 16862static void 16863dtrace_dtr(void *data) 16864#endif 16865{ 16866#if defined(sun) 16867 minor_t minor = getminor(dev); 16868 dtrace_state_t *state; 16869 16870 if (minor == DTRACEMNRN_HELPER) 16871 return (0); 16872 16873 state = ddi_get_soft_state(dtrace_softstate, minor); 16874#else 16875#if __FreeBSD_version < 800039 16876 dtrace_state_t *state = dev->si_drv1; 16877 16878 /* Check if this is not a cloned device. */ 16879 if (dev2unit(dev) == 0) 16880 return (0); 16881#else 16882 dtrace_state_t *state = data; 16883#endif 16884 16885#endif 16886 16887 mutex_enter(&cpu_lock); 16888 mutex_enter(&dtrace_lock); 16889 16890 if (state != NULL) { 16891 if (state->dts_anon) { 16892 /* 16893 * There is anonymous state. Destroy that first. 16894 */ 16895 ASSERT(dtrace_anon.dta_state == NULL); 16896 dtrace_state_destroy(state->dts_anon); 16897 } 16898 16899 dtrace_state_destroy(state); 16900 16901#if !defined(sun) 16902 kmem_free(state, 0); 16903#if __FreeBSD_version < 800039 16904 dev->si_drv1 = NULL; 16905#endif 16906#endif 16907 } 16908 16909 ASSERT(dtrace_opens > 0); 16910#if defined(sun) 16911 /* 16912 * Only relinquish control of the kernel debugger interface when there 16913 * are no consumers and no anonymous enablings. 16914 */ 16915 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 16916 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16917#else 16918 --dtrace_opens; 16919#endif 16920 16921 mutex_exit(&dtrace_lock); 16922 mutex_exit(&cpu_lock); 16923 16924#if __FreeBSD_version < 800039 16925 /* Schedule this cloned device to be destroyed. */ 16926 destroy_dev_sched(dev); 16927#endif 16928 16929#if defined(sun) || __FreeBSD_version < 800039 16930 return (0); 16931#endif 16932} 16933 16934#if defined(sun) 16935/*ARGSUSED*/ 16936static int 16937dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 16938{ 16939 int rval; 16940 dof_helper_t help, *dhp = NULL; 16941 16942 switch (cmd) { 16943 case DTRACEHIOC_ADDDOF: 16944 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 16945 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 16946 return (EFAULT); 16947 } 16948 16949 dhp = &help; 16950 arg = (intptr_t)help.dofhp_dof; 16951 /*FALLTHROUGH*/ 16952 16953 case DTRACEHIOC_ADD: { 16954 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 16955 16956 if (dof == NULL) 16957 return (rval); 16958 16959 mutex_enter(&dtrace_lock); 16960 16961 /* 16962 * dtrace_helper_slurp() takes responsibility for the dof -- 16963 * it may free it now or it may save it and free it later. 
16964 */ 16965 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 16966 *rv = rval; 16967 rval = 0; 16968 } else { 16969 rval = EINVAL; 16970 } 16971 16972 mutex_exit(&dtrace_lock); 16973 return (rval); 16974 } 16975 16976 case DTRACEHIOC_REMOVE: { 16977 mutex_enter(&dtrace_lock); 16978 rval = dtrace_helper_destroygen(arg); 16979 mutex_exit(&dtrace_lock); 16980 16981 return (rval); 16982 } 16983 16984 default: 16985 break; 16986 } 16987 16988 return (ENOTTY); 16989} 16990 16991/*ARGSUSED*/ 16992static int 16993dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 16994{ 16995 minor_t minor = getminor(dev); 16996 dtrace_state_t *state; 16997 int rval; 16998 16999 if (minor == DTRACEMNRN_HELPER) 17000 return (dtrace_ioctl_helper(cmd, arg, rv)); 17001 17002 state = ddi_get_soft_state(dtrace_softstate, minor); 17003 17004 if (state->dts_anon) { 17005 ASSERT(dtrace_anon.dta_state == NULL); 17006 state = state->dts_anon; 17007 } 17008 17009 switch (cmd) { 17010 case DTRACEIOC_PROVIDER: { 17011 dtrace_providerdesc_t pvd; 17012 dtrace_provider_t *pvp; 17013 17014 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17015 return (EFAULT); 17016 17017 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17018 mutex_enter(&dtrace_provider_lock); 17019 17020 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17021 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17022 break; 17023 } 17024 17025 mutex_exit(&dtrace_provider_lock); 17026 17027 if (pvp == NULL) 17028 return (ESRCH); 17029 17030 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17031 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17032 17033 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17034 return (EFAULT); 17035 17036 return (0); 17037 } 17038 17039 case DTRACEIOC_EPROBE: { 17040 dtrace_eprobedesc_t epdesc; 17041 dtrace_ecb_t *ecb; 17042 dtrace_action_t *act; 17043 void *buf; 17044 size_t size; 17045 uintptr_t dest; 17046 int nrecs; 17047 17048 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17049 return (EFAULT); 17050 17051 mutex_enter(&dtrace_lock); 17052 17053 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17054 mutex_exit(&dtrace_lock); 17055 return (EINVAL); 17056 } 17057 17058 if (ecb->dte_probe == NULL) { 17059 mutex_exit(&dtrace_lock); 17060 return (EINVAL); 17061 } 17062 17063 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17064 epdesc.dtepd_uarg = ecb->dte_uarg; 17065 epdesc.dtepd_size = ecb->dte_size; 17066 17067 nrecs = epdesc.dtepd_nrecs; 17068 epdesc.dtepd_nrecs = 0; 17069 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17070 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17071 continue; 17072 17073 epdesc.dtepd_nrecs++; 17074 } 17075 17076 /* 17077 * Now that we have the size, we need to allocate a temporary 17078 * buffer in which to store the complete description. We need 17079 * the temporary buffer to be able to drop dtrace_lock() 17080 * across the copyout(), below. 
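 * (copyout() can fault and block for an arbitrarily long time, and we
 * don't want to be holding dtrace_lock while that happens.)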
17081 */ 17082 size = sizeof (dtrace_eprobedesc_t) + 17083 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17084 17085 buf = kmem_alloc(size, KM_SLEEP); 17086 dest = (uintptr_t)buf; 17087 17088 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17089 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17090 17091 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17092 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17093 continue; 17094 17095 if (nrecs-- == 0) 17096 break; 17097 17098 bcopy(&act->dta_rec, (void *)dest, 17099 sizeof (dtrace_recdesc_t)); 17100 dest += sizeof (dtrace_recdesc_t); 17101 } 17102 17103 mutex_exit(&dtrace_lock); 17104 17105 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17106 kmem_free(buf, size); 17107 return (EFAULT); 17108 } 17109 17110 kmem_free(buf, size); 17111 return (0); 17112 } 17113 17114 case DTRACEIOC_AGGDESC: { 17115 dtrace_aggdesc_t aggdesc; 17116 dtrace_action_t *act; 17117 dtrace_aggregation_t *agg; 17118 int nrecs; 17119 uint32_t offs; 17120 dtrace_recdesc_t *lrec; 17121 void *buf; 17122 size_t size; 17123 uintptr_t dest; 17124 17125 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17126 return (EFAULT); 17127 17128 mutex_enter(&dtrace_lock); 17129 17130 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17131 mutex_exit(&dtrace_lock); 17132 return (EINVAL); 17133 } 17134 17135 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17136 17137 nrecs = aggdesc.dtagd_nrecs; 17138 aggdesc.dtagd_nrecs = 0; 17139 17140 offs = agg->dtag_base; 17141 lrec = &agg->dtag_action.dta_rec; 17142 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17143 17144 for (act = agg->dtag_first; ; act = act->dta_next) { 17145 ASSERT(act->dta_intuple || 17146 DTRACEACT_ISAGG(act->dta_kind)); 17147 17148 /* 17149 * If this action has a record size of zero, it 17150 * denotes an argument to the aggregating action. 17151 * Because the presence of this record doesn't (or 17152 * shouldn't) affect the way the data is interpreted, 17153 * we don't copy it out to save user-level the 17154 * confusion of dealing with a zero-length record. 17155 */ 17156 if (act->dta_rec.dtrd_size == 0) { 17157 ASSERT(agg->dtag_hasarg); 17158 continue; 17159 } 17160 17161 aggdesc.dtagd_nrecs++; 17162 17163 if (act == &agg->dtag_action) 17164 break; 17165 } 17166 17167 /* 17168 * Now that we have the size, we need to allocate a temporary 17169 * buffer in which to store the complete description. We need 17170 * the temporary buffer to be able to drop dtrace_lock() 17171 * across the copyout(), below. 17172 */ 17173 size = sizeof (dtrace_aggdesc_t) + 17174 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17175 17176 buf = kmem_alloc(size, KM_SLEEP); 17177 dest = (uintptr_t)buf; 17178 17179 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17180 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17181 17182 for (act = agg->dtag_first; ; act = act->dta_next) { 17183 dtrace_recdesc_t rec = act->dta_rec; 17184 17185 /* 17186 * See the comment in the above loop for why we pass 17187 * over zero-length records. 
17188 */ 17189 if (rec.dtrd_size == 0) { 17190 ASSERT(agg->dtag_hasarg); 17191 continue; 17192 } 17193 17194 if (nrecs-- == 0) 17195 break; 17196 17197 rec.dtrd_offset -= offs; 17198 bcopy(&rec, (void *)dest, sizeof (rec)); 17199 dest += sizeof (dtrace_recdesc_t); 17200 17201 if (act == &agg->dtag_action) 17202 break; 17203 } 17204 17205 mutex_exit(&dtrace_lock); 17206 17207 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17208 kmem_free(buf, size); 17209 return (EFAULT); 17210 } 17211 17212 kmem_free(buf, size); 17213 return (0); 17214 } 17215 17216 case DTRACEIOC_ENABLE: { 17217 dof_hdr_t *dof; 17218 dtrace_enabling_t *enab = NULL; 17219 dtrace_vstate_t *vstate; 17220 int err = 0; 17221 17222 *rv = 0; 17223 17224 /* 17225 * If a NULL argument has been passed, we take this as our 17226 * cue to reevaluate our enablings. 17227 */ 17228 if (arg == NULL) { 17229 dtrace_enabling_matchall(); 17230 17231 return (0); 17232 } 17233 17234 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17235 return (rval); 17236 17237 mutex_enter(&cpu_lock); 17238 mutex_enter(&dtrace_lock); 17239 vstate = &state->dts_vstate; 17240 17241 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17242 mutex_exit(&dtrace_lock); 17243 mutex_exit(&cpu_lock); 17244 dtrace_dof_destroy(dof); 17245 return (EBUSY); 17246 } 17247 17248 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17249 mutex_exit(&dtrace_lock); 17250 mutex_exit(&cpu_lock); 17251 dtrace_dof_destroy(dof); 17252 return (EINVAL); 17253 } 17254 17255 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17256 dtrace_enabling_destroy(enab); 17257 mutex_exit(&dtrace_lock); 17258 mutex_exit(&cpu_lock); 17259 dtrace_dof_destroy(dof); 17260 return (rval); 17261 } 17262 17263 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17264 err = dtrace_enabling_retain(enab); 17265 } else { 17266 dtrace_enabling_destroy(enab); 17267 } 17268 17269 mutex_exit(&cpu_lock); 17270 mutex_exit(&dtrace_lock); 17271 dtrace_dof_destroy(dof); 17272 17273 return (err); 17274 } 17275 17276 case DTRACEIOC_REPLICATE: { 17277 dtrace_repldesc_t desc; 17278 dtrace_probedesc_t *match = &desc.dtrpd_match; 17279 dtrace_probedesc_t *create = &desc.dtrpd_create; 17280 int err; 17281 17282 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17283 return (EFAULT); 17284 17285 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17286 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17287 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17288 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17289 17290 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17291 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17292 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17293 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17294 17295 mutex_enter(&dtrace_lock); 17296 err = dtrace_enabling_replicate(state, match, create); 17297 mutex_exit(&dtrace_lock); 17298 17299 return (err); 17300 } 17301 17302 case DTRACEIOC_PROBEMATCH: 17303 case DTRACEIOC_PROBES: { 17304 dtrace_probe_t *probe = NULL; 17305 dtrace_probedesc_t desc; 17306 dtrace_probekey_t pkey; 17307 dtrace_id_t i; 17308 int m = 0; 17309 uint32_t priv; 17310 uid_t uid; 17311 zoneid_t zoneid; 17312 17313 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17314 return (EFAULT); 17315 17316 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17317 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17318 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17319 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17320 17321 /* 17322 * Before we attempt to 
match this probe, we want to give 17323 * all providers the opportunity to provide it. 17324 */ 17325 if (desc.dtpd_id == DTRACE_IDNONE) { 17326 mutex_enter(&dtrace_provider_lock); 17327 dtrace_probe_provide(&desc, NULL); 17328 mutex_exit(&dtrace_provider_lock); 17329 desc.dtpd_id++; 17330 } 17331 17332 if (cmd == DTRACEIOC_PROBEMATCH) { 17333 dtrace_probekey(&desc, &pkey); 17334 pkey.dtpk_id = DTRACE_IDNONE; 17335 } 17336 17337 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17338 17339 mutex_enter(&dtrace_lock); 17340 17341 if (cmd == DTRACEIOC_PROBEMATCH) { 17342 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17343 if ((probe = dtrace_probes[i - 1]) != NULL && 17344 (m = dtrace_match_probe(probe, &pkey, 17345 priv, uid, zoneid)) != 0) 17346 break; 17347 } 17348 17349 if (m < 0) { 17350 mutex_exit(&dtrace_lock); 17351 return (EINVAL); 17352 } 17353 17354 } else { 17355 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17356 if ((probe = dtrace_probes[i - 1]) != NULL && 17357 dtrace_match_priv(probe, priv, uid, zoneid)) 17358 break; 17359 } 17360 } 17361 17362 if (probe == NULL) { 17363 mutex_exit(&dtrace_lock); 17364 return (ESRCH); 17365 } 17366 17367 dtrace_probe_description(probe, &desc); 17368 mutex_exit(&dtrace_lock); 17369 17370 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17371 return (EFAULT); 17372 17373 return (0); 17374 } 17375 17376 case DTRACEIOC_PROBEARG: { 17377 dtrace_argdesc_t desc; 17378 dtrace_probe_t *probe; 17379 dtrace_provider_t *prov; 17380 17381 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17382 return (EFAULT); 17383 17384 if (desc.dtargd_id == DTRACE_IDNONE) 17385 return (EINVAL); 17386 17387 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17388 return (EINVAL); 17389 17390 mutex_enter(&dtrace_provider_lock); 17391 mutex_enter(&mod_lock); 17392 mutex_enter(&dtrace_lock); 17393 17394 if (desc.dtargd_id > dtrace_nprobes) { 17395 mutex_exit(&dtrace_lock); 17396 mutex_exit(&mod_lock); 17397 mutex_exit(&dtrace_provider_lock); 17398 return (EINVAL); 17399 } 17400 17401 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17402 mutex_exit(&dtrace_lock); 17403 mutex_exit(&mod_lock); 17404 mutex_exit(&dtrace_provider_lock); 17405 return (EINVAL); 17406 } 17407 17408 mutex_exit(&dtrace_lock); 17409 17410 prov = probe->dtpr_provider; 17411 17412 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17413 /* 17414 * There isn't any typed information for this probe. 17415 * Set the argument number to DTRACE_ARGNONE. 
17416 */ 17417 desc.dtargd_ndx = DTRACE_ARGNONE; 17418 } else { 17419 desc.dtargd_native[0] = '\0'; 17420 desc.dtargd_xlate[0] = '\0'; 17421 desc.dtargd_mapping = desc.dtargd_ndx; 17422 17423 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17424 probe->dtpr_id, probe->dtpr_arg, &desc); 17425 } 17426 17427 mutex_exit(&mod_lock); 17428 mutex_exit(&dtrace_provider_lock); 17429 17430 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17431 return (EFAULT); 17432 17433 return (0); 17434 } 17435 17436 case DTRACEIOC_GO: { 17437 processorid_t cpuid; 17438 rval = dtrace_state_go(state, &cpuid); 17439 17440 if (rval != 0) 17441 return (rval); 17442 17443 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17444 return (EFAULT); 17445 17446 return (0); 17447 } 17448 17449 case DTRACEIOC_STOP: { 17450 processorid_t cpuid; 17451 17452 mutex_enter(&dtrace_lock); 17453 rval = dtrace_state_stop(state, &cpuid); 17454 mutex_exit(&dtrace_lock); 17455 17456 if (rval != 0) 17457 return (rval); 17458 17459 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17460 return (EFAULT); 17461 17462 return (0); 17463 } 17464 17465 case DTRACEIOC_DOFGET: { 17466 dof_hdr_t hdr, *dof; 17467 uint64_t len; 17468 17469 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17470 return (EFAULT); 17471 17472 mutex_enter(&dtrace_lock); 17473 dof = dtrace_dof_create(state); 17474 mutex_exit(&dtrace_lock); 17475 17476 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17477 rval = copyout(dof, (void *)arg, len); 17478 dtrace_dof_destroy(dof); 17479 17480 return (rval == 0 ? 0 : EFAULT); 17481 } 17482 17483 case DTRACEIOC_AGGSNAP: 17484 case DTRACEIOC_BUFSNAP: { 17485 dtrace_bufdesc_t desc; 17486 caddr_t cached; 17487 dtrace_buffer_t *buf; 17488 17489 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17490 return (EFAULT); 17491 17492 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17493 return (EINVAL); 17494 17495 mutex_enter(&dtrace_lock); 17496 17497 if (cmd == DTRACEIOC_BUFSNAP) { 17498 buf = &state->dts_buffer[desc.dtbd_cpu]; 17499 } else { 17500 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17501 } 17502 17503 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17504 size_t sz = buf->dtb_offset; 17505 17506 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17507 mutex_exit(&dtrace_lock); 17508 return (EBUSY); 17509 } 17510 17511 /* 17512 * If this buffer has already been consumed, we're 17513 * going to indicate that there's nothing left here 17514 * to consume. 17515 */ 17516 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17517 mutex_exit(&dtrace_lock); 17518 17519 desc.dtbd_size = 0; 17520 desc.dtbd_drops = 0; 17521 desc.dtbd_errors = 0; 17522 desc.dtbd_oldest = 0; 17523 sz = sizeof (desc); 17524 17525 if (copyout(&desc, (void *)arg, sz) != 0) 17526 return (EFAULT); 17527 17528 return (0); 17529 } 17530 17531 /* 17532 * If this is a ring buffer that has wrapped, we want 17533 * to copy the whole thing out. 
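 * (dtrace_buffer_polish() zeroes any gap between the write offset and
 * the wrapped offset so the entire buffer can be handed to user-level
 * as well-formed records.)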
17534 */ 17535 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17536 dtrace_buffer_polish(buf); 17537 sz = buf->dtb_size; 17538 } 17539 17540 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17541 mutex_exit(&dtrace_lock); 17542 return (EFAULT); 17543 } 17544 17545 desc.dtbd_size = sz; 17546 desc.dtbd_drops = buf->dtb_drops; 17547 desc.dtbd_errors = buf->dtb_errors; 17548 desc.dtbd_oldest = buf->dtb_xamot_offset; 17549 desc.dtbd_timestamp = dtrace_gethrtime(); 17550 17551 mutex_exit(&dtrace_lock); 17552 17553 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17554 return (EFAULT); 17555 17556 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17557 17558 return (0); 17559 } 17560 17561 if (buf->dtb_tomax == NULL) { 17562 ASSERT(buf->dtb_xamot == NULL); 17563 mutex_exit(&dtrace_lock); 17564 return (ENOENT); 17565 } 17566 17567 cached = buf->dtb_tomax; 17568 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 17569 17570 dtrace_xcall(desc.dtbd_cpu, 17571 (dtrace_xcall_t)dtrace_buffer_switch, buf); 17572 17573 state->dts_errors += buf->dtb_xamot_errors; 17574 17575 /* 17576 * If the buffers did not actually switch, then the cross call 17577 * did not take place -- presumably because the given CPU is 17578 * not in the ready set. If this is the case, we'll return 17579 * ENOENT. 17580 */ 17581 if (buf->dtb_tomax == cached) { 17582 ASSERT(buf->dtb_xamot != cached); 17583 mutex_exit(&dtrace_lock); 17584 return (ENOENT); 17585 } 17586 17587 ASSERT(cached == buf->dtb_xamot); 17588 17589 /* 17590 * We have our snapshot; now copy it out. 17591 */ 17592 if (copyout(buf->dtb_xamot, desc.dtbd_data, 17593 buf->dtb_xamot_offset) != 0) { 17594 mutex_exit(&dtrace_lock); 17595 return (EFAULT); 17596 } 17597 17598 desc.dtbd_size = buf->dtb_xamot_offset; 17599 desc.dtbd_drops = buf->dtb_xamot_drops; 17600 desc.dtbd_errors = buf->dtb_xamot_errors; 17601 desc.dtbd_oldest = 0; 17602 desc.dtbd_timestamp = buf->dtb_switched; 17603 17604 mutex_exit(&dtrace_lock); 17605 17606 /* 17607 * Finally, copy out the buffer description. 17608 */ 17609 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17610 return (EFAULT); 17611 17612 return (0); 17613 } 17614 17615 case DTRACEIOC_CONF: { 17616 dtrace_conf_t conf; 17617 17618 bzero(&conf, sizeof (conf)); 17619 conf.dtc_difversion = DIF_VERSION; 17620 conf.dtc_difintregs = DIF_DIR_NREGS; 17621 conf.dtc_diftupregs = DIF_DTR_NREGS; 17622 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 17623 17624 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 17625 return (EFAULT); 17626 17627 return (0); 17628 } 17629 17630 case DTRACEIOC_STATUS: { 17631 dtrace_status_t stat; 17632 dtrace_dstate_t *dstate; 17633 int i, j; 17634 uint64_t nerrs; 17635 17636 /* 17637 * See the comment in dtrace_state_deadman() for the reason 17638 * for setting dts_laststatus to INT64_MAX before setting 17639 * it to the correct value. 
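 * (In short: the 64-bit timestamp store need not be atomic, so a torn
 * value could appear to lie far in the past; storing INT64_MAX first,
 * separated by a producer barrier, ensures the status time never
 * appears older than it really is.)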
17640 */ 17641 state->dts_laststatus = INT64_MAX; 17642 dtrace_membar_producer(); 17643 state->dts_laststatus = dtrace_gethrtime(); 17644 17645 bzero(&stat, sizeof (stat)); 17646 17647 mutex_enter(&dtrace_lock); 17648 17649 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 17650 mutex_exit(&dtrace_lock); 17651 return (ENOENT); 17652 } 17653 17654 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 17655 stat.dtst_exiting = 1; 17656 17657 nerrs = state->dts_errors; 17658 dstate = &state->dts_vstate.dtvs_dynvars; 17659 17660 for (i = 0; i < NCPU; i++) { 17661 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 17662 17663 stat.dtst_dyndrops += dcpu->dtdsc_drops; 17664 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 17665 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 17666 17667 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 17668 stat.dtst_filled++; 17669 17670 nerrs += state->dts_buffer[i].dtb_errors; 17671 17672 for (j = 0; j < state->dts_nspeculations; j++) { 17673 dtrace_speculation_t *spec; 17674 dtrace_buffer_t *buf; 17675 17676 spec = &state->dts_speculations[j]; 17677 buf = &spec->dtsp_buffer[i]; 17678 stat.dtst_specdrops += buf->dtb_xamot_drops; 17679 } 17680 } 17681 17682 stat.dtst_specdrops_busy = state->dts_speculations_busy; 17683 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 17684 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 17685 stat.dtst_dblerrors = state->dts_dblerrors; 17686 stat.dtst_killed = 17687 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 17688 stat.dtst_errors = nerrs; 17689 17690 mutex_exit(&dtrace_lock); 17691 17692 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 17693 return (EFAULT); 17694 17695 return (0); 17696 } 17697 17698 case DTRACEIOC_FORMAT: { 17699 dtrace_fmtdesc_t fmt; 17700 char *str; 17701 int len; 17702 17703 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 17704 return (EFAULT); 17705 17706 mutex_enter(&dtrace_lock); 17707 17708 if (fmt.dtfd_format == 0 || 17709 fmt.dtfd_format > state->dts_nformats) { 17710 mutex_exit(&dtrace_lock); 17711 return (EINVAL); 17712 } 17713 17714 /* 17715 * Format strings are allocated contiguously and they are 17716 * never freed; if a format index is less than the number 17717 * of formats, we can assert that the format map is non-NULL 17718 * and that the format for the specified index is non-NULL. 
17719 */ 17720 ASSERT(state->dts_formats != NULL); 17721 str = state->dts_formats[fmt.dtfd_format - 1]; 17722 ASSERT(str != NULL); 17723 17724 len = strlen(str) + 1; 17725 17726 if (len > fmt.dtfd_length) { 17727 fmt.dtfd_length = len; 17728 17729 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 17730 mutex_exit(&dtrace_lock); 17731 return (EINVAL); 17732 } 17733 } else { 17734 if (copyout(str, fmt.dtfd_string, len) != 0) { 17735 mutex_exit(&dtrace_lock); 17736 return (EINVAL); 17737 } 17738 } 17739 17740 mutex_exit(&dtrace_lock); 17741 return (0); 17742 } 17743 17744 default: 17745 break; 17746 } 17747 17748 return (ENOTTY); 17749} 17750 17751/*ARGSUSED*/ 17752static int 17753dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 17754{ 17755 dtrace_state_t *state; 17756 17757 switch (cmd) { 17758 case DDI_DETACH: 17759 break; 17760 17761 case DDI_SUSPEND: 17762 return (DDI_SUCCESS); 17763 17764 default: 17765 return (DDI_FAILURE); 17766 } 17767 17768 mutex_enter(&cpu_lock); 17769 mutex_enter(&dtrace_provider_lock); 17770 mutex_enter(&dtrace_lock); 17771 17772 ASSERT(dtrace_opens == 0); 17773 17774 if (dtrace_helpers > 0) { 17775 mutex_exit(&dtrace_provider_lock); 17776 mutex_exit(&dtrace_lock); 17777 mutex_exit(&cpu_lock); 17778 return (DDI_FAILURE); 17779 } 17780 17781 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 17782 mutex_exit(&dtrace_provider_lock); 17783 mutex_exit(&dtrace_lock); 17784 mutex_exit(&cpu_lock); 17785 return (DDI_FAILURE); 17786 } 17787 17788 dtrace_provider = NULL; 17789 17790 if ((state = dtrace_anon_grab()) != NULL) { 17791 /* 17792 * If there were ECBs on this state, the provider should 17793 * have not been allowed to detach; assert that there is 17794 * none. 17795 */ 17796 ASSERT(state->dts_necbs == 0); 17797 dtrace_state_destroy(state); 17798 17799 /* 17800 * If we're being detached with anonymous state, we need to 17801 * indicate to the kernel debugger that DTrace is now inactive. 
17802 */ 17803 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17804 } 17805 17806 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 17807 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 17808 dtrace_cpu_init = NULL; 17809 dtrace_helpers_cleanup = NULL; 17810 dtrace_helpers_fork = NULL; 17811 dtrace_cpustart_init = NULL; 17812 dtrace_cpustart_fini = NULL; 17813 dtrace_debugger_init = NULL; 17814 dtrace_debugger_fini = NULL; 17815 dtrace_modload = NULL; 17816 dtrace_modunload = NULL; 17817 17818 ASSERT(dtrace_getf == 0); 17819 ASSERT(dtrace_closef == NULL); 17820 17821 mutex_exit(&cpu_lock); 17822 17823 if (dtrace_helptrace_enabled) { 17824 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 17825 dtrace_helptrace_buffer = NULL; 17826 } 17827 17828 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 17829 dtrace_probes = NULL; 17830 dtrace_nprobes = 0; 17831 17832 dtrace_hash_destroy(dtrace_bymod); 17833 dtrace_hash_destroy(dtrace_byfunc); 17834 dtrace_hash_destroy(dtrace_byname); 17835 dtrace_bymod = NULL; 17836 dtrace_byfunc = NULL; 17837 dtrace_byname = NULL; 17838 17839 kmem_cache_destroy(dtrace_state_cache); 17840 vmem_destroy(dtrace_minor); 17841 vmem_destroy(dtrace_arena); 17842 17843 if (dtrace_toxrange != NULL) { 17844 kmem_free(dtrace_toxrange, 17845 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 17846 dtrace_toxrange = NULL; 17847 dtrace_toxranges = 0; 17848 dtrace_toxranges_max = 0; 17849 } 17850 17851 ddi_remove_minor_node(dtrace_devi, NULL); 17852 dtrace_devi = NULL; 17853 17854 ddi_soft_state_fini(&dtrace_softstate); 17855 17856 ASSERT(dtrace_vtime_references == 0); 17857 ASSERT(dtrace_opens == 0); 17858 ASSERT(dtrace_retained == NULL); 17859 17860 mutex_exit(&dtrace_lock); 17861 mutex_exit(&dtrace_provider_lock); 17862 17863 /* 17864 * We don't destroy the task queue until after we have dropped our 17865 * locks (taskq_destroy() may block on running tasks). To prevent 17866 * attempting to do work after we have effectively detached but before 17867 * the task queue has been destroyed, all tasks dispatched via the 17868 * task queue must check that DTrace is still attached before 17869 * performing any operation. 
17870 */ 17871 taskq_destroy(dtrace_taskq); 17872 dtrace_taskq = NULL; 17873 17874 return (DDI_SUCCESS); 17875} 17876#endif 17877 17878#if defined(sun) 17879/*ARGSUSED*/ 17880static int 17881dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 17882{ 17883 int error; 17884 17885 switch (infocmd) { 17886 case DDI_INFO_DEVT2DEVINFO: 17887 *result = (void *)dtrace_devi; 17888 error = DDI_SUCCESS; 17889 break; 17890 case DDI_INFO_DEVT2INSTANCE: 17891 *result = (void *)0; 17892 error = DDI_SUCCESS; 17893 break; 17894 default: 17895 error = DDI_FAILURE; 17896 } 17897 return (error); 17898} 17899#endif 17900 17901#if defined(sun) 17902static struct cb_ops dtrace_cb_ops = { 17903 dtrace_open, /* open */ 17904 dtrace_close, /* close */ 17905 nulldev, /* strategy */ 17906 nulldev, /* print */ 17907 nodev, /* dump */ 17908 nodev, /* read */ 17909 nodev, /* write */ 17910 dtrace_ioctl, /* ioctl */ 17911 nodev, /* devmap */ 17912 nodev, /* mmap */ 17913 nodev, /* segmap */ 17914 nochpoll, /* poll */ 17915 ddi_prop_op, /* cb_prop_op */ 17916 0, /* streamtab */ 17917 D_NEW | D_MP /* Driver compatibility flag */ 17918}; 17919 17920static struct dev_ops dtrace_ops = { 17921 DEVO_REV, /* devo_rev */ 17922 0, /* refcnt */ 17923 dtrace_info, /* get_dev_info */ 17924 nulldev, /* identify */ 17925 nulldev, /* probe */ 17926 dtrace_attach, /* attach */ 17927 dtrace_detach, /* detach */ 17928 nodev, /* reset */ 17929 &dtrace_cb_ops, /* driver operations */ 17930 NULL, /* bus operations */ 17931 nodev /* dev power */ 17932}; 17933 17934static struct modldrv modldrv = { 17935 &mod_driverops, /* module type (this is a pseudo driver) */ 17936 "Dynamic Tracing", /* name of module */ 17937 &dtrace_ops, /* driver ops */ 17938}; 17939 17940static struct modlinkage modlinkage = { 17941 MODREV_1, 17942 (void *)&modldrv, 17943 NULL 17944}; 17945 17946int 17947_init(void) 17948{ 17949 return (mod_install(&modlinkage)); 17950} 17951 17952int 17953_info(struct modinfo *modinfop) 17954{ 17955 return (mod_info(&modlinkage, modinfop)); 17956} 17957 17958int 17959_fini(void) 17960{ 17961 return (mod_remove(&modlinkage)); 17962} 17963#else 17964 17965static d_ioctl_t dtrace_ioctl; 17966static d_ioctl_t dtrace_ioctl_helper; 17967static void dtrace_load(void *); 17968static int dtrace_unload(void); 17969#if __FreeBSD_version < 800039 17970static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **); 17971static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */ 17972static eventhandler_tag eh_tag; /* Event handler tag. 
*/ 17973#else 17974static struct cdev *dtrace_dev; 17975static struct cdev *helper_dev; 17976#endif 17977 17978void dtrace_invop_init(void); 17979void dtrace_invop_uninit(void); 17980 17981static struct cdevsw dtrace_cdevsw = { 17982 .d_version = D_VERSION, 17983#if __FreeBSD_version < 800039 17984 .d_flags = D_TRACKCLOSE | D_NEEDMINOR, 17985 .d_close = dtrace_close, 17986#endif 17987 .d_ioctl = dtrace_ioctl, 17988 .d_open = dtrace_open, 17989 .d_name = "dtrace", 17990}; 17991 17992static struct cdevsw helper_cdevsw = { 17993 .d_version = D_VERSION, 17994 .d_ioctl = dtrace_ioctl_helper, 17995 .d_name = "helper", 17996}; 17997 17998#include <dtrace_anon.c> 17999#if __FreeBSD_version < 800039 18000#include <dtrace_clone.c> 18001#endif 18002#include <dtrace_ioctl.c> 18003#include <dtrace_load.c> 18004#include <dtrace_modevent.c> 18005#include <dtrace_sysctl.c> 18006#include <dtrace_unload.c> 18007#include <dtrace_vtime.c> 18008#include <dtrace_hacks.c> 18009#include <dtrace_isa.c> 18010 18011SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); 18012SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); 18013SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); 18014 18015DEV_MODULE(dtrace, dtrace_modevent, NULL); 18016MODULE_VERSION(dtrace, 1); 18017MODULE_DEPEND(dtrace, cyclic, 1, 1, 1); 18018MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); 18019#endif 18020