/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Portions Copyright 2013 Justin Hibbits */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
25 */ 26 27#include <sys/fasttrap_isa.h> 28#include <sys/fasttrap_impl.h> 29#include <sys/dtrace.h> 30#include <sys/dtrace_impl.h> 31#include <cddl/dev/dtrace/dtrace_cddl.h> 32#include <sys/proc.h> 33#include <sys/types.h> 34#include <sys/uio.h> 35#include <sys/ptrace.h> 36#include <sys/rmlock.h> 37#include <sys/sysent.h> 38 39#define OP(x) ((x) >> 26) 40#define OPX(x) (((x) >> 2) & 0x3FF) 41#define OP_BO(x) (((x) & 0x03E00000) >> 21) 42#define OP_BI(x) (((x) & 0x001F0000) >> 16) 43#define OP_RS(x) (((x) & 0x03E00000) >> 21) 44#define OP_RA(x) (((x) & 0x001F0000) >> 16) 45#define OP_RB(x) (((x) & 0x0000F100) >> 11) 46 47 48static int 49proc_ops(int op, proc_t *p, void *kaddr, off_t uaddr, size_t len) 50{ 51 struct iovec iov; 52 struct uio uio; 53 54 iov.iov_base = kaddr; 55 iov.iov_len = len; 56 uio.uio_offset = uaddr; 57 uio.uio_iov = &iov; 58 uio.uio_resid = len; 59 uio.uio_iovcnt = 1; 60 uio.uio_segflg = UIO_SYSSPACE; 61 uio.uio_td = curthread; 62 uio.uio_rw = op; 63 PHOLD(p); 64 if (proc_rwmem(p, &uio) != 0) { 65 PRELE(p); 66 return (-1); 67 } 68 PRELE(p); 69 70 return (0); 71} 72 73static int 74uread(proc_t *p, void *kaddr, size_t len, uintptr_t uaddr) 75{ 76 77 return (proc_ops(UIO_READ, p, kaddr, uaddr, len)); 78} 79 80static int 81uwrite(proc_t *p, void *kaddr, size_t len, uintptr_t uaddr) 82{ 83 84 return (proc_ops(UIO_WRITE, p, kaddr, uaddr, len)); 85} 86 87int 88fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp) 89{ 90 fasttrap_instr_t instr = FASTTRAP_INSTR; 91 92 if (uwrite(p, &instr, 4, tp->ftt_pc) != 0) 93 return (-1); 94 95 return (0); 96} 97 98int 99fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp) 100{ 101 uint32_t instr; 102 103 /* 104 * Distinguish between read or write failures and a changed 105 * instruction. 
106 */ 107 if (uread(p, &instr, 4, tp->ftt_pc) != 0) 108 return (0); 109 if (instr != FASTTRAP_INSTR) 110 return (0); 111 if (uwrite(p, &tp->ftt_instr, 4, tp->ftt_pc) != 0) 112 return (-1); 113 114 return (0); 115} 116 117int 118fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, uintptr_t pc, 119 fasttrap_probe_type_t type) 120{ 121 uint32_t instr; 122 //int32_t disp; 123 124 /* 125 * Read the instruction at the given address out of the process's 126 * address space. We don't have to worry about a debugger 127 * changing this instruction before we overwrite it with our trap 128 * instruction since P_PR_LOCK is set. 129 */ 130 if (uread(p, &instr, 4, pc) != 0) 131 return (-1); 132 133 /* 134 * Decode the instruction to fill in the probe flags. We can have 135 * the process execute most instructions on its own using a pc/npc 136 * trick, but pc-relative control transfer present a problem since 137 * we're relocating the instruction. We emulate these instructions 138 * in the kernel. We assume a default type and over-write that as 139 * needed. 140 * 141 * pc-relative instructions must be emulated for correctness; 142 * other instructions (which represent a large set of commonly traced 143 * instructions) are emulated or otherwise optimized for performance. 144 */ 145 tp->ftt_type = FASTTRAP_T_COMMON; 146 tp->ftt_instr = instr; 147 148 switch (OP(instr)) { 149 /* The following are invalid for trapping (invalid opcodes, tw/twi). 
*/ 150 case 0: 151 case 1: 152 case 2: 153 case 4: 154 case 5: 155 case 6: 156 case 30: 157 case 39: 158 case 58: 159 case 62: 160 case 3: /* twi */ 161 return (-1); 162 case 31: /* tw */ 163 if (OPX(instr) == 4) 164 return (-1); 165 else if (OPX(instr) == 444 && OP_RS(instr) == OP_RA(instr) && 166 OP_RS(instr) == OP_RB(instr)) 167 tp->ftt_type = FASTTRAP_T_NOP; 168 break; 169 case 16: 170 tp->ftt_type = FASTTRAP_T_BC; 171 tp->ftt_dest = instr & 0x0000FFFC; /* Extract target address */ 172 if (instr & 0x00008000) 173 tp->ftt_dest |= 0xFFFF0000; 174 /* Use as offset if not absolute address. */ 175 if (!(instr & 0x02)) 176 tp->ftt_dest += pc; 177 tp->ftt_bo = OP_BO(instr); 178 tp->ftt_bi = OP_BI(instr); 179 break; 180 case 18: 181 tp->ftt_type = FASTTRAP_T_B; 182 tp->ftt_dest = instr & 0x03FFFFFC; /* Extract target address */ 183 if (instr & 0x02000000) 184 tp->ftt_dest |= 0xFC000000; 185 /* Use as offset if not absolute address. */ 186 if (!(instr & 0x02)) 187 tp->ftt_dest += pc; 188 break; 189 case 19: 190 switch (OPX(instr)) { 191 case 528: /* bcctr */ 192 tp->ftt_type = FASTTRAP_T_BCTR; 193 tp->ftt_bo = OP_BO(instr); 194 tp->ftt_bi = OP_BI(instr); 195 break; 196 case 16: /* bclr */ 197 tp->ftt_type = FASTTRAP_T_BCTR; 198 tp->ftt_bo = OP_BO(instr); 199 tp->ftt_bi = OP_BI(instr); 200 break; 201 }; 202 break; 203 case 24: 204 if (OP_RS(instr) == OP_RA(instr) && 205 (instr & 0x0000FFFF) == 0) 206 tp->ftt_type = FASTTRAP_T_NOP; 207 break; 208 }; 209 210 /* 211 * We don't know how this tracepoint is going to be used, but in case 212 * it's used as part of a function return probe, we need to indicate 213 * whether it's always a return site or only potentially a return 214 * site. If it's part of a return probe, it's always going to be a 215 * return from that function if it's a restore instruction or if 216 * the previous instruction was a return. If we could reliably 217 * distinguish jump tables from return sites, this wouldn't be 218 * necessary. 
219 */ 220#if 0 221 if (tp->ftt_type != FASTTRAP_T_RESTORE && 222 (uread(p, &instr, 4, pc - sizeof (instr)) != 0 || 223 !(OP(instr) == 2 && OP3(instr) == OP3_RETURN))) 224 tp->ftt_flags |= FASTTRAP_F_RETMAYBE; 225#endif 226 227 return (0); 228} 229 230static uint64_t 231fasttrap_anarg(struct reg *rp, int argno) 232{ 233 uint64_t value; 234 proc_t *p = curproc; 235 236 /* The first 8 arguments are in registers. */ 237 if (argno < 8) 238 return rp->fixreg[argno + 3]; 239 240 /* Arguments on stack start after SP+LR (2 register slots). */ 241 if (SV_PROC_FLAG(p, SV_ILP32)) { 242 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 243 value = dtrace_fuword32((void *)(rp->fixreg[1] + 8 + 244 ((argno - 8) * sizeof(uint32_t)))); 245 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR); 246 } else { 247 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 248 value = dtrace_fuword64((void *)(rp->fixreg[1] + 16 + 249 ((argno - 8) * sizeof(uint32_t)))); 250 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR); 251 } 252 return value; 253} 254 255uint64_t 256fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno, 257 int aframes) 258{ 259 struct reg r; 260 261 fill_regs(curthread, &r); 262 263 return (fasttrap_anarg(&r, argno)); 264} 265 266uint64_t 267fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, 268 int aframes) 269{ 270 struct reg r; 271 272 fill_regs(curthread, &r); 273 274 return (fasttrap_anarg(&r, argno)); 275} 276 277static void 278fasttrap_usdt_args(fasttrap_probe_t *probe, struct reg *rp, int argc, 279 uintptr_t *argv) 280{ 281 int i, x, cap = MIN(argc, probe->ftp_nargs); 282 283 for (i = 0; i < cap; i++) { 284 x = probe->ftp_argmap[i]; 285 286 if (x < 8) 287 argv[i] = rp->fixreg[x]; 288 else 289 if (SV_PROC_FLAG(curproc, SV_ILP32)) 290 argv[i] = fuword32((void *)(rp->fixreg[1] + 8 + 291 (x * sizeof(uint32_t)))); 292 else 293 argv[i] = fuword32((void *)(rp->fixreg[1] + 16 + 294 (x * sizeof(uint64_t)))); 295 } 296 297 for (; i < argc; 
i++) { 298 argv[i] = 0; 299 } 300} 301 302static void 303fasttrap_return_common(struct reg *rp, uintptr_t pc, pid_t pid, 304 uintptr_t new_pc) 305{ 306 struct rm_priotracker tracker; 307 fasttrap_tracepoint_t *tp; 308 fasttrap_bucket_t *bucket; 309 fasttrap_id_t *id; 310 311 rm_rlock(&fasttrap_tp_lock, &tracker); 312 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 313 314 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 315 if (pid == tp->ftt_pid && pc == tp->ftt_pc && 316 tp->ftt_proc->ftpc_acount != 0) 317 break; 318 } 319 320 /* 321 * Don't sweat it if we can't find the tracepoint again; unlike 322 * when we're in fasttrap_pid_probe(), finding the tracepoint here 323 * is not essential to the correct execution of the process. 324 */ 325 if (tp == NULL) { 326 rm_runlock(&fasttrap_tp_lock, &tracker); 327 return; 328 } 329 330 for (id = tp->ftt_retids; id != NULL; id = id->fti_next) { 331 /* 332 * If there's a branch that could act as a return site, we 333 * need to trace it, and check here if the program counter is 334 * external to the function. 335 */ 336 /* Skip function-local branches. */ 337 if ((new_pc - id->fti_probe->ftp_faddr) < id->fti_probe->ftp_fsize) 338 continue; 339 340 dtrace_probe(id->fti_probe->ftp_id, 341 pc - id->fti_probe->ftp_faddr, 342 rp->fixreg[3], rp->fixreg[4], 0, 0); 343 } 344 rm_runlock(&fasttrap_tp_lock, &tracker); 345} 346 347 348static int 349fasttrap_branch_taken(int bo, int bi, struct reg *regs) 350{ 351 int crzero = 0; 352 353 /* Branch always? 
*/ 354 if ((bo & 0x14) == 0x14) 355 return 1; 356 357 /* Handle decrementing ctr */ 358 if (!(bo & 0x04)) { 359 --regs->ctr; 360 crzero = (regs->ctr == 0); 361 if (bo & 0x10) { 362 return (!(crzero ^ (bo >> 1))); 363 } 364 } 365 366 return (crzero | (((regs->cr >> (31 - bi)) ^ (bo >> 3)) ^ 1)); 367} 368 369 370int 371fasttrap_pid_probe(struct reg *rp) 372{ 373 struct rm_priotracker tracker; 374 proc_t *p = curproc; 375 uintptr_t pc = rp->pc; 376 uintptr_t new_pc = 0; 377 fasttrap_bucket_t *bucket; 378 fasttrap_tracepoint_t *tp, tp_local; 379 pid_t pid; 380 dtrace_icookie_t cookie; 381 uint_t is_enabled = 0; 382 383 /* 384 * It's possible that a user (in a veritable orgy of bad planning) 385 * could redirect this thread's flow of control before it reached the 386 * return probe fasttrap. In this case we need to kill the process 387 * since it's in a unrecoverable state. 388 */ 389 if (curthread->t_dtrace_step) { 390 ASSERT(curthread->t_dtrace_on); 391 fasttrap_sigtrap(p, curthread, pc); 392 return (0); 393 } 394 395 /* 396 * Clear all user tracing flags. 397 */ 398 curthread->t_dtrace_ft = 0; 399 curthread->t_dtrace_pc = 0; 400 curthread->t_dtrace_npc = 0; 401 curthread->t_dtrace_scrpc = 0; 402 curthread->t_dtrace_astpc = 0; 403 404 rm_rlock(&fasttrap_tp_lock, &tracker); 405 pid = p->p_pid; 406 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 407 408 /* 409 * Lookup the tracepoint that the process just hit. 410 */ 411 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 412 if (pid == tp->ftt_pid && pc == tp->ftt_pc && 413 tp->ftt_proc->ftpc_acount != 0) 414 break; 415 } 416 417 /* 418 * If we couldn't find a matching tracepoint, either a tracepoint has 419 * been inserted without using the pid<pid> ioctl interface (see 420 * fasttrap_ioctl), or somehow we have mislaid this tracepoint. 
421 */ 422 if (tp == NULL) { 423 rm_runlock(&fasttrap_tp_lock, &tracker); 424 return (-1); 425 } 426 427 if (tp->ftt_ids != NULL) { 428 fasttrap_id_t *id; 429 430 for (id = tp->ftt_ids; id != NULL; id = id->fti_next) { 431 fasttrap_probe_t *probe = id->fti_probe; 432 433 if (id->fti_ptype == DTFTP_ENTRY) { 434 /* 435 * We note that this was an entry 436 * probe to help ustack() find the 437 * first caller. 438 */ 439 cookie = dtrace_interrupt_disable(); 440 DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY); 441 dtrace_probe(probe->ftp_id, rp->fixreg[3], 442 rp->fixreg[4], rp->fixreg[5], rp->fixreg[6], 443 rp->fixreg[7]); 444 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY); 445 dtrace_interrupt_enable(cookie); 446 } else if (id->fti_ptype == DTFTP_IS_ENABLED) { 447 /* 448 * Note that in this case, we don't 449 * call dtrace_probe() since it's only 450 * an artificial probe meant to change 451 * the flow of control so that it 452 * encounters the true probe. 453 */ 454 is_enabled = 1; 455 } else if (probe->ftp_argmap == NULL) { 456 dtrace_probe(probe->ftp_id, rp->fixreg[3], 457 rp->fixreg[4], rp->fixreg[5], rp->fixreg[6], 458 rp->fixreg[7]); 459 } else { 460 uintptr_t t[5]; 461 462 fasttrap_usdt_args(probe, rp, 463 sizeof (t) / sizeof (t[0]), t); 464 465 dtrace_probe(probe->ftp_id, t[0], t[1], 466 t[2], t[3], t[4]); 467 } 468 } 469 } 470 471 /* 472 * We're about to do a bunch of work so we cache a local copy of 473 * the tracepoint to emulate the instruction, and then find the 474 * tracepoint again later if we need to light up any return probes. 475 */ 476 tp_local = *tp; 477 rm_runlock(&fasttrap_tp_lock, &tracker); 478 tp = &tp_local; 479 480 /* 481 * If there's an is-enabled probe connected to this tracepoint it 482 * means that there was a 'xor r3, r3, r3' 483 * instruction that was placed there by DTrace when the binary was 484 * linked. As this probe is, in fact, enabled, we need to stuff 1 485 * into R3. 
Accordingly, we can bypass all the instruction 486 * emulation logic since we know the inevitable result. It's possible 487 * that a user could construct a scenario where the 'is-enabled' 488 * probe was on some other instruction, but that would be a rather 489 * exotic way to shoot oneself in the foot. 490 */ 491 if (is_enabled) { 492 rp->fixreg[3] = 1; 493 new_pc = rp->pc + 4; 494 goto done; 495 } 496 497 498 switch (tp->ftt_type) { 499 case FASTTRAP_T_NOP: 500 new_pc = rp->pc + 4; 501 break; 502 case FASTTRAP_T_BC: 503 if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) 504 break; 505 /* FALLTHROUGH */ 506 case FASTTRAP_T_B: 507 if (tp->ftt_instr & 0x01) 508 rp->lr = rp->pc + 4; 509 new_pc = tp->ftt_dest; 510 break; 511 case FASTTRAP_T_BLR: 512 case FASTTRAP_T_BCTR: 513 if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) 514 break; 515 /* FALLTHROUGH */ 516 if (tp->ftt_type == FASTTRAP_T_BCTR) 517 new_pc = rp->ctr; 518 else 519 new_pc = rp->lr; 520 if (tp->ftt_instr & 0x01) 521 rp->lr = rp->pc + 4; 522 break; 523 case FASTTRAP_T_COMMON: 524 break; 525 }; 526done: 527 /* 528 * If there were no return probes when we first found the tracepoint, 529 * we should feel no obligation to honor any return probes that were 530 * subsequently enabled -- they'll just have to wait until the next 531 * time around. 532 */ 533 if (tp->ftt_retids != NULL) { 534 /* 535 * We need to wait until the results of the instruction are 536 * apparent before invoking any return probes. If this 537 * instruction was emulated we can just call 538 * fasttrap_return_common(); if it needs to be executed, we 539 * need to wait until the user thread returns to the kernel. 
540 */ 541 if (tp->ftt_type != FASTTRAP_T_COMMON) { 542 fasttrap_return_common(rp, pc, pid, new_pc); 543 } else { 544 ASSERT(curthread->t_dtrace_ret != 0); 545 ASSERT(curthread->t_dtrace_pc == pc); 546 ASSERT(curthread->t_dtrace_scrpc != 0); 547 ASSERT(new_pc == curthread->t_dtrace_astpc); 548 } 549 } 550 551 rp->pc = new_pc; 552 set_regs(curthread, rp); 553 554 return (0); 555} 556 557int 558fasttrap_return_probe(struct reg *rp) 559{ 560 proc_t *p = curproc; 561 uintptr_t pc = curthread->t_dtrace_pc; 562 uintptr_t npc = curthread->t_dtrace_npc; 563 564 curthread->t_dtrace_pc = 0; 565 curthread->t_dtrace_npc = 0; 566 curthread->t_dtrace_scrpc = 0; 567 curthread->t_dtrace_astpc = 0; 568 569 /* 570 * We set rp->pc to the address of the traced instruction so 571 * that it appears to dtrace_probe() that we're on the original 572 * instruction, and so that the user can't easily detect our 573 * complex web of lies. dtrace_return_probe() (our caller) 574 * will correctly set %pc after we return. 575 */ 576 rp->pc = pc; 577 578 fasttrap_return_common(rp, pc, p->p_pid, npc); 579 580 return (0); 581} 582 583