/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#include <sys/sysctl.h>

enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *
yesno(int v)
{
	return (v ? "yes" : "no");
}

static int
i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
{
	const struct intel_device_info *info = INTEL_INFO(dev);

	sbuf_printf(m, "gen: %d\n", info->gen);
	if (HAS_PCH_SPLIT(dev))
		sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
	B(has_llc);
#undef B

	return (0);
}

static const char *
get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *
get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return (" ");
	case I915_TILING_X: return ("X");
	case I915_TILING_Y: return ("Y");
	}
}

static const char *
cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return ("");
	}
}

static void
describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
{

	sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
	    &obj->base,
	    get_pin_flag(obj),
	    get_tiling_flag(obj),
	    obj->base.size / 1024,
	    obj->base.read_domains,
	    obj->base.write_domain,
	    obj->last_rendering_seqno,
	    obj->last_fenced_seqno,
	    cache_level_str(obj->cache_level),
	    obj->dirty ? " dirty" : "",
	    obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		sbuf_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		sbuf_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		sbuf_printf(m, " (gtt offset: %08x, size: %08x)",
		    obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		sbuf_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		sbuf_printf(m, " (%s)", obj->ring->name);
}

static int
i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	uintptr_t list = (uintptr_t)data;
	struct list_head *head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	switch (list) {
	case ACTIVE_LIST:
		sbuf_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		sbuf_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		sbuf_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		sbuf_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		sbuf_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		DRM_UNLOCK(dev);
		return (EINVAL);
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		sbuf_printf(m, " ");
		describe_obj(m, obj);
		sbuf_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	DRM_UNLOCK(dev);

	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
	    count, total_obj_size, total_gtt_size);
	return (0);
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

static int
i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	sbuf_printf(m, "%u objects, %zu bytes\n",
	    dev_priv->mm.object_count,
	    dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n",
	    mappable_count, mappable_size);
	sbuf_printf(m, "%u fault mappable objects, %zu bytes\n",
	    count, size);

	sbuf_printf(m, "%zu [%zu] gtt total\n",
	    dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		sbuf_printf(m, " ");
		describe_obj(m, obj);
		sbuf_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	DRM_UNLOCK(dev);

	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
	    count, total_obj_size, total_gtt_size);

	return (0);
}

static int
i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct intel_crtc *crtc;
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	char pipe;
	char plane;

	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
		return (0);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		pipe = pipe_name(crtc->pipe);
		plane = plane_name(crtc->plane);

		mtx_lock(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			sbuf_printf(m, "No flip due on pipe %c (plane %c)\n",
			    pipe, plane);
		} else {
			if (!work->pending) {
				sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n",
				    pipe, plane);
			} else {
				sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
				    pipe, plane);
			}
			if (work->enable_stall_check)
				sbuf_printf(m, "Stall check enabled, ");
			else
				sbuf_printf(m, "Stall check waiting for page flip ioctl, ");
			sbuf_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				obj = work->old_fb_obj;
				if (obj)
					sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				obj = work->pending_flip_obj;
				if (obj)
					sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		mtx_unlock(&dev->event_lock);
	}

	return (0);
}

static int
i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int count;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	count = 0;
	if (!list_empty(&dev_priv->rings[RCS].request_list)) {
		sbuf_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[RCS].request_list,
		    list) {
			sbuf_printf(m, " %d @ %d\n",
			    gem_request->seqno,
			    (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->rings[VCS].request_list)) {
		sbuf_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[VCS].request_list,
		    list) {
			sbuf_printf(m, " %d @ %d\n",
			    gem_request->seqno,
			    (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->rings[BCS].request_list)) {
		sbuf_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[BCS].request_list,
		    list) {
			sbuf_printf(m, " %d @ %d\n",
			    gem_request->seqno,
			    (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	DRM_UNLOCK(dev);

	if (count == 0)
		sbuf_printf(m, "No requests\n");

	return 0;
}

static void
i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		sbuf_printf(m, "Current sequence (%s): %d\n",
		    ring->name, ring->get_seqno(ring));
		sbuf_printf(m, "Waiter sequence (%s): %d\n",
		    ring->name, ring->waiting_seqno);
		sbuf_printf(m, "IRQ sequence (%s): %d\n",
		    ring->name, ring->irq_seqno);
	}
}

static int
i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->rings[i]);
	DRM_UNLOCK(dev);
	return (0);
}

static int
i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, pipe;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	if (!HAS_PCH_SPLIT(dev)) {
		sbuf_printf(m, "Interrupt enable: %08x\n",
		    I915_READ(IER));
		sbuf_printf(m, "Interrupt identity: %08x\n",
		    I915_READ(IIR));
		sbuf_printf(m, "Interrupt mask: %08x\n",
		    I915_READ(IMR));
		for_each_pipe(pipe)
			sbuf_printf(m, "Pipe %c stat: %08x\n",
			    pipe_name(pipe),
			    I915_READ(PIPESTAT(pipe)));
	} else {
		sbuf_printf(m, "North Display Interrupt enable: %08x\n",
		    I915_READ(DEIER));
		sbuf_printf(m, "North Display Interrupt identity: %08x\n",
		    I915_READ(DEIIR));
		sbuf_printf(m, "North Display Interrupt mask: %08x\n",
		    I915_READ(DEIMR));
		sbuf_printf(m, "South Display Interrupt enable: %08x\n",
		    I915_READ(SDEIER));
		sbuf_printf(m, "South Display Interrupt identity: %08x\n",
		    I915_READ(SDEIIR));
		sbuf_printf(m, "South Display Interrupt mask: %08x\n",
		    I915_READ(SDEIMR));
		sbuf_printf(m, "Graphics Interrupt enable: %08x\n",
		    I915_READ(GTIER));
		sbuf_printf(m, "Graphics Interrupt identity: %08x\n",
		    I915_READ(GTIIR));
		sbuf_printf(m, "Graphics Interrupt mask: %08x\n",
		    I915_READ(GTIMR));
	}
	sbuf_printf(m, "Interrupts received: %d\n",
	    atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n",
			    dev_priv->rings[i].name,
			    I915_READ_IMR(&dev_priv->rings[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->rings[i]);
	}
	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		sbuf_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			sbuf_printf(m, "unused");
		else
			describe_obj(m, obj);
		sbuf_printf(m, "\n");
	}

	DRM_UNLOCK(dev);
	return (0);
}

static int
i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 *hws;
	int i;

	ring = &dev_priv->rings[(uintptr_t)data];
	hws = (volatile u32 *)ring->status_page.page_addr;
	if (hws == NULL)
		return (0);

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		    i * 4,
		    hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return (0);
}

static int
i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	ring = &dev_priv->rings[(uintptr_t)data];
	if (!ring->obj) {
		sbuf_printf(m, "No ringbuffer setup\n");
	} else {
		u8 *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			sbuf_printf(m, "%08x : %08x\n", off, *ptr);
		}
	}
	DRM_UNLOCK(dev);
	return (0);
}

static int
i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	ring = &dev_priv->rings[(uintptr_t)data];
	if (ring->size == 0)
		return (0);

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	sbuf_printf(m, "Ring %s:\n", ring->name);
	sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	sbuf_printf(m, " Size : %08x\n", ring->size);
	sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
	sbuf_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
		sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
	}
	sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
	sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring));

	DRM_UNLOCK(dev);

	return (0);
}

static const char *
ring_str(int ring)
{
	switch (ring) {
	case RCS: return (" render");
	case VCS: return (" bsd");
	case BCS: return (" blt");
	default: return ("");
	}
}

static const char *
pin_flag(int pinned)
{
	if (pinned > 0)
		return (" P");
	else if (pinned < 0)
		return (" p");
	else
		return ("");
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct sbuf *m, const char *name,
    struct drm_i915_error_buffer *err, int count)
{

	sbuf_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
		    err->gtt_offset,
		    err->size,
		    err->read_domains,
		    err->write_domain,
		    err->seqno,
		    pin_flag(err->pinned),
		    tiling_flag(err->tiling),
		    dirty_flag(err->dirty),
		    purgeable_flag(err->purgeable),
		    err->ring != -1 ? " " : "",
		    ring_str(err->ring),
		    cache_level_str(err->cache_level));

		if (err->name)
			sbuf_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			sbuf_printf(m, " (fence: %d)", err->fence_reg);

		sbuf_printf(m, "\n");
		err++;
	}
}

static void
i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
    struct drm_i915_error_state *error, unsigned ring)
{

	sbuf_printf(m, "%s command stream:\n", ring_str(ring));
	sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr);
	}
	if (INTEL_INFO(dev)->gen >= 4)
		sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
		sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		sbuf_printf(m, " SYNC_0: 0x%08x\n",
		    error->semaphore_mboxes[ring][0]);
		sbuf_printf(m, " SYNC_1: 0x%08x\n",
		    error->semaphore_mboxes[ring][1]);
	}
	sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

static int i915_error_state(struct drm_device *dev, struct sbuf *m,
    void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	int i, j, page, offset, elt;

	mtx_lock(&dev_priv->error_lock);
	if (!dev_priv->first_error) {
		sbuf_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
	    (intmax_t)error->time.tv_usec);
	sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
	sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		sbuf_printf(m, " fence[%d] = %08jx\n", i,
		    (uintmax_t)error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
		sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	i915_ring_error_state(m, dev, error, RCS);
	if (HAS_BLT(dev))
		i915_ring_error_state(m, dev, error, BCS);
	if (HAS_BSD(dev))
		i915_ring_error_state(m, dev, error, VCS);

	if (error->active_bo)
		print_error_buffers(m, "Active",
		    error->active_bo,
		    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
		    error->pinned_bo,
		    error->pinned_bo_count);

	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
			    dev_priv->rings[i].name,
			    obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x : %08x\n",
					    offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			sbuf_printf(m, "%s --- %d requests\n",
			    dev_priv->rings[i].name,
			    error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
				    error->ring[i].requests[j].seqno,
				    error->ring[i].requests[j].jiffies,
				    error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
			    dev_priv->rings[i].name,
			    obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x : %08x\n",
					    offset,
					    obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	mtx_unlock(&dev_priv->error_lock);

	return (0);
}

static int
i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	crstanddelay = I915_READ16(CRSTANDVID);
	DRM_UNLOCK(dev);

	sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
	    (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int
i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
		    MEMSTAT_VID_SHIFT);
		sbuf_printf(m, "Current P-state: %d\n",
		    (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		if (sx_xlock_sig(&dev->dev_struct_lock))
			return (EINTR);
		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		DRM_UNLOCK(dev);

		sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		sbuf_printf(m, "Render p-state ratio: %d\n",
		    (gt_perf_status & 0xff00) >> 8);
		sbuf_printf(m, "Render p-state VID: %d\n",
		    gt_perf_status & 0xff);
		sbuf_printf(m, "Render p-state limit: %d\n",
		    rp_state_limits & 0xff);
		sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
		    GEN6_CAGF_SHIFT) * 50);
		sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
		    GEN6_CURICONT_MASK);
		sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
		    GEN6_CURIAVG_MASK);
		sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
		    GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",
		    max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",
		    max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
		    max_freq * 50);
	} else {
		sbuf_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int
i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
		    (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}
	DRM_UNLOCK(dev);
	return (0);
}

static inline int
MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int
i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}
	DRM_UNLOCK(dev);

	return (0);
}

static int
ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl;
	u32 rstdbyctl;
	u16 crstandvid;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);
	DRM_UNLOCK(dev);

	sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
	    "yes" : "no");
	sbuf_printf(m, "Boost freq: %d\n",
	    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
	    MEMMODE_BOOST_FREQ_SHIFT);
	sbuf_printf(m, "HW control enabled: %s\n",
	    rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	sbuf_printf(m, "SW control enabled: %s\n",
	    rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	sbuf_printf(m, "Gated voltage change: %s\n",
	    rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	sbuf_printf(m, "Starting frequency: P%d\n",
	    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	sbuf_printf(m, "Max P-state: P%d\n",
	    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	sbuf_printf(m, "Render standby enabled: %s\n",
	    (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	sbuf_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		sbuf_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		sbuf_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		sbuf_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		sbuf_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		sbuf_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		sbuf_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		sbuf_printf(m, "unknown\n");
		break;
	}

	return 0;
}

static int
gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;
	int count = 0;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	mtx_lock(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	mtx_unlock(&dev_priv->gt_lock);

	if (forcewake_count) {
		sbuf_printf(m, "RC information inaccurate because userspace "
		    "holds a reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			DRM_UDELAY(10);
		sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	DRM_UNLOCK(dev);

	sbuf_printf(m, "Video Turbo Mode: %s\n",
	    yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	sbuf_printf(m, "HW control enabled: %s\n",
	    yesno(rpmodectl1 & GEN6_RP_ENABLE));
	sbuf_printf(m, "SW control enabled: %s\n",
	    yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
	    GEN6_RP_MEDIA_SW_MODE));
	sbuf_printf(m, "RC1e Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	sbuf_printf(m, "RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	sbuf_printf(m, "Deep RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	sbuf_printf(m, "Deepest RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	sbuf_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			sbuf_printf(m, "Core Power Down\n");
		else
			sbuf_printf(m, "on\n");
		break;
	case GEN6_RC3:
		sbuf_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		sbuf_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		sbuf_printf(m, "RC7\n");
		break;
	default:
		sbuf_printf(m, "Unknown\n");
		break;
	}

	sbuf_printf(m, "Core Power Down: %s\n",
	    yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	return 0;
}

static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)
{

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return (gen6_drpc_info(dev, m));
	else
		return (ironlake_drpc_info(dev, m));
}

static int
i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		sbuf_printf(m, "FBC unsupported on this chipset");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		sbuf_printf(m, "FBC enabled");
	} else {
		sbuf_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			sbuf_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			sbuf_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			sbuf_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			sbuf_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			sbuf_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			sbuf_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			sbuf_printf(m, "multiple pipes are enabled");
			break;
		default:
			sbuf_printf(m, "unknown reason");
		}
	}
	return 0;
}

static int
i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	sbuf_printf(m, "self-refresh: %s",
	    sr_enabled ? "enabled" : "disabled");

	return (0);
}

static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
    void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		sbuf_printf(m, "unsupported on this chipset");
		return (0);
	}

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	    gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
		    GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (_intel_wait_for(dev,
		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		    10, 1, "915frq")) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;

	if (!IS_GEN5(dev)) {
		sbuf_printf(m, "Not supported\n");
		return (0);
	}

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	DRM_UNLOCK(dev);

	sbuf_printf(m, "GMCH temp: %ld\n", temp);
	sbuf_printf(m, "Chipset power: %ld\n", chipset);
	sbuf_printf(m, "GFX power: %ld\n", gfx);
	sbuf_printf(m, "Total power: %ld\n", chipset + gfx);

	return (0);
}

static int
i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
	DRM_UNLOCK(dev);

	return (0);
}

#if 0
static int
i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);
	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);
	DRM_UNLOCK(dev);

	return 0;
}
#endif

static int
i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;

	if (sx_xlock_sig(&dev->dev_struct_lock))
		return (EINTR);

	ifbdev = dev_priv->fbdev;
	if (ifbdev == NULL) {
		DRM_UNLOCK(dev);
		return (0);
	}
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
	    fb->base.width,
	    fb->base.height,
	    fb->base.depth,
	    fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	sbuf_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
		    fb->base.width,
		    fb->base.height,
		    fb->base.depth,
		    fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		sbuf_printf(m, "\n");
	}

	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
{
	drm_i915_private_t *dev_priv;
	int ret;

	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
		return (0);

	dev_priv = dev->dev_private;
	ret = sx_xlock_sig(&dev->mode_config.mutex);
	if (ret != 0)
		return (EINTR);

	if (dev_priv->pwrctx != NULL) {
		sbuf_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		sbuf_printf(m, "\n");
	}

	if (dev_priv->renderctx != NULL) {
		sbuf_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		sbuf_printf(m, "\n");
	}

	sx_xunlock(&dev->mode_config.mutex);

	return (0);
}

static int
i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
    void *data)
{
	struct drm_i915_private *dev_priv;
	unsigned forcewake_count;

	dev_priv = dev->dev_private;
	mtx_lock(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	mtx_unlock(&dev_priv->gt_lock);

	sbuf_printf(m, "forcewake count = %u\n", forcewake_count);

	return (0);
}

static const char *
swizzle_string(unsigned swizzle)
{

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int
i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;
	ret = sx_xlock_sig(&dev->dev_struct_lock);
	if (ret != 0)
		return (EINTR);

	sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
	    swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n",
	    swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		sbuf_printf(m, "DDC = 0x%08x\n",
		    I915_READ(DCC));
		sbuf_printf(m, "C0DRB3 = 0x%04x\n",
		    I915_READ16(C0DRB3));
		sbuf_printf(m, "C1DRB3 = 0x%04x\n",
		    I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C0));
		sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C1));
		sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C2));
		sbuf_printf(m, "TILECTL = 0x%08x\n",
		    I915_READ(TILECTL));
		sbuf_printf(m, "ARB_MODE = 0x%08x\n",
		    I915_READ(ARB_MODE));
		sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
		    I915_READ(DISP_ARB_CTL));
	}
	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
	struct drm_i915_private *dev_priv;
	struct intel_ring_buffer *ring;
	int i, ret;

	dev_priv = dev->dev_private;

	ret = sx_xlock_sig(&dev->dev_struct_lock);
	if (ret != 0)
		return (EINTR);
	if (INTEL_INFO(dev)->gen == 6)
		sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->rings[i];

		sbuf_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		sbuf_printf(m, "aliasing PPGTT:\n");
		sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	DRM_UNLOCK(dev);

	return (0);
}

static int
i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error, wedged;

	dev = arg1;
	dev_priv = dev->dev_private;
	if (dev_priv == NULL)
		return (EBUSY);
	wedged = dev_priv->mm.wedged;
	error = sysctl_handle_int(oidp, &wedged, 0, req);
	if (error || !req->newptr)
		return (error);
	DRM_INFO("Manually setting wedged to %d\n", wedged);
	i915_handle_error(dev, wedged);
	return (error);
}

static int
i915_max_freq(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error, max_freq;

	dev = arg1;
	dev_priv = dev->dev_private;
	if (dev_priv == NULL)
		return (EBUSY);
	max_freq = dev_priv->max_delay * 50;
	error = sysctl_handle_int(oidp, &max_freq, 0, req);
	if (error || !req->newptr)
		return (error);
	DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = max_freq / 50;
	gen6_set_rps(dev, max_freq / 50);
	return (error);
}

static int
i915_cache_sharing(SYSCTL_HANDLER_ARGS)
{
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error, snpcr, cache_sharing;

	dev = arg1;
	dev_priv = dev->dev_private;
	if (dev_priv == NULL)
		return (EBUSY);
	DRM_LOCK(dev);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	DRM_UNLOCK(dev);
	cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
	error = sysctl_handle_int(oidp, &cache_sharing, 0, req);
	if (error || !req->newptr)
		return (error);
	if (cache_sharing < 0 || cache_sharing > 3)
		return (EINVAL);
	DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing);

	DRM_LOCK(dev);
	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	DRM_UNLOCK(dev);
	return (0);
}

static struct i915_info_sysctl_list {
	const char *name;
	int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
	int flags;
	void *data;
} i915_info_sysctl_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0,
	    (void *)FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0,
	    (void *)INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0,
	    (void *)PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0,
	    (void *)DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
#if 0
	{"i915_opregion", i915_opregion, 0},
#endif
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
};

struct i915_info_sysctl_thunk {
	struct drm_device *dev;
	int idx;
	void *arg;
};

static int
i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct sbuf m;
	struct i915_info_sysctl_thunk *thunk;
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error;

	thunk = arg1;
	dev = thunk->dev;
	dev_priv = dev->dev_private;
	if (dev_priv == NULL)
		return (EBUSY);
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&m, NULL, 128, req);
	error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
	    thunk->arg);
	if (error == 0)
		error = sbuf_finish(&m);
	sbuf_delete(&m);
	return (error);
}

extern int i915_gem_sync_exec_requests;
extern int i915_fix_mi_batchbuffer_end;
extern int i915_intr_pf;
extern long i915_gem_wired_pages_cnt;

int
i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid, *info;
	struct i915_info_sysctl_thunk *thunks;
	int i, error;

	thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
		thunks[i].dev = dev;
		thunks[i].idx = i;
		thunks[i].arg = i915_info_sysctl_list[i].data;
	}
	dev->sysctl_private = thunks;
	info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
	    CTLFLAG_RW, NULL, NULL);
	if (info == NULL)
		return (ENOMEM);
	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
		oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
		    i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
		    &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
		if (oid == NULL)
			return (ENOMEM);
	}
	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
	    NULL);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
	    i915_debug_set_wedged, "I", NULL);
	if (oid == NULL)
		return (ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
	    "I", NULL);
	if (oid == NULL)
		return (ENOMEM);
	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
	    0, i915_cache_sharing, "I", NULL);
	if (oid == NULL)
		return (ENOMEM);
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
	    CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
	    CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
	    CTLFLAG_RW, &i915_intr_pf, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);

	error = drm_add_busid_modesetting(dev, ctx, top);
	if (error != 0)
		return (error);

	return (0);
}

void
i915_sysctl_cleanup(struct drm_device *dev)
{

	free(dev->sysctl_private, DRM_MEM_DRIVER);
}