1235783Skib/* 2235783Skib * Copyright �� 2008 Intel Corporation 3235783Skib * 4235783Skib * Permission is hereby granted, free of charge, to any person obtaining a 5235783Skib * copy of this software and associated documentation files (the "Software"), 6235783Skib * to deal in the Software without restriction, including without limitation 7235783Skib * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8235783Skib * and/or sell copies of the Software, and to permit persons to whom the 9235783Skib * Software is furnished to do so, subject to the following conditions: 10235783Skib * 11235783Skib * The above copyright notice and this permission notice (including the next 12235783Skib * paragraph) shall be included in all copies or substantial portions of the 13235783Skib * Software. 14235783Skib * 15235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16235783Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17235783Skib * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18235783Skib * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19235783Skib * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20235783Skib * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21235783Skib * IN THE SOFTWARE. 
22235783Skib * 23235783Skib * Authors: 24235783Skib * Eric Anholt <eric@anholt.net> 25235783Skib * Keith Packard <keithp@keithp.com> 26235783Skib * 27235783Skib */ 28235783Skib 29235783Skib#include <sys/cdefs.h> 30235783Skib__FBSDID("$FreeBSD$"); 31235783Skib 32235783Skib#include <dev/drm2/drmP.h> 33235783Skib#include <dev/drm2/drm.h> 34235783Skib#include <dev/drm2/i915/i915_drm.h> 35235783Skib#include <dev/drm2/i915/i915_drv.h> 36235783Skib#include <dev/drm2/i915/intel_drv.h> 37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h> 38235783Skib 39235783Skib#include <sys/sysctl.h> 40235783Skib 41235783Skibenum { 42235783Skib ACTIVE_LIST, 43235783Skib FLUSHING_LIST, 44235783Skib INACTIVE_LIST, 45235783Skib PINNED_LIST, 46235783Skib DEFERRED_FREE_LIST, 47235783Skib}; 48235783Skib 49235783Skibstatic const char * 50235783Skibyesno(int v) 51235783Skib{ 52235783Skib return (v ? "yes" : "no"); 53235783Skib} 54235783Skib 55235783Skibstatic int 56235783Skibi915_capabilities(struct drm_device *dev, struct sbuf *m, void *data) 57235783Skib{ 58235783Skib const struct intel_device_info *info = INTEL_INFO(dev); 59235783Skib 60235783Skib sbuf_printf(m, "gen: %d\n", info->gen); 61235783Skib if (HAS_PCH_SPLIT(dev)) 62235783Skib sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 63235783Skib#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x)) 64235783Skib B(is_mobile); 65235783Skib B(is_i85x); 66235783Skib B(is_i915g); 67235783Skib B(is_i945gm); 68235783Skib B(is_g33); 69235783Skib B(need_gfx_hws); 70235783Skib B(is_g4x); 71235783Skib B(is_pineview); 72235783Skib B(has_fbc); 73235783Skib B(has_pipe_cxsr); 74235783Skib B(has_hotplug); 75235783Skib B(cursor_needs_physical); 76235783Skib B(has_overlay); 77235783Skib B(overlay_needs_physical); 78235783Skib B(supports_tv); 79235783Skib B(has_bsd_ring); 80235783Skib B(has_blt_ring); 81235783Skib B(has_llc); 82235783Skib#undef B 83235783Skib 84235783Skib return (0); 85235783Skib} 86235783Skib 87235783Skibstatic const char * 
88235783Skibget_pin_flag(struct drm_i915_gem_object *obj) 89235783Skib{ 90235783Skib if (obj->user_pin_count > 0) 91235783Skib return "P"; 92235783Skib else if (obj->pin_count > 0) 93235783Skib return "p"; 94235783Skib else 95235783Skib return " "; 96235783Skib} 97235783Skib 98235783Skibstatic const char * 99235783Skibget_tiling_flag(struct drm_i915_gem_object *obj) 100235783Skib{ 101235783Skib switch (obj->tiling_mode) { 102235783Skib default: 103235783Skib case I915_TILING_NONE: return (" "); 104235783Skib case I915_TILING_X: return ("X"); 105235783Skib case I915_TILING_Y: return ("Y"); 106235783Skib } 107235783Skib} 108235783Skib 109235783Skibstatic const char * 110235783Skibcache_level_str(int type) 111235783Skib{ 112235783Skib switch (type) { 113235783Skib case I915_CACHE_NONE: return " uncached"; 114235783Skib case I915_CACHE_LLC: return " snooped (LLC)"; 115235783Skib case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 116235783Skib default: return (""); 117235783Skib } 118235783Skib} 119235783Skib 120235783Skibstatic void 121235783Skibdescribe_obj(struct sbuf *m, struct drm_i915_gem_object *obj) 122235783Skib{ 123235783Skib 124235783Skib sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", 125235783Skib &obj->base, 126235783Skib get_pin_flag(obj), 127235783Skib get_tiling_flag(obj), 128235783Skib obj->base.size / 1024, 129235783Skib obj->base.read_domains, 130235783Skib obj->base.write_domain, 131235783Skib obj->last_rendering_seqno, 132235783Skib obj->last_fenced_seqno, 133235783Skib cache_level_str(obj->cache_level), 134235783Skib obj->dirty ? " dirty" : "", 135235783Skib obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); 136235783Skib if (obj->base.name) 137235783Skib sbuf_printf(m, " (name: %d)", obj->base.name); 138235783Skib if (obj->fence_reg != I915_FENCE_REG_NONE) 139235783Skib sbuf_printf(m, " (fence: %d)", obj->fence_reg); 140235783Skib if (obj->gtt_space != NULL) 141235783Skib sbuf_printf(m, " (gtt offset: %08x, size: %08x)", 142235783Skib obj->gtt_offset, (unsigned int)obj->gtt_space->size); 143235783Skib if (obj->pin_mappable || obj->fault_mappable) { 144235783Skib char s[3], *t = s; 145235783Skib if (obj->pin_mappable) 146235783Skib *t++ = 'p'; 147235783Skib if (obj->fault_mappable) 148235783Skib *t++ = 'f'; 149235783Skib *t = '\0'; 150235783Skib sbuf_printf(m, " (%s mappable)", s); 151235783Skib } 152235783Skib if (obj->ring != NULL) 153235783Skib sbuf_printf(m, " (%s)", obj->ring->name); 154235783Skib} 155235783Skib 156235783Skibstatic int 157235783Skibi915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data) 158235783Skib{ 159235783Skib uintptr_t list = (uintptr_t)data; 160235783Skib struct list_head *head; 161235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 162235783Skib struct drm_i915_gem_object *obj; 163235783Skib size_t total_obj_size, total_gtt_size; 164235783Skib int count; 165235783Skib 166235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 167235783Skib return (EINTR); 168235783Skib 169235783Skib switch (list) { 170235783Skib case ACTIVE_LIST: 171235783Skib sbuf_printf(m, "Active:\n"); 172235783Skib head = &dev_priv->mm.active_list; 173235783Skib break; 174235783Skib case INACTIVE_LIST: 175235783Skib sbuf_printf(m, "Inactive:\n"); 176235783Skib head = &dev_priv->mm.inactive_list; 177235783Skib break; 178235783Skib case PINNED_LIST: 179235783Skib sbuf_printf(m, "Pinned:\n"); 180235783Skib head = &dev_priv->mm.pinned_list; 181235783Skib break; 182235783Skib case FLUSHING_LIST: 183235783Skib sbuf_printf(m, "Flushing:\n"); 184235783Skib head = &dev_priv->mm.flushing_list; 185235783Skib break; 186235783Skib case 
DEFERRED_FREE_LIST: 187235783Skib sbuf_printf(m, "Deferred free:\n"); 188235783Skib head = &dev_priv->mm.deferred_free_list; 189235783Skib break; 190235783Skib default: 191235783Skib DRM_UNLOCK(dev); 192235783Skib return (EINVAL); 193235783Skib } 194235783Skib 195235783Skib total_obj_size = total_gtt_size = count = 0; 196235783Skib list_for_each_entry(obj, head, mm_list) { 197235783Skib sbuf_printf(m, " "); 198235783Skib describe_obj(m, obj); 199235783Skib sbuf_printf(m, "\n"); 200235783Skib total_obj_size += obj->base.size; 201235783Skib total_gtt_size += obj->gtt_space->size; 202235783Skib count++; 203235783Skib } 204235783Skib DRM_UNLOCK(dev); 205235783Skib 206235783Skib sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 207235783Skib count, total_obj_size, total_gtt_size); 208235783Skib return (0); 209235783Skib} 210235783Skib 211235783Skib#define count_objects(list, member) do { \ 212235783Skib list_for_each_entry(obj, list, member) { \ 213235783Skib size += obj->gtt_space->size; \ 214235783Skib ++count; \ 215235783Skib if (obj->map_and_fenceable) { \ 216235783Skib mappable_size += obj->gtt_space->size; \ 217235783Skib ++mappable_count; \ 218235783Skib } \ 219235783Skib } \ 220235783Skib} while (0) 221235783Skib 222235783Skibstatic int 223235783Skibi915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data) 224235783Skib{ 225235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 226235783Skib u32 count, mappable_count; 227235783Skib size_t size, mappable_size; 228235783Skib struct drm_i915_gem_object *obj; 229235783Skib 230235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 231235783Skib return (EINTR); 232235783Skib sbuf_printf(m, "%u objects, %zu bytes\n", 233235783Skib dev_priv->mm.object_count, 234235783Skib dev_priv->mm.object_memory); 235235783Skib 236235783Skib size = count = mappable_size = mappable_count = 0; 237235783Skib count_objects(&dev_priv->mm.gtt_list, gtt_list); 238235783Skib sbuf_printf(m, "%u [%u] objects, 
%zu [%zu] bytes in gtt\n", 239235783Skib count, mappable_count, size, mappable_size); 240235783Skib 241235783Skib size = count = mappable_size = mappable_count = 0; 242235783Skib count_objects(&dev_priv->mm.active_list, mm_list); 243235783Skib count_objects(&dev_priv->mm.flushing_list, mm_list); 244235783Skib sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 245235783Skib count, mappable_count, size, mappable_size); 246235783Skib 247235783Skib size = count = mappable_size = mappable_count = 0; 248235783Skib count_objects(&dev_priv->mm.pinned_list, mm_list); 249235783Skib sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", 250235783Skib count, mappable_count, size, mappable_size); 251235783Skib 252235783Skib size = count = mappable_size = mappable_count = 0; 253235783Skib count_objects(&dev_priv->mm.inactive_list, mm_list); 254235783Skib sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 255235783Skib count, mappable_count, size, mappable_size); 256235783Skib 257235783Skib size = count = mappable_size = mappable_count = 0; 258235783Skib count_objects(&dev_priv->mm.deferred_free_list, mm_list); 259235783Skib sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", 260235783Skib count, mappable_count, size, mappable_size); 261235783Skib 262235783Skib size = count = mappable_size = mappable_count = 0; 263235783Skib list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 264235783Skib if (obj->fault_mappable) { 265235783Skib size += obj->gtt_space->size; 266235783Skib ++count; 267235783Skib } 268235783Skib if (obj->pin_mappable) { 269235783Skib mappable_size += obj->gtt_space->size; 270235783Skib ++mappable_count; 271235783Skib } 272235783Skib } 273235783Skib sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n", 274235783Skib mappable_count, mappable_size); 275235783Skib sbuf_printf(m, "%u fault mappable objects, %zu bytes\n", 276235783Skib count, size); 277235783Skib 278235783Skib sbuf_printf(m, "%zu [%zu] gtt total\n", 
279235783Skib dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 280235783Skib DRM_UNLOCK(dev); 281235783Skib 282235783Skib return (0); 283235783Skib} 284235783Skib 285235783Skibstatic int 286235783Skibi915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data) 287235783Skib{ 288235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 289235783Skib struct drm_i915_gem_object *obj; 290235783Skib size_t total_obj_size, total_gtt_size; 291235783Skib int count; 292235783Skib 293235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 294235783Skib return (EINTR); 295235783Skib 296235783Skib total_obj_size = total_gtt_size = count = 0; 297235783Skib list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 298235783Skib sbuf_printf(m, " "); 299235783Skib describe_obj(m, obj); 300235783Skib sbuf_printf(m, "\n"); 301235783Skib total_obj_size += obj->base.size; 302235783Skib total_gtt_size += obj->gtt_space->size; 303235783Skib count++; 304235783Skib } 305235783Skib 306235783Skib DRM_UNLOCK(dev); 307235783Skib 308235783Skib sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 309235783Skib count, total_obj_size, total_gtt_size); 310235783Skib 311235783Skib return (0); 312235783Skib} 313235783Skib 314235783Skibstatic int 315235783Skibi915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data) 316235783Skib{ 317235783Skib struct intel_crtc *crtc; 318235783Skib struct drm_i915_gem_object *obj; 319235783Skib struct intel_unpin_work *work; 320235783Skib char pipe; 321235783Skib char plane; 322235783Skib 323235783Skib if ((dev->driver->driver_features & DRIVER_MODESET) == 0) 324235783Skib return (0); 325235783Skib list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 326235783Skib pipe = pipe_name(crtc->pipe); 327235783Skib plane = plane_name(crtc->plane); 328235783Skib 329235783Skib mtx_lock(&dev->event_lock); 330235783Skib work = crtc->unpin_work; 331235783Skib if (work == NULL) { 332235783Skib sbuf_printf(m, "No 
flip due on pipe %c (plane %c)\n", 333235783Skib pipe, plane); 334235783Skib } else { 335235783Skib if (!work->pending) { 336235783Skib sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n", 337235783Skib pipe, plane); 338235783Skib } else { 339235783Skib sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 340235783Skib pipe, plane); 341235783Skib } 342235783Skib if (work->enable_stall_check) 343235783Skib sbuf_printf(m, "Stall check enabled, "); 344235783Skib else 345235783Skib sbuf_printf(m, "Stall check waiting for page flip ioctl, "); 346235783Skib sbuf_printf(m, "%d prepares\n", work->pending); 347235783Skib 348235783Skib if (work->old_fb_obj) { 349235783Skib obj = work->old_fb_obj; 350235783Skib if (obj) 351235783Skib sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 352235783Skib } 353235783Skib if (work->pending_flip_obj) { 354235783Skib obj = work->pending_flip_obj; 355235783Skib if (obj) 356235783Skib sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 357235783Skib } 358235783Skib } 359235783Skib mtx_unlock(&dev->event_lock); 360235783Skib } 361235783Skib 362235783Skib return (0); 363235783Skib} 364235783Skib 365235783Skibstatic int 366235783Skibi915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data) 367235783Skib{ 368235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 369235783Skib struct drm_i915_gem_request *gem_request; 370235783Skib int count; 371235783Skib 372235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 373235783Skib return (EINTR); 374235783Skib 375235783Skib count = 0; 376235783Skib if (!list_empty(&dev_priv->rings[RCS].request_list)) { 377235783Skib sbuf_printf(m, "Render requests:\n"); 378235783Skib list_for_each_entry(gem_request, 379235783Skib &dev_priv->rings[RCS].request_list, 380235783Skib list) { 381235783Skib sbuf_printf(m, " %d @ %d\n", 382235783Skib gem_request->seqno, 383235783Skib (int) (jiffies - gem_request->emitted_jiffies)); 
384235783Skib } 385235783Skib count++; 386235783Skib } 387235783Skib if (!list_empty(&dev_priv->rings[VCS].request_list)) { 388235783Skib sbuf_printf(m, "BSD requests:\n"); 389235783Skib list_for_each_entry(gem_request, 390235783Skib &dev_priv->rings[VCS].request_list, 391235783Skib list) { 392235783Skib sbuf_printf(m, " %d @ %d\n", 393235783Skib gem_request->seqno, 394235783Skib (int) (jiffies - gem_request->emitted_jiffies)); 395235783Skib } 396235783Skib count++; 397235783Skib } 398235783Skib if (!list_empty(&dev_priv->rings[BCS].request_list)) { 399235783Skib sbuf_printf(m, "BLT requests:\n"); 400235783Skib list_for_each_entry(gem_request, 401235783Skib &dev_priv->rings[BCS].request_list, 402235783Skib list) { 403235783Skib sbuf_printf(m, " %d @ %d\n", 404235783Skib gem_request->seqno, 405235783Skib (int) (jiffies - gem_request->emitted_jiffies)); 406235783Skib } 407235783Skib count++; 408235783Skib } 409235783Skib DRM_UNLOCK(dev); 410235783Skib 411235783Skib if (count == 0) 412235783Skib sbuf_printf(m, "No requests\n"); 413235783Skib 414235783Skib return 0; 415235783Skib} 416235783Skib 417235783Skibstatic void 418235783Skibi915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring) 419235783Skib{ 420235783Skib if (ring->get_seqno) { 421235783Skib sbuf_printf(m, "Current sequence (%s): %d\n", 422235783Skib ring->name, ring->get_seqno(ring)); 423235783Skib sbuf_printf(m, "Waiter sequence (%s): %d\n", 424235783Skib ring->name, ring->waiting_seqno); 425235783Skib sbuf_printf(m, "IRQ sequence (%s): %d\n", 426235783Skib ring->name, ring->irq_seqno); 427235783Skib } 428235783Skib} 429235783Skib 430235783Skibstatic int 431235783Skibi915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data) 432235783Skib{ 433235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 434235783Skib int i; 435235783Skib 436235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 437235783Skib return (EINTR); 438235783Skib for (i = 0; i < I915_NUM_RINGS; i++) 
439235783Skib i915_ring_seqno_info(m, &dev_priv->rings[i]); 440235783Skib DRM_UNLOCK(dev); 441235783Skib return (0); 442235783Skib} 443235783Skib 444235783Skib 445235783Skibstatic int 446235783Skibi915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data) 447235783Skib{ 448235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 449235783Skib int i, pipe; 450235783Skib 451235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 452235783Skib return (EINTR); 453235783Skib 454235783Skib if (!HAS_PCH_SPLIT(dev)) { 455235783Skib sbuf_printf(m, "Interrupt enable: %08x\n", 456235783Skib I915_READ(IER)); 457235783Skib sbuf_printf(m, "Interrupt identity: %08x\n", 458235783Skib I915_READ(IIR)); 459235783Skib sbuf_printf(m, "Interrupt mask: %08x\n", 460235783Skib I915_READ(IMR)); 461235783Skib for_each_pipe(pipe) 462235783Skib sbuf_printf(m, "Pipe %c stat: %08x\n", 463235783Skib pipe_name(pipe), 464235783Skib I915_READ(PIPESTAT(pipe))); 465235783Skib } else { 466235783Skib sbuf_printf(m, "North Display Interrupt enable: %08x\n", 467235783Skib I915_READ(DEIER)); 468235783Skib sbuf_printf(m, "North Display Interrupt identity: %08x\n", 469235783Skib I915_READ(DEIIR)); 470235783Skib sbuf_printf(m, "North Display Interrupt mask: %08x\n", 471235783Skib I915_READ(DEIMR)); 472235783Skib sbuf_printf(m, "South Display Interrupt enable: %08x\n", 473235783Skib I915_READ(SDEIER)); 474235783Skib sbuf_printf(m, "South Display Interrupt identity: %08x\n", 475235783Skib I915_READ(SDEIIR)); 476235783Skib sbuf_printf(m, "South Display Interrupt mask: %08x\n", 477235783Skib I915_READ(SDEIMR)); 478235783Skib sbuf_printf(m, "Graphics Interrupt enable: %08x\n", 479235783Skib I915_READ(GTIER)); 480235783Skib sbuf_printf(m, "Graphics Interrupt identity: %08x\n", 481235783Skib I915_READ(GTIIR)); 482235783Skib sbuf_printf(m, "Graphics Interrupt mask: %08x\n", 483235783Skib I915_READ(GTIMR)); 484235783Skib } 485235783Skib sbuf_printf(m, "Interrupts received: %d\n", 486235783Skib 
atomic_read(&dev_priv->irq_received)); 487235783Skib for (i = 0; i < I915_NUM_RINGS; i++) { 488235783Skib if (IS_GEN6(dev) || IS_GEN7(dev)) { 489235783Skib sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n", 490235783Skib dev_priv->rings[i].name, 491235783Skib I915_READ_IMR(&dev_priv->rings[i])); 492235783Skib } 493235783Skib i915_ring_seqno_info(m, &dev_priv->rings[i]); 494235783Skib } 495235783Skib DRM_UNLOCK(dev); 496235783Skib 497235783Skib return (0); 498235783Skib} 499235783Skib 500235783Skibstatic int 501235783Skibi915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data) 502235783Skib{ 503235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 504235783Skib int i; 505235783Skib 506235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 507235783Skib return (EINTR); 508235783Skib 509235783Skib sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 510235783Skib sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 511235783Skib for (i = 0; i < dev_priv->num_fence_regs; i++) { 512235783Skib struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 513235783Skib 514235783Skib sbuf_printf(m, "Fenced object[%2d] = ", i); 515235783Skib if (obj == NULL) 516235783Skib sbuf_printf(m, "unused"); 517235783Skib else 518235783Skib describe_obj(m, obj); 519235783Skib sbuf_printf(m, "\n"); 520235783Skib } 521235783Skib 522235783Skib DRM_UNLOCK(dev); 523235783Skib return (0); 524235783Skib} 525235783Skib 526235783Skibstatic int 527235783Skibi915_hws_info(struct drm_device *dev, struct sbuf *m, void *data) 528235783Skib{ 529235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 530235783Skib struct intel_ring_buffer *ring; 531235783Skib const volatile u32 *hws; 532235783Skib int i; 533235783Skib 534235783Skib ring = &dev_priv->rings[(uintptr_t)data]; 535235783Skib hws = (volatile u32 *)ring->status_page.page_addr; 536235783Skib if (hws == NULL) 537235783Skib return (0); 538235783Skib 539235783Skib for (i = 0; i < 4096 
/ sizeof(u32) / 4; i += 4) { 540235783Skib sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 541235783Skib i * 4, 542235783Skib hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 543235783Skib } 544235783Skib return (0); 545235783Skib} 546235783Skib 547235783Skibstatic int 548235783Skibi915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data) 549235783Skib{ 550235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 551235783Skib struct intel_ring_buffer *ring; 552235783Skib 553235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 554235783Skib return (EINTR); 555235783Skib ring = &dev_priv->rings[(uintptr_t)data]; 556235783Skib if (!ring->obj) { 557235783Skib sbuf_printf(m, "No ringbuffer setup\n"); 558235783Skib } else { 559235783Skib u8 *virt = ring->virtual_start; 560235783Skib uint32_t off; 561235783Skib 562235783Skib for (off = 0; off < ring->size; off += 4) { 563235783Skib uint32_t *ptr = (uint32_t *)(virt + off); 564235783Skib sbuf_printf(m, "%08x : %08x\n", off, *ptr); 565235783Skib } 566235783Skib } 567235783Skib DRM_UNLOCK(dev); 568235783Skib return (0); 569235783Skib} 570235783Skib 571235783Skibstatic int 572235783Skibi915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data) 573235783Skib{ 574235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 575235783Skib struct intel_ring_buffer *ring; 576235783Skib 577235783Skib ring = &dev_priv->rings[(uintptr_t)data]; 578235783Skib if (ring->size == 0) 579235783Skib return (0); 580235783Skib 581235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 582235783Skib return (EINTR); 583235783Skib 584235783Skib sbuf_printf(m, "Ring %s:\n", ring->name); 585235783Skib sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 586235783Skib sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 587235783Skib sbuf_printf(m, " Size : %08x\n", ring->size); 588235783Skib sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); 589235783Skib sbuf_printf(m, " 
NOPID : %08x\n", I915_READ_NOPID(ring)); 590235783Skib if (IS_GEN6(dev) || IS_GEN7(dev)) { 591235783Skib sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); 592235783Skib sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); 593235783Skib } 594235783Skib sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 595235783Skib sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring)); 596235783Skib 597235783Skib DRM_UNLOCK(dev); 598235783Skib 599235783Skib return (0); 600235783Skib} 601235783Skib 602235783Skibstatic const char * 603235783Skibring_str(int ring) 604235783Skib{ 605235783Skib switch (ring) { 606235783Skib case RCS: return (" render"); 607235783Skib case VCS: return (" bsd"); 608235783Skib case BCS: return (" blt"); 609235783Skib default: return (""); 610235783Skib } 611235783Skib} 612235783Skib 613235783Skibstatic const char * 614235783Skibpin_flag(int pinned) 615235783Skib{ 616235783Skib if (pinned > 0) 617235783Skib return (" P"); 618235783Skib else if (pinned < 0) 619235783Skib return (" p"); 620235783Skib else 621235783Skib return (""); 622235783Skib} 623235783Skib 624235783Skibstatic const char *tiling_flag(int tiling) 625235783Skib{ 626235783Skib switch (tiling) { 627235783Skib default: 628235783Skib case I915_TILING_NONE: return ""; 629235783Skib case I915_TILING_X: return " X"; 630235783Skib case I915_TILING_Y: return " Y"; 631235783Skib } 632235783Skib} 633235783Skib 634235783Skibstatic const char *dirty_flag(int dirty) 635235783Skib{ 636235783Skib return dirty ? " dirty" : ""; 637235783Skib} 638235783Skib 639235783Skibstatic const char *purgeable_flag(int purgeable) 640235783Skib{ 641235783Skib return purgeable ? 
" purgeable" : ""; 642235783Skib} 643235783Skib 644235783Skibstatic void print_error_buffers(struct sbuf *m, const char *name, 645235783Skib struct drm_i915_error_buffer *err, int count) 646235783Skib{ 647235783Skib 648235783Skib sbuf_printf(m, "%s [%d]:\n", name, count); 649235783Skib 650235783Skib while (count--) { 651235783Skib sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", 652235783Skib err->gtt_offset, 653235783Skib err->size, 654235783Skib err->read_domains, 655235783Skib err->write_domain, 656235783Skib err->seqno, 657235783Skib pin_flag(err->pinned), 658235783Skib tiling_flag(err->tiling), 659235783Skib dirty_flag(err->dirty), 660235783Skib purgeable_flag(err->purgeable), 661235783Skib err->ring != -1 ? " " : "", 662235783Skib ring_str(err->ring), 663235783Skib cache_level_str(err->cache_level)); 664235783Skib 665235783Skib if (err->name) 666235783Skib sbuf_printf(m, " (name: %d)", err->name); 667235783Skib if (err->fence_reg != I915_FENCE_REG_NONE) 668235783Skib sbuf_printf(m, " (fence: %d)", err->fence_reg); 669235783Skib 670235783Skib sbuf_printf(m, "\n"); 671235783Skib err++; 672235783Skib } 673235783Skib} 674235783Skib 675235783Skibstatic void 676235783Skibi915_ring_error_state(struct sbuf *m, struct drm_device *dev, 677235783Skib struct drm_i915_error_state *error, unsigned ring) 678235783Skib{ 679235783Skib 680235783Skib sbuf_printf(m, "%s command stream:\n", ring_str(ring)); 681235783Skib sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 682235783Skib sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 683235783Skib sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 684235783Skib sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 685235783Skib sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 686235783Skib sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 687235783Skib if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { 688235783Skib sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 689235783Skib 
sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr); 690235783Skib } 691235783Skib if (INTEL_INFO(dev)->gen >= 4) 692235783Skib sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 693235783Skib sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 694235783Skib if (INTEL_INFO(dev)->gen >= 6) { 695235783Skib sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 696235783Skib sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 697235783Skib sbuf_printf(m, " SYNC_0: 0x%08x\n", 698235783Skib error->semaphore_mboxes[ring][0]); 699235783Skib sbuf_printf(m, " SYNC_1: 0x%08x\n", 700235783Skib error->semaphore_mboxes[ring][1]); 701235783Skib } 702235783Skib sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 703235783Skib sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 704235783Skib sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 705235783Skib} 706235783Skib 707235783Skibstatic int i915_error_state(struct drm_device *dev, struct sbuf *m, 708235783Skib void *unused) 709235783Skib{ 710235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 711235783Skib struct drm_i915_error_state *error; 712235783Skib int i, j, page, offset, elt; 713235783Skib 714235783Skib mtx_lock(&dev_priv->error_lock); 715235783Skib if (!dev_priv->first_error) { 716235783Skib sbuf_printf(m, "no error state collected\n"); 717235783Skib goto out; 718235783Skib } 719235783Skib 720235783Skib error = dev_priv->first_error; 721235783Skib 722235783Skib sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec, 723235783Skib (intmax_t)error->time.tv_usec); 724235783Skib sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 725235783Skib sbuf_printf(m, "EIR: 0x%08x\n", error->eir); 726235783Skib sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 727235783Skib 728235783Skib for (i = 0; i < dev_priv->num_fence_regs; i++) 729235783Skib sbuf_printf(m, " fence[%d] = %08jx\n", i, 730235783Skib (uintmax_t)error->fence[i]); 
731235783Skib 732235783Skib if (INTEL_INFO(dev)->gen >= 6) { 733235783Skib sbuf_printf(m, "ERROR: 0x%08x\n", error->error); 734235783Skib sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 735235783Skib } 736235783Skib 737235783Skib i915_ring_error_state(m, dev, error, RCS); 738235783Skib if (HAS_BLT(dev)) 739235783Skib i915_ring_error_state(m, dev, error, BCS); 740235783Skib if (HAS_BSD(dev)) 741235783Skib i915_ring_error_state(m, dev, error, VCS); 742235783Skib 743235783Skib if (error->active_bo) 744235783Skib print_error_buffers(m, "Active", 745235783Skib error->active_bo, 746235783Skib error->active_bo_count); 747235783Skib 748235783Skib if (error->pinned_bo) 749235783Skib print_error_buffers(m, "Pinned", 750235783Skib error->pinned_bo, 751235783Skib error->pinned_bo_count); 752235783Skib 753235783Skib for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { 754235783Skib struct drm_i915_error_object *obj; 755235783Skib 756235783Skib if ((obj = error->ring[i].batchbuffer)) { 757235783Skib sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n", 758235783Skib dev_priv->rings[i].name, 759235783Skib obj->gtt_offset); 760235783Skib offset = 0; 761235783Skib for (page = 0; page < obj->page_count; page++) { 762235783Skib for (elt = 0; elt < PAGE_SIZE/4; elt++) { 763235783Skib sbuf_printf(m, "%08x : %08x\n", 764235783Skib offset, obj->pages[page][elt]); 765235783Skib offset += 4; 766235783Skib } 767235783Skib } 768235783Skib } 769235783Skib 770235783Skib if (error->ring[i].num_requests) { 771235783Skib sbuf_printf(m, "%s --- %d requests\n", 772235783Skib dev_priv->rings[i].name, 773235783Skib error->ring[i].num_requests); 774235783Skib for (j = 0; j < error->ring[i].num_requests; j++) { 775235783Skib sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 776235783Skib error->ring[i].requests[j].seqno, 777235783Skib error->ring[i].requests[j].jiffies, 778235783Skib error->ring[i].requests[j].tail); 779235783Skib } 780235783Skib } 781235783Skib 782235783Skib if ((obj = 
error->ring[i].ringbuffer)) { 783235783Skib sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n", 784235783Skib dev_priv->rings[i].name, 785235783Skib obj->gtt_offset); 786235783Skib offset = 0; 787235783Skib for (page = 0; page < obj->page_count; page++) { 788235783Skib for (elt = 0; elt < PAGE_SIZE/4; elt++) { 789235783Skib sbuf_printf(m, "%08x : %08x\n", 790235783Skib offset, 791235783Skib obj->pages[page][elt]); 792235783Skib offset += 4; 793235783Skib } 794235783Skib } 795235783Skib } 796235783Skib } 797235783Skib 798235783Skib if (error->overlay) 799235783Skib intel_overlay_print_error_state(m, error->overlay); 800235783Skib 801235783Skib if (error->display) 802235783Skib intel_display_print_error_state(m, dev, error->display); 803235783Skib 804235783Skibout: 805235783Skib mtx_unlock(&dev_priv->error_lock); 806235783Skib 807235783Skib return (0); 808235783Skib} 809235783Skib 810235783Skibstatic int 811235783Skibi915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused) 812235783Skib{ 813235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 814235783Skib u16 crstanddelay; 815235783Skib 816235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 817235783Skib return (EINTR); 818235783Skib crstanddelay = I915_READ16(CRSTANDVID); 819235783Skib DRM_UNLOCK(dev); 820235783Skib 821235783Skib sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n", 822235783Skib (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 823235783Skib 824235783Skib return 0; 825235783Skib} 826235783Skib 827235783Skibstatic int 828235783Skibi915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused) 829235783Skib{ 830235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 831235783Skib 832235783Skib if (IS_GEN5(dev)) { 833235783Skib u16 rgvswctl = I915_READ16(MEMSWCTL); 834235783Skib u16 rgvstat = I915_READ16(MEMSTAT_ILK); 835235783Skib 836235783Skib sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 837235783Skib sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 
0x3f); 838235783Skib sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 839235783Skib MEMSTAT_VID_SHIFT); 840235783Skib sbuf_printf(m, "Current P-state: %d\n", 841235783Skib (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 842235783Skib } else if (IS_GEN6(dev)) { 843235783Skib u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 844235783Skib u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 845235783Skib u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 846235783Skib u32 rpstat; 847235783Skib u32 rpupei, rpcurup, rpprevup; 848235783Skib u32 rpdownei, rpcurdown, rpprevdown; 849235783Skib int max_freq; 850235783Skib 851235783Skib /* RPSTAT1 is in the GT power well */ 852235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 853235783Skib return (EINTR); 854235783Skib gen6_gt_force_wake_get(dev_priv); 855235783Skib 856235783Skib rpstat = I915_READ(GEN6_RPSTAT1); 857235783Skib rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 858235783Skib rpcurup = I915_READ(GEN6_RP_CUR_UP); 859235783Skib rpprevup = I915_READ(GEN6_RP_PREV_UP); 860235783Skib rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 861235783Skib rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 862235783Skib rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 863235783Skib 864235783Skib gen6_gt_force_wake_put(dev_priv); 865235783Skib DRM_UNLOCK(dev); 866235783Skib 867235783Skib sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 868235783Skib sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 869235783Skib sbuf_printf(m, "Render p-state ratio: %d\n", 870235783Skib (gt_perf_status & 0xff00) >> 8); 871235783Skib sbuf_printf(m, "Render p-state VID: %d\n", 872235783Skib gt_perf_status & 0xff); 873235783Skib sbuf_printf(m, "Render p-state limit: %d\n", 874235783Skib rp_state_limits & 0xff); 875235783Skib sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 876235783Skib GEN6_CAGF_SHIFT) * 50); 877235783Skib sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei & 878235783Skib GEN6_CURICONT_MASK); 879235783Skib 
sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup & 880235783Skib GEN6_CURBSYTAVG_MASK); 881235783Skib sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup & 882235783Skib GEN6_CURBSYTAVG_MASK); 883235783Skib sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 884235783Skib GEN6_CURIAVG_MASK); 885235783Skib sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 886235783Skib GEN6_CURBSYTAVG_MASK); 887235783Skib sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 888235783Skib GEN6_CURBSYTAVG_MASK); 889235783Skib 890235783Skib max_freq = (rp_state_cap & 0xff0000) >> 16; 891235783Skib sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n", 892235783Skib max_freq * 50); 893235783Skib 894235783Skib max_freq = (rp_state_cap & 0xff00) >> 8; 895235783Skib sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n", 896235783Skib max_freq * 50); 897235783Skib 898235783Skib max_freq = rp_state_cap & 0xff; 899235783Skib sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 900235783Skib max_freq * 50); 901235783Skib } else { 902235783Skib sbuf_printf(m, "no P-state info available\n"); 903235783Skib } 904235783Skib 905235783Skib return 0; 906235783Skib} 907235783Skib 908235783Skibstatic int 909235783Skibi915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused) 910235783Skib{ 911235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 912235783Skib u32 delayfreq; 913235783Skib int i; 914235783Skib 915235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 916235783Skib return (EINTR); 917235783Skib for (i = 0; i < 16; i++) { 918235783Skib delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 919235783Skib sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 920235783Skib (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 921235783Skib } 922235783Skib DRM_UNLOCK(dev); 923235783Skib return (0); 924235783Skib} 925235783Skib 926235783Skibstatic inline int 927235783SkibMAP_TO_MV(int map) 928235783Skib{ 929235783Skib return 1250 - (map * 25); 930235783Skib} 931235783Skib 
932235783Skibstatic int 933235783Skibi915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused) 934235783Skib{ 935235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 936235783Skib u32 inttoext; 937235783Skib int i; 938235783Skib 939235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 940235783Skib return (EINTR); 941235783Skib for (i = 1; i <= 32; i++) { 942235783Skib inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 943235783Skib sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 944235783Skib } 945235783Skib DRM_UNLOCK(dev); 946235783Skib 947235783Skib return (0); 948235783Skib} 949235783Skib 950235783Skibstatic int 951235783Skibironlake_drpc_info(struct drm_device *dev, struct sbuf *m) 952235783Skib{ 953235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 954235783Skib u32 rgvmodectl; 955235783Skib u32 rstdbyctl; 956235783Skib u16 crstandvid; 957235783Skib 958235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 959235783Skib return (EINTR); 960235783Skib rgvmodectl = I915_READ(MEMMODECTL); 961235783Skib rstdbyctl = I915_READ(RSTDBYCTL); 962235783Skib crstandvid = I915_READ16(CRSTANDVID); 963235783Skib DRM_UNLOCK(dev); 964235783Skib 965235783Skib sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 966235783Skib "yes" : "no"); 967235783Skib sbuf_printf(m, "Boost freq: %d\n", 968235783Skib (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 969235783Skib MEMMODE_BOOST_FREQ_SHIFT); 970235783Skib sbuf_printf(m, "HW control enabled: %s\n", 971235783Skib rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 972235783Skib sbuf_printf(m, "SW control enabled: %s\n", 973235783Skib rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 974235783Skib sbuf_printf(m, "Gated voltage change: %s\n", 975235783Skib rgvmodectl & MEMMODE_RCLK_GATE ? 
"yes" : "no"); 976235783Skib sbuf_printf(m, "Starting frequency: P%d\n", 977235783Skib (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 978235783Skib sbuf_printf(m, "Max P-state: P%d\n", 979235783Skib (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 980235783Skib sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 981235783Skib sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 982235783Skib sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 983235783Skib sbuf_printf(m, "Render standby enabled: %s\n", 984235783Skib (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 985235783Skib sbuf_printf(m, "Current RS state: "); 986235783Skib switch (rstdbyctl & RSX_STATUS_MASK) { 987235783Skib case RSX_STATUS_ON: 988235783Skib sbuf_printf(m, "on\n"); 989235783Skib break; 990235783Skib case RSX_STATUS_RC1: 991235783Skib sbuf_printf(m, "RC1\n"); 992235783Skib break; 993235783Skib case RSX_STATUS_RC1E: 994235783Skib sbuf_printf(m, "RC1E\n"); 995235783Skib break; 996235783Skib case RSX_STATUS_RS1: 997235783Skib sbuf_printf(m, "RS1\n"); 998235783Skib break; 999235783Skib case RSX_STATUS_RS2: 1000235783Skib sbuf_printf(m, "RS2 (RC6)\n"); 1001235783Skib break; 1002235783Skib case RSX_STATUS_RS3: 1003235783Skib sbuf_printf(m, "RC3 (RC6+)\n"); 1004235783Skib break; 1005235783Skib default: 1006235783Skib sbuf_printf(m, "unknown\n"); 1007235783Skib break; 1008235783Skib } 1009235783Skib 1010235783Skib return 0; 1011235783Skib} 1012235783Skib 1013235783Skibstatic int 1014235783Skibgen6_drpc_info(struct drm_device *dev, struct sbuf *m) 1015235783Skib{ 1016235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1017235783Skib u32 rpmodectl1, gt_core_status, rcctl1; 1018235783Skib unsigned forcewake_count; 1019235783Skib int count=0; 1020235783Skib 1021235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1022235783Skib return (EINTR); 1023235783Skib 1024235783Skib mtx_lock(&dev_priv->gt_lock); 1025235783Skib forcewake_count = 
dev_priv->forcewake_count; 1026235783Skib mtx_unlock(&dev_priv->gt_lock); 1027235783Skib 1028235783Skib if (forcewake_count) { 1029235783Skib sbuf_printf(m, "RC information inaccurate because userspace " 1030235783Skib "holds a reference \n"); 1031235783Skib } else { 1032235783Skib /* NB: we cannot use forcewake, else we read the wrong values */ 1033235783Skib while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1034235783Skib DRM_UDELAY(10); 1035235783Skib sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1036235783Skib } 1037235783Skib 1038235783Skib gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS); 1039235783Skib trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1040235783Skib 1041235783Skib rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1042235783Skib rcctl1 = I915_READ(GEN6_RC_CONTROL); 1043235783Skib DRM_UNLOCK(dev); 1044235783Skib 1045235783Skib sbuf_printf(m, "Video Turbo Mode: %s\n", 1046235783Skib yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1047235783Skib sbuf_printf(m, "HW control enabled: %s\n", 1048235783Skib yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1049235783Skib sbuf_printf(m, "SW control enabled: %s\n", 1050235783Skib yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1051235783Skib GEN6_RP_MEDIA_SW_MODE)); 1052235783Skib sbuf_printf(m, "RC1e Enabled: %s\n", 1053235783Skib yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1054235783Skib sbuf_printf(m, "RC6 Enabled: %s\n", 1055235783Skib yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1056235783Skib sbuf_printf(m, "Deep RC6 Enabled: %s\n", 1057235783Skib yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1058235783Skib sbuf_printf(m, "Deepest RC6 Enabled: %s\n", 1059235783Skib yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1060235783Skib sbuf_printf(m, "Current RC state: "); 1061235783Skib switch (gt_core_status & GEN6_RCn_MASK) { 1062235783Skib case GEN6_RC0: 1063235783Skib if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1064235783Skib sbuf_printf(m, "Core Power Down\n"); 
/*
 * Dispatch the DRPC report to the generation-appropriate implementation:
 * GEN6/GEN7 use the RC6 state machine, everything else the Ironlake path.
 */
static int
i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)
{

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return (gen6_drpc_info(dev, m));
	return (ironlake_drpc_info(dev, m));
}
break; 1122235783Skib case FBC_BAD_PLANE: 1123235783Skib sbuf_printf(m, "FBC unsupported on plane"); 1124235783Skib break; 1125235783Skib case FBC_NOT_TILED: 1126235783Skib sbuf_printf(m, "scanout buffer not tiled"); 1127235783Skib break; 1128235783Skib case FBC_MULTIPLE_PIPES: 1129235783Skib sbuf_printf(m, "multiple pipes are enabled"); 1130235783Skib break; 1131235783Skib default: 1132235783Skib sbuf_printf(m, "unknown reason"); 1133235783Skib } 1134235783Skib } 1135235783Skib return 0; 1136235783Skib} 1137235783Skib 1138235783Skibstatic int 1139235783Skibi915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused) 1140235783Skib{ 1141235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1142235783Skib bool sr_enabled = false; 1143235783Skib 1144235783Skib if (HAS_PCH_SPLIT(dev)) 1145235783Skib sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1146235783Skib else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1147235783Skib sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1148235783Skib else if (IS_I915GM(dev)) 1149235783Skib sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1150235783Skib else if (IS_PINEVIEW(dev)) 1151235783Skib sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1152235783Skib 1153235783Skib sbuf_printf(m, "self-refresh: %s", 1154235783Skib sr_enabled ? 
"enabled" : "disabled"); 1155235783Skib 1156235783Skib return (0); 1157235783Skib} 1158235783Skib 1159235783Skibstatic int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m, 1160235783Skib void *unused) 1161235783Skib{ 1162235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1163235783Skib int gpu_freq, ia_freq; 1164235783Skib 1165235783Skib if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1166235783Skib sbuf_printf(m, "unsupported on this chipset"); 1167235783Skib return (0); 1168235783Skib } 1169235783Skib 1170235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1171235783Skib return (EINTR); 1172235783Skib 1173235783Skib sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1174235783Skib 1175235783Skib for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1176235783Skib gpu_freq++) { 1177235783Skib I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1178235783Skib I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1179235783Skib GEN6_PCODE_READ_MIN_FREQ_TABLE); 1180235783Skib if (_intel_wait_for(dev, 1181235783Skib (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 1182235783Skib 10, 1, "915frq")) { 1183235783Skib DRM_ERROR("pcode read of freq table timed out\n"); 1184235783Skib continue; 1185235783Skib } 1186235783Skib ia_freq = I915_READ(GEN6_PCODE_DATA); 1187235783Skib sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1188235783Skib } 1189235783Skib 1190235783Skib DRM_UNLOCK(dev); 1191235783Skib 1192235783Skib return (0); 1193235783Skib} 1194235783Skib 1195235783Skibstatic int 1196235783Skibi915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused) 1197235783Skib{ 1198235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1199235783Skib unsigned long temp, chipset, gfx; 1200235783Skib 1201235783Skib if (!IS_GEN5(dev)) { 1202235783Skib sbuf_printf(m, "Not supported\n"); 1203235783Skib return (0); 1204235783Skib } 1205235783Skib 1206235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1207235783Skib return 
(EINTR); 1208235783Skib temp = i915_mch_val(dev_priv); 1209235783Skib chipset = i915_chipset_val(dev_priv); 1210235783Skib gfx = i915_gfx_val(dev_priv); 1211235783Skib DRM_UNLOCK(dev); 1212235783Skib 1213235783Skib sbuf_printf(m, "GMCH temp: %ld\n", temp); 1214235783Skib sbuf_printf(m, "Chipset power: %ld\n", chipset); 1215235783Skib sbuf_printf(m, "GFX power: %ld\n", gfx); 1216235783Skib sbuf_printf(m, "Total power: %ld\n", chipset + gfx); 1217235783Skib 1218235783Skib return (0); 1219235783Skib} 1220235783Skib 1221235783Skibstatic int 1222235783Skibi915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused) 1223235783Skib{ 1224235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1225235783Skib 1226235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1227235783Skib return (EINTR); 1228235783Skib sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1229235783Skib DRM_UNLOCK(dev); 1230235783Skib 1231235783Skib return (0); 1232235783Skib} 1233235783Skib 1234235783Skib#if 0 1235235783Skibstatic int 1236235783Skibi915_opregion(struct drm_device *dev, struct sbuf *m, void *unused) 1237235783Skib{ 1238235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1239235783Skib struct intel_opregion *opregion = &dev_priv->opregion; 1240235783Skib 1241235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1242235783Skib return (EINTR); 1243235783Skib if (opregion->header) 1244235783Skib seq_write(m, opregion->header, OPREGION_SIZE); 1245235783Skib DRM_UNLOCK(dev); 1246235783Skib 1247235783Skib return 0; 1248235783Skib} 1249235783Skib#endif 1250235783Skib 1251235783Skibstatic int 1252235783Skibi915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data) 1253235783Skib{ 1254235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1255235783Skib struct intel_fbdev *ifbdev; 1256235783Skib struct intel_framebuffer *fb; 1257235783Skib 1258235783Skib if (sx_xlock_sig(&dev->dev_struct_lock)) 1259235783Skib return (EINTR); 1260235783Skib 
1261235783Skib ifbdev = dev_priv->fbdev; 1262235783Skib if (ifbdev == NULL) { 1263235783Skib DRM_UNLOCK(dev); 1264235783Skib return (0); 1265235783Skib } 1266235783Skib fb = to_intel_framebuffer(ifbdev->helper.fb); 1267235783Skib 1268235783Skib sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1269235783Skib fb->base.width, 1270235783Skib fb->base.height, 1271235783Skib fb->base.depth, 1272235783Skib fb->base.bits_per_pixel); 1273235783Skib describe_obj(m, fb->obj); 1274235783Skib sbuf_printf(m, "\n"); 1275235783Skib 1276235783Skib list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1277235783Skib if (&fb->base == ifbdev->helper.fb) 1278235783Skib continue; 1279235783Skib 1280235783Skib sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1281235783Skib fb->base.width, 1282235783Skib fb->base.height, 1283235783Skib fb->base.depth, 1284235783Skib fb->base.bits_per_pixel); 1285235783Skib describe_obj(m, fb->obj); 1286235783Skib sbuf_printf(m, "\n"); 1287235783Skib } 1288235783Skib 1289235783Skib DRM_UNLOCK(dev); 1290235783Skib 1291235783Skib return (0); 1292235783Skib} 1293235783Skib 1294235783Skibstatic int 1295235783Skibi915_context_status(struct drm_device *dev, struct sbuf *m, void *data) 1296235783Skib{ 1297235783Skib drm_i915_private_t *dev_priv; 1298235783Skib int ret; 1299235783Skib 1300235783Skib if ((dev->driver->driver_features & DRIVER_MODESET) == 0) 1301235783Skib return (0); 1302235783Skib 1303235783Skib dev_priv = dev->dev_private; 1304235783Skib ret = sx_xlock_sig(&dev->mode_config.mutex); 1305235783Skib if (ret != 0) 1306235783Skib return (EINTR); 1307235783Skib 1308235783Skib if (dev_priv->pwrctx != NULL) { 1309235783Skib sbuf_printf(m, "power context "); 1310235783Skib describe_obj(m, dev_priv->pwrctx); 1311235783Skib sbuf_printf(m, "\n"); 1312235783Skib } 1313235783Skib 1314235783Skib if (dev_priv->renderctx != NULL) { 1315235783Skib sbuf_printf(m, "render context "); 1316235783Skib describe_obj(m, 
dev_priv->renderctx); 1317235783Skib sbuf_printf(m, "\n"); 1318235783Skib } 1319235783Skib 1320235783Skib sx_xunlock(&dev->mode_config.mutex); 1321235783Skib 1322235783Skib return (0); 1323235783Skib} 1324235783Skib 1325235783Skibstatic int 1326235783Skibi915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m, 1327235783Skib void *data) 1328235783Skib{ 1329235783Skib struct drm_i915_private *dev_priv; 1330235783Skib unsigned forcewake_count; 1331235783Skib 1332235783Skib dev_priv = dev->dev_private; 1333235783Skib mtx_lock(&dev_priv->gt_lock); 1334235783Skib forcewake_count = dev_priv->forcewake_count; 1335235783Skib mtx_unlock(&dev_priv->gt_lock); 1336235783Skib 1337235783Skib sbuf_printf(m, "forcewake count = %u\n", forcewake_count); 1338235783Skib 1339235783Skib return (0); 1340235783Skib} 1341235783Skib 1342235783Skibstatic const char * 1343235783Skibswizzle_string(unsigned swizzle) 1344235783Skib{ 1345235783Skib 1346235783Skib switch(swizzle) { 1347235783Skib case I915_BIT_6_SWIZZLE_NONE: 1348235783Skib return "none"; 1349235783Skib case I915_BIT_6_SWIZZLE_9: 1350235783Skib return "bit9"; 1351235783Skib case I915_BIT_6_SWIZZLE_9_10: 1352235783Skib return "bit9/bit10"; 1353235783Skib case I915_BIT_6_SWIZZLE_9_11: 1354235783Skib return "bit9/bit11"; 1355235783Skib case I915_BIT_6_SWIZZLE_9_10_11: 1356235783Skib return "bit9/bit10/bit11"; 1357235783Skib case I915_BIT_6_SWIZZLE_9_17: 1358235783Skib return "bit9/bit17"; 1359235783Skib case I915_BIT_6_SWIZZLE_9_10_17: 1360235783Skib return "bit9/bit10/bit17"; 1361235783Skib case I915_BIT_6_SWIZZLE_UNKNOWN: 1362235783Skib return "unknown"; 1363235783Skib } 1364235783Skib 1365235783Skib return "bug"; 1366235783Skib} 1367235783Skib 1368235783Skibstatic int 1369235783Skibi915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data) 1370235783Skib{ 1371235783Skib struct drm_i915_private *dev_priv; 1372235783Skib int ret; 1373235783Skib 1374235783Skib dev_priv = dev->dev_private; 1375235783Skib ret = 
sx_xlock_sig(&dev->dev_struct_lock); 1376235783Skib if (ret != 0) 1377235783Skib return (EINTR); 1378235783Skib 1379235783Skib sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n", 1380235783Skib swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1381235783Skib sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1382235783Skib swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1383235783Skib 1384235783Skib if (IS_GEN3(dev) || IS_GEN4(dev)) { 1385235783Skib sbuf_printf(m, "DDC = 0x%08x\n", 1386235783Skib I915_READ(DCC)); 1387235783Skib sbuf_printf(m, "C0DRB3 = 0x%04x\n", 1388235783Skib I915_READ16(C0DRB3)); 1389235783Skib sbuf_printf(m, "C1DRB3 = 0x%04x\n", 1390235783Skib I915_READ16(C1DRB3)); 1391235783Skib } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1392235783Skib sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1393235783Skib I915_READ(MAD_DIMM_C0)); 1394235783Skib sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1395235783Skib I915_READ(MAD_DIMM_C1)); 1396235783Skib sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1397235783Skib I915_READ(MAD_DIMM_C2)); 1398235783Skib sbuf_printf(m, "TILECTL = 0x%08x\n", 1399235783Skib I915_READ(TILECTL)); 1400235783Skib sbuf_printf(m, "ARB_MODE = 0x%08x\n", 1401235783Skib I915_READ(ARB_MODE)); 1402235783Skib sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1403235783Skib I915_READ(DISP_ARB_CTL)); 1404235783Skib } 1405235783Skib DRM_UNLOCK(dev); 1406235783Skib 1407235783Skib return (0); 1408235783Skib} 1409235783Skib 1410235783Skibstatic int 1411235783Skibi915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data) 1412235783Skib{ 1413235783Skib struct drm_i915_private *dev_priv; 1414235783Skib struct intel_ring_buffer *ring; 1415235783Skib int i, ret; 1416235783Skib 1417235783Skib dev_priv = dev->dev_private; 1418235783Skib 1419235783Skib ret = sx_xlock_sig(&dev->dev_struct_lock); 1420235783Skib if (ret != 0) 1421235783Skib return (EINTR); 1422235783Skib if (INTEL_INFO(dev)->gen == 6) 1423235783Skib sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 
1424235783Skib 1425235783Skib for (i = 0; i < I915_NUM_RINGS; i++) { 1426235783Skib ring = &dev_priv->rings[i]; 1427235783Skib 1428235783Skib sbuf_printf(m, "%s\n", ring->name); 1429235783Skib if (INTEL_INFO(dev)->gen == 7) 1430235783Skib sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1431235783Skib sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1432235783Skib sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1433235783Skib sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1434235783Skib } 1435235783Skib if (dev_priv->mm.aliasing_ppgtt) { 1436235783Skib struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1437235783Skib 1438235783Skib sbuf_printf(m, "aliasing PPGTT:\n"); 1439235783Skib sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1440235783Skib } 1441235783Skib sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1442235783Skib DRM_UNLOCK(dev); 1443235783Skib 1444235783Skib return (0); 1445235783Skib} 1446235783Skib 1447235783Skibstatic int 1448235783Skibi915_debug_set_wedged(SYSCTL_HANDLER_ARGS) 1449235783Skib{ 1450235783Skib struct drm_device *dev; 1451235783Skib drm_i915_private_t *dev_priv; 1452235783Skib int error, wedged; 1453235783Skib 1454235783Skib dev = arg1; 1455235783Skib dev_priv = dev->dev_private; 1456235783Skib if (dev_priv == NULL) 1457235783Skib return (EBUSY); 1458235783Skib wedged = dev_priv->mm.wedged; 1459235783Skib error = sysctl_handle_int(oidp, &wedged, 0, req); 1460235783Skib if (error || !req->newptr) 1461235783Skib return (error); 1462235783Skib DRM_INFO("Manually setting wedged to %d\n", wedged); 1463235783Skib i915_handle_error(dev, wedged); 1464235783Skib return (error); 1465235783Skib} 1466235783Skib 1467235783Skibstatic int 1468235783Skibi915_max_freq(SYSCTL_HANDLER_ARGS) 1469235783Skib{ 1470235783Skib struct drm_device *dev; 1471235783Skib drm_i915_private_t *dev_priv; 1472235783Skib int 
error, max_freq; 1473235783Skib 1474235783Skib dev = arg1; 1475235783Skib dev_priv = dev->dev_private; 1476235783Skib if (dev_priv == NULL) 1477235783Skib return (EBUSY); 1478235783Skib max_freq = dev_priv->max_delay * 50; 1479235783Skib error = sysctl_handle_int(oidp, &max_freq, 0, req); 1480235783Skib if (error || !req->newptr) 1481235783Skib return (error); 1482235783Skib DRM_DEBUG("Manually setting max freq to %d\n", max_freq); 1483235783Skib /* 1484235783Skib * Turbo will still be enabled, but won't go above the set value. 1485235783Skib */ 1486235783Skib dev_priv->max_delay = max_freq / 50; 1487235783Skib gen6_set_rps(dev, max_freq / 50); 1488235783Skib return (error); 1489235783Skib} 1490235783Skib 1491235783Skibstatic int 1492235783Skibi915_cache_sharing(SYSCTL_HANDLER_ARGS) 1493235783Skib{ 1494235783Skib struct drm_device *dev; 1495235783Skib drm_i915_private_t *dev_priv; 1496235783Skib int error, snpcr, cache_sharing; 1497235783Skib 1498235783Skib dev = arg1; 1499235783Skib dev_priv = dev->dev_private; 1500235783Skib if (dev_priv == NULL) 1501235783Skib return (EBUSY); 1502235783Skib DRM_LOCK(dev); 1503235783Skib snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1504235783Skib DRM_UNLOCK(dev); 1505235783Skib cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1506235783Skib error = sysctl_handle_int(oidp, &cache_sharing, 0, req); 1507235783Skib if (error || !req->newptr) 1508235783Skib return (error); 1509235783Skib if (cache_sharing < 0 || cache_sharing > 3) 1510235783Skib return (EINVAL); 1511235783Skib DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing); 1512235783Skib 1513235783Skib DRM_LOCK(dev); 1514235783Skib /* Update the cache sharing policy here as well */ 1515235783Skib snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1516235783Skib snpcr &= ~GEN6_MBC_SNPCR_MASK; 1517235783Skib snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT); 1518235783Skib I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1519235783Skib DRM_UNLOCK(dev); 1520235783Skib 
return (0); 1521235783Skib} 1522235783Skib 1523235783Skibstatic struct i915_info_sysctl_list { 1524235783Skib const char *name; 1525235783Skib int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data); 1526235783Skib int flags; 1527235783Skib void *data; 1528235783Skib} i915_info_sysctl_list[] = { 1529235783Skib {"i915_capabilities", i915_capabilities, 0}, 1530235783Skib {"i915_gem_objects", i915_gem_object_info, 0}, 1531235783Skib {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1532235783Skib {"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST}, 1533235783Skib {"i915_gem_flushing", i915_gem_object_list_info, 0, 1534235783Skib (void *)FLUSHING_LIST}, 1535235783Skib {"i915_gem_inactive", i915_gem_object_list_info, 0, 1536235783Skib (void *)INACTIVE_LIST}, 1537235783Skib {"i915_gem_pinned", i915_gem_object_list_info, 0, 1538235783Skib (void *)PINNED_LIST}, 1539235783Skib {"i915_gem_deferred_free", i915_gem_object_list_info, 0, 1540235783Skib (void *)DEFERRED_FREE_LIST}, 1541235783Skib {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1542235783Skib {"i915_gem_request", i915_gem_request_info, 0}, 1543235783Skib {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1544235783Skib {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1545235783Skib {"i915_gem_interrupt", i915_interrupt_info, 0}, 1546235783Skib {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1547235783Skib {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1548235783Skib {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1549235783Skib {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, 1550235783Skib {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, 1551235783Skib {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, 1552235783Skib {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, 1553235783Skib {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, 1554235783Skib {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void 
*)BCS}, 1555235783Skib {"i915_error_state", i915_error_state, 0}, 1556235783Skib {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1557235783Skib {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1558235783Skib {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1559235783Skib {"i915_inttoext_table", i915_inttoext_table, 0}, 1560235783Skib {"i915_drpc_info", i915_drpc_info, 0}, 1561235783Skib {"i915_emon_status", i915_emon_status, 0}, 1562235783Skib {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1563235783Skib {"i915_gfxec", i915_gfxec, 0}, 1564235783Skib {"i915_fbc_status", i915_fbc_status, 0}, 1565235783Skib {"i915_sr_status", i915_sr_status, 0}, 1566235783Skib#if 0 1567235783Skib {"i915_opregion", i915_opregion, 0}, 1568235783Skib#endif 1569235783Skib {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1570235783Skib {"i915_context_status", i915_context_status, 0}, 1571235783Skib {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0}, 1572235783Skib {"i915_swizzle_info", i915_swizzle_info, 0}, 1573235783Skib {"i915_ppgtt_info", i915_ppgtt_info, 0}, 1574235783Skib}; 1575235783Skib 1576235783Skibstruct i915_info_sysctl_thunk { 1577235783Skib struct drm_device *dev; 1578235783Skib int idx; 1579235783Skib void *arg; 1580235783Skib}; 1581235783Skib 1582235783Skibstatic int 1583235783Skibi915_info_sysctl_handler(SYSCTL_HANDLER_ARGS) 1584235783Skib{ 1585235783Skib struct sbuf m; 1586235783Skib struct i915_info_sysctl_thunk *thunk; 1587235783Skib struct drm_device *dev; 1588235783Skib drm_i915_private_t *dev_priv; 1589235783Skib int error; 1590235783Skib 1591235783Skib thunk = arg1; 1592235783Skib dev = thunk->dev; 1593235783Skib dev_priv = dev->dev_private; 1594235783Skib if (dev_priv == NULL) 1595235783Skib return (EBUSY); 1596235783Skib error = sysctl_wire_old_buffer(req, 0); 1597235783Skib if (error != 0) 1598235783Skib return (error); 1599235783Skib sbuf_new_for_sysctl(&m, NULL, 128, req); 1600235783Skib error = 
i915_info_sysctl_list[thunk->idx].ptr(dev, &m, 1601235783Skib thunk->arg); 1602235783Skib if (error == 0) 1603235783Skib error = sbuf_finish(&m); 1604235783Skib sbuf_delete(&m); 1605235783Skib return (error); 1606235783Skib} 1607235783Skib 1608235783Skibextern int i915_gem_sync_exec_requests; 1609235783Skibextern int i915_fix_mi_batchbuffer_end; 1610235783Skibextern int i915_intr_pf; 1611235783Skibextern long i915_gem_wired_pages_cnt; 1612235783Skib 1613235783Skibint 1614235783Skibi915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 1615235783Skib struct sysctl_oid *top) 1616235783Skib{ 1617235783Skib struct sysctl_oid *oid, *info; 1618235783Skib struct i915_info_sysctl_thunk *thunks; 1619235783Skib int i, error; 1620235783Skib 1621235783Skib thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list), 1622235783Skib DRM_MEM_DRIVER, M_WAITOK | M_ZERO); 1623235783Skib for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { 1624235783Skib thunks[i].dev = dev; 1625235783Skib thunks[i].idx = i; 1626235783Skib thunks[i].arg = i915_info_sysctl_list[i].data; 1627235783Skib } 1628235783Skib dev->sysctl_private = thunks; 1629235783Skib info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info", 1630235783Skib CTLFLAG_RW, NULL, NULL); 1631235783Skib if (info == NULL) 1632235783Skib return (ENOMEM); 1633235783Skib for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) { 1634235783Skib oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1635235783Skib i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD, 1636235783Skib &thunks[i], 0, i915_info_sysctl_handler, "A", NULL); 1637235783Skib if (oid == NULL) 1638235783Skib return (ENOMEM); 1639235783Skib } 1640235783Skib oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO, 1641235783Skib "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt, 1642235783Skib NULL); 1643235783Skib oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged", 
1644235783Skib CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, 1645235783Skib i915_debug_set_wedged, "I", NULL); 1646235783Skib if (oid == NULL) 1647235783Skib return (ENOMEM); 1648235783Skib oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq", 1649235783Skib CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq, 1650235783Skib "I", NULL); 1651235783Skib if (oid == NULL) 1652235783Skib return (ENOMEM); 1653235783Skib oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, 1654235783Skib "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 1655235783Skib 0, i915_cache_sharing, "I", NULL); 1656235783Skib if (oid == NULL) 1657235783Skib return (ENOMEM); 1658235783Skib oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec", 1659235783Skib CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL); 1660235783Skib if (oid == NULL) 1661235783Skib return (ENOMEM); 1662235783Skib oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi", 1663235783Skib CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL); 1664235783Skib if (oid == NULL) 1665235783Skib return (ENOMEM); 1666235783Skib oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf", 1667235783Skib CTLFLAG_RW, &i915_intr_pf, 0, NULL); 1668235783Skib if (oid == NULL) 1669235783Skib return (ENOMEM); 1670235783Skib 1671235783Skib error = drm_add_busid_modesetting(dev, ctx, top); 1672235783Skib if (error != 0) 1673235783Skib return (error); 1674235783Skib 1675235783Skib return (0); 1676235783Skib} 1677235783Skib 1678235783Skibvoid 1679235783Skibi915_sysctl_cleanup(struct drm_device *dev) 1680235783Skib{ 1681235783Skib 1682235783Skib free(dev->sysctl_private, DRM_MEM_DRIVER); 1683235783Skib} 1684