/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_dma.c 280369 2015-03-23 13:38:33Z kib $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX	0x21

static int i915_driver_unload_int(struct drm_device *dev, bool locked);
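
/*
 * Breadcrumb scheme, roughly: after each command submission the driver
 * stores an incrementing sequence number (the "breadcrumb") into dword
 * I915_BREADCRUMB_INDEX (0x21) of the hardware status page via
 * MI_STORE_DWORD_INDEX.  READ_BREADCRUMB() therefore reports the last
 * submission the GPU has retired without touching any ring registers.
 */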

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
		    READ_BREADCRUMB(dev_priv);
#endif
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, which has several
	 * errata regarding the use of physical memory > 4 GB.
	 */
	DRM_UNLOCK(dev);
	dev_priv->status_page_dmah =
	    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
	DRM_LOCK(dev);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}
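
/*
 * Note that i915_write_hws_pga() above packs bits 32..35 of the bus
 * address into the 0xf0 field of HWS_PGA on gen4+ parts; older parts
 * take only a 32-bit physical address, which is why the allocation is
 * clamped below 4GB.
 */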

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

#if 1
	KIB_NOTYET();
#else
	if (!dev->primary->master)
		return;
#endif

	if (ring->head == ring->tail && dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->rings[i]);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
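
/*
 * Legacy (non-KMS) DRI1 setup.  Userspace drives this through the
 * DRM_I915_INIT ioctl further below: I915_INIT_DMA maps the sarea and
 * ring, I915_CLEANUP_DMA tears them down, and I915_RESUME_DMA
 * re-programs the status page after a suspend/resume cycle.
 */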

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("\n");

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
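
/*
 * Worked example for the validator below, using an arbitrary encoding:
 * a 2D command (type 0x2 in bits 31:29), e.g. 0x54000004, carries its
 * payload length in the low byte, so do_validate_cmd() returns
 * (0x04 + 2) = 6 and the next command starts 6 dwords further on.  A
 * return of 0 flags an illegal opcode and aborts the whole buffer.
 */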

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int i915_emit_box(struct drm_device * dev,
		  struct drm_clip_rect *boxes,
		  int i, int DR1, int DR4)
{
	struct drm_clip_rect box;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	return (i915_emit_box_p(dev, &box, DR1, DR4));
}

int
i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
	    box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
			    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int
i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box_p(dev, &cliprects[i],
			    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret != 0)
				return (ret);

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret != 0)
				return (ret);

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
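
/*
 * Page flipping for the DRI1 path: the routine below feeds the ring a
 * CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP packet pointing at whichever
 * of front_offset/back_offset is not currently scanned out, then
 * queues an MI_WAIT_FOR_EVENT so following commands stall until the
 * flip has completed on plane A.
 */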

static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int
i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return (intel_wait_ring_idle(ring));
}

static int
i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return (ret);
}

int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	struct drm_clip_rect *cliprects;
	size_t cliplen;
	int ret;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}
	DRM_UNLOCK(dev);

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
	if (batch->num_cliprects < 0)
		return -EFAULT;
	if (batch->num_cliprects != 0) {
		cliprects = malloc(batch->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);

		ret = -copyin(batch->cliprects, cliprects,
		    batch->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0) {
			DRM_LOCK(dev);
			goto fail_free;
		}
	} else
		cliprects = NULL;

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);

	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);
	return ret;
}
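
/*
 * As in i915_batchbuffer() above, the "-copyin(...)" idiom below
 * converts FreeBSD's positive errno return into the negative error
 * convention this driver inherited from Linux.
 */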

int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	DRM_UNLOCK(dev);

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
	if (ret != 0) {
		DRM_LOCK(dev);
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);
		ret = -copyin(cmdbuf->cliprects, cliprects,
		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0) {
			DRM_LOCK(dev);
			goto fail_clip_free;
		}
	}

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);
	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
#if 0
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
#endif

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

#if 0
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#else
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv) {
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
		}
		return 0;
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		DRM_UNLOCK(dev);
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
		DRM_LOCK(dev);
	} else {
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		    3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
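
/*
 * The emit/wait pair above implements the DRI1 fence: i915_emit_irq()
 * stores a fresh sequence number in the status page and queues an
 * MI_USER_INTERRUPT, and i915_wait_irq() sleeps (or, without a working
 * ring interrupt, polls) until READ_BREADCRUMB() catches up to it.
 */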

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	ret = i915_dispatch_flip(dev);

	return ret;
}

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->rings[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->rings[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
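
/*
 * A minimal userspace sketch of the getparam ioctl above, assuming
 * libdrm's drmIoctl() wrapper:
 *
 *	drm_i915_getparam_t gp;
 *	int id;
 *
 *	gp.param = I915_PARAM_CHIPSET_ID;
 *	gp.value = &id;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", id);
 */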

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}
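
/*
 * On chipsets that need a GTT-based status page (I915_NEED_GFX_HWS,
 * e.g. G33-class hardware), userspace hands us a graphics address via
 * the HWS_ADDR ioctl and we map it write-combining below, instead of
 * using the physically-addressed page from i915_init_phys_hws().
 */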

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("tried to set status page when mode setting active\n");
		return 0;
	}

	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
	    hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
	    dev->agp->base + hws->addr, PAGE_SIZE,
	    VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
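
/*
 * KMS bring-up order matters below: stolen memory is reserved first so
 * BIOS-allocated framebuffers survive the transition, then modeset and
 * GEM are initialized, and only afterwards are interrupts and the
 * fbdev emulation wired in.
 */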

static int
i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#if 0
	intel_register_dsm_handler();
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret != 0)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return (0);

cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	return (ret);
}

static int
i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;

	dev_priv = dev->dev_private;

	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
	if (dev_priv->bridge_dev == NULL) {
		DRM_ERROR("bridge device not found\n");
		return (-1);
	}
	return (0);
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int reg;
	u32 temp_lo, temp_hi;
	u64 mchbar_addr, temp;

	dev_priv = dev->dev_private;
	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (INTEL_INFO(dev)->gen >= 4)
		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
	else
		temp_hi = 0;
	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->device);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return (-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = rman_get_start(dev_priv->mch_res);
		temp >>= 32;
		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
	}
	pci_write_config(dev_priv->bridge_dev, reg,
	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
	return (0);
}
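
/*
 * Whether MCHBAR decoding is enabled lives in different places per
 * generation: bit 28 of DEVEN_REG (0x54) on i915G/GM, bit 0 of the
 * MCHBAR register itself (0x44 pre-gen4, 0x48 on gen4+) elsewhere.
 * The setup/teardown pair below flips exactly that bit.
 */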

static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	int mchbar_reg;
	u32 temp;
	bool enabled;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled) {
		DRM_DEBUG("mchbar already enabled\n");
		return;
	}

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
		    temp | DEVEN_MCHBAR_EN, 4);
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int mchbar_reg;
	u32 temp;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			temp = pci_read_config(dev_priv->bridge_dev,
			    DEVEN_REG, 4);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
			    temp, 4);
		} else {
			temp = pci_read_config(dev_priv->bridge_dev,
			    mchbar_reg, 4);
			temp &= ~1;
			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
			    temp, 4);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->device);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}
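
/*
 * Attach path.  Everything i915_driver_load() sets up is torn down by
 * i915_driver_unload_int() further below, largely in reverse order, so
 * the two functions are best read side by side.
 */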

int
i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_device_info *info;
	unsigned long base, size;
	int mmio_bar, ret;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	ret = 0;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_ZERO | M_WAITOK);

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	if (i915_get_bridge_dev(dev)) {
		free(dev_priv, DRM_MEM_DRIVER);
		return (-EIO);
	}
	dev_priv->mm.gtt = intel_gtt_get();

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

	dev_priv->tq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->tq);
	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);

	intel_irq_init(dev);

	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0) {
			drm_rmmap(dev, dev_priv->mmio_map);
			drm_free(dev_priv, sizeof(struct drm_i915_private),
			    DRM_MEM_DRIVER);
			return ret;
		}
	}

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_UNLOCK(dev);
		ret = i915_load_modeset_init(dev);
		DRM_LOCK(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	intel_opregion_init(dev);

	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return (0);

out_gem_unload:
	/* XXXKIB */
	(void) i915_driver_unload_int(dev, true);
	return (ret);
}
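
/*
 * The "locked" flag tells the unload path whether the caller already
 * holds the DRM lock: both the error unwind in i915_driver_load() and
 * the normal detach via i915_driver_unload() enter with it held, so
 * they pass true.
 */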

static int
i915_driver_unload_int(struct drm_device *dev, bool locked)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!locked)
		DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	if (!locked)
		DRM_UNLOCK(dev);

	i915_free_hws(dev);

	intel_teardown_mchbar(dev);

	if (locked)
		DRM_UNLOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
	}

	/* Free error state after interrupts are fully disabled.
	 */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);

	i915_destroy_error_state(dev);

	intel_opregion_fini(dev);

	if (locked)
		DRM_LOCK(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (!locked)
			DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		if (!locked)
			DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
		KIB_NOTYET();
#else
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
#endif
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	i915_gem_unload(dev);

	mtx_destroy(&dev_priv->irq_lock);

	if (dev_priv->tq != NULL)
		taskqueue_free(dev_priv->tq);

	bus_generic_detach(dev->device);
	drm_rmmap(dev, dev_priv->mmio_map);
	intel_teardown_gmbus(dev);

	mtx_destroy(&dev_priv->dpio_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->error_completion_lock);
	mtx_destroy(&dev_priv->rps_lock);
	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
	    DRM_MEM_DRIVER);

	return (0);
}

int
i915_driver_unload(struct drm_device *dev)
{

	return (i915_driver_unload_int(dev, true));
}

int
i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
	    M_WAITOK | M_ZERO);

	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
	file_priv->driver_priv = i915_file_priv;

	drm_gem_names_init(&i915_file_priv->context_idr);

	return (0);
}

void
i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
#if 1
		KIB_NOTYET();
#else
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}
	i915_gem_lastclose(dev);
	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{

	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	mtx_destroy(&i915_file_priv->mm.lck);
	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}
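
/*
 * Ioctl permission flags, briefly: DRM_AUTH requires an authenticated
 * client, DRM_MASTER and DRM_ROOT_ONLY restrict the call to the DRM
 * master or to root, and DRM_UNLOCKED skips taking the global DRM lock
 * around the handler.
 */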
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};

#ifdef COMPAT_FREEBSD32
extern drm_ioctl_desc_t i915_compat_ioctls[];
extern int i915_compat_ioctls_nr;
#endif

struct drm_driver_info i915_driver_info = {
	.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
	    DRIVER_GEM /*| DRIVER_MODESET*/,

	.buf_priv_size	= sizeof(drm_i915_private_t),
	.load		= i915_driver_load,
	.open		= i915_driver_open,
	.unload		= i915_driver_unload,
	.preclose	= i915_driver_preclose,
	.lastclose	= i915_driver_lastclose,
	.postclose	= i915_driver_postclose,
	.device_is_agp	= i915_driver_device_is_agp,
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_pager_ops,
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,
	.sysctl_init	= i915_sysctl_init,
	.sysctl_cleanup	= i915_sysctl_cleanup,

	.ioctls		= i915_ioctls,
#ifdef COMPAT_FREEBSD32
	.compat_ioctls  = i915_compat_ioctls,
	.compat_ioctls_nr = &i915_compat_ioctls_nr,
#endif
	.max_ioctl	= DRM_ARRAY_SIZE(i915_ioctls),

	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp, as
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}