/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100	"radeonkmsfw_R100_cp"
#define FIRMWARE_R200	"radeonkmsfw_R200_cp"
#define FIRMWARE_R300	"radeonkmsfw_R300_cp"
#define FIRMWARE_R420	"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690	"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600	"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520	"radeonkmsfw_R520_cp"

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 * and others in some cases.
 */

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
					break;
				DRM_UDELAY(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
					break;
				DRM_UDELAY(1);
			}
		}
	} else {
		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
					break;
				DRM_UDELAY(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
					break;
				DRM_UDELAY(1);
			}
		}
	}
}

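/*
 * Note: the two back-to-back polling loops above are intentional.  The
 * first waits for any vblank interval that is already in progress to
 * end, and the second waits for the next one to begin, so the caller
 * always returns on a fresh vblank edge rather than partway through
 * the current interval.
 */
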
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		DRM_UDELAY(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}

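/*
 * Note: radeon_crtc->crtc_offset appears to be the register-bank delta
 * between the two CRTCs (0 for CRTC1), so RADEON_CRTC_OFFSET +
 * crtc_offset addresses either CRTC's scanout register with the same
 * code path.  Writing the new base with OFFSET_LOCK set holds off the
 * double-buffered update; clearing the lock lets it latch at vblank.
 */
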
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

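/*
 * Note: the up/downclock logic above appears to assume the power state
 * table is sorted from lowest to highest clocks, which is why a plain
 * downclock steps to index - 1 and an upclock to index + 1; the scan
 * loops only exist to skip SINGLE_DISPLAY_ONLY states when more than
 * one crtc is active.
 */
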
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

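/*
 * Note: "sh"/"mh" in the profile names stand for single-head and
 * multi-head display configurations; each profile stores a power state
 * index (ps_idx) and a clock mode index (cm_idx) for both the dpms-off
 * and dpms-on cases.  Every profile here uses clock mode 0, matching
 * the "only one clock mode per power state" rule noted above.
 */
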
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				DRM_UDELAY(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				DRM_UDELAY(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}

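/*
 * Note: voltage->delay is consumed in two ways above.  For the GPIO
 * path it is a busy-wait after toggling the regulator pin; for
 * hardware-synchronized voltage drops it must be one of the discrete
 * values 33/66/99/132 that map onto the VOLTAGE_DELAY_SEL field (any
 * other value leaves the field at its cleared default).
 */
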
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

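/*
 * Note: the interrupt polarity is always programmed opposite to the
 * pin's current state (INT_POL cleared while a panel is sensed, set
 * while disconnected), so the next hotplug interrupt fires on the
 * transition: a disconnect when connected, a connect when not.
 */
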
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard
	 * this entry; otherwise, if the first GPU GART read hits it,
	 * the access could end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		DRM_ERROR("R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

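/*
 * Note: each PCI GART entry is a single little-endian 32-bit dword
 * holding the bus address of one GPU page, which is why
 * r100_pci_gart_init() sizes the table as num_gpu_pages * 4 bytes and
 * r100_pci_gart_set_page() below stores
 * cpu_to_le32(lower_32_bits(addr)).
 */
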
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	u32 *gtt = rdev->gart.ptr;

	/* valid page indices are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	gtt[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	DRM_MDELAY(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

irqreturn_t r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				DRM_WAKEUP(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				DRM_WAKEUP(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	if (queue_hotplug)
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

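/*
 * Note: r100_irq_ack() both samples and clears GEN_INT_STATUS by
 * writing the raw status back, so r100_irq_process() loops until a
 * subsequent ack returns 0.  This catches interrupt sources that
 * assert while earlier ones are being serviced, at the cost of
 * re-reading the status register once per pass.
 */
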
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib scheduling and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	panic("%s: Unused on older asics", __func__);
}

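/*
 * Note: the fence emit sequence above is ordering-sensitive.  The
 * destination and depth caches are flushed and the engines waited
 * idle and clean before the HDP read buffer is invalidated (by
 * toggling HOST_PATH_CNTL), and only then is the sequence number
 * written to the scratch register and the software interrupt fired,
 * so the CPU never observes a fence signal ahead of the VRAM writes
 * it covers.
 */
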
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return r;
}

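/*
 * Note: the blit treats the copy as a 2D rectangle one GPU page wide:
 * each row is one page (stride_bytes = RADEON_GPU_PAGE_SIZE) and each
 * loop copies at most 8191 rows, which looks like the natural limit of
 * a 13-bit (0x1fff) height field in BITBLT_MULTI.  The pitch/offset
 * dwords encode the pitch in 64-byte units in the top bits and the
 * buffer offset in 1 KiB units in the low bits.
 */
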
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = 0;
	rdev->me_fw = firmware_get(fw_name);
	if (rdev->me_fw == NULL) {
		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
			  fw_name);
		err = -ENOENT;
	} else if (rdev->me_fw->datasize % 8) {
		DRM_ERROR(
			"radeon_cp: Bogus length %zu in firmware \"%s\"\n",
			rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
		rdev->me_fw = NULL;
	}
	return err;
}

/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
{

	if (rdev->me_fw != NULL) {
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
		rdev->me_fw = NULL;
	}
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)rdev->me_fw->data;
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

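/*
 * Note: CP microcode images are stored as big-endian 64-bit words;
 * each pair of 32-bit halves is byte-swapped with be32_to_cpup() and
 * written to the DATAH/DATAL register pair.  ME_RAM_ADDR is written
 * once up front, so the microcode RAM address presumably
 * auto-increments as each DATAH/DATAL pair lands.  The datasize % 8
 * check in r100_cp_init_microcode() rejects images that are not a
 * whole number of such words.
 */
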
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the cp read 1024 bytes (16 dword/quadword) update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expire
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	DRM_UDELAY(10);
	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
	/* Set cp mode to bus mastering & enable cp*/
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

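/*
 * Note on the ring sizing arithmetic above: drm_order() returns
 * log2(ring_size / 8), i.e. the ring size expressed in 8-byte
 * quadwords, which is what the RB_BUFSZ field expects; recomputing
 * ring_size = (1 << (rb_bufsz + 1)) * 4 then rounds the byte size back
 * to the matching power of two.  For example, asking for 1 MiB gives
 * rb_bufsz = 17 and ring_size = (1 << 18) * 4 = 1 MiB again.
 */
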
void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_cs_reloc *reloc;
	u32 value;

	r = r100_cs_packet_next_reloc(p, &reloc);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		r100_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		r100_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx + 2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}

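/*
 * Note: LOAD_VBPNTR describes vertex arrays in pairs.  After the count
 * dword, each pair of arrays shares three dwords (one packed dword
 * carrying both element sizes, followed by two buffer addresses that
 * each need a relocation), which is why the loop above fills two
 * track->arrays entries and advances idx by 3 per iteration, with a
 * trailing single-array case when the count is odd.
 */
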
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set.  Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet
 * type is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

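/*
 * Note: a CP packet header encodes the packet type in its top bits and
 * a count field such that pkt->count + 1 payload dwords follow the
 * header; for type-0 packets it also carries the starting register
 * offset and the one_reg_wr flag (write every payload dword to the
 * same register rather than to consecutive ones, as seen in
 * r100_cs_parse_packet0() above, which only advances reg when
 * one_reg_wr is clear).
 */
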
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

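/*
 * Note on the magic indices above: on entry p->idx points at the
 * WAIT_UNTIL header, so h_idx = p->idx - 2 is the VLINE_START_END
 * header userspace emitted just before it; h_idx + 2 and h_idx + 3 are
 * the wait packet's header and payload (the two dwords nop'ed out for
 * a disabled crtc), and h_idx + 5 is the crtc id carried in the
 * trailing relocation NOP.  Retargeting crtc 1 only requires rewriting
 * the register field of the PACKET0 header plus setting the
 * CRTC-select bit in the wait value.
 */
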
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

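/*
 * Worked example for r100_get_vtx_size(): the base size of 2 appears
 * to cover the mandatory X/Y pair, so a format with
 * RADEON_SE_VTX_FMT_Z, a packed color and one 2D texcoord set comes
 * out as 2 (XY) + 1 (Z) + 1 (PKCOLOR) + 2 (ST0) = 6 dwords per vertex.
 */
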
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) 1872 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1873 tmp = (idx_value >> 23) & 0x7; 1874 if (tmp == 2 || tmp == 6) 1875 track->textures[i].roundup_w = false; 1876 tmp = (idx_value >> 27) & 0x7; 1877 if (tmp == 2 || tmp == 6) 1878 track->textures[i].roundup_h = false; 1879 track->tex_dirty = true; 1880 break; 1881 case RADEON_PP_TXFORMAT_0: 1882 case RADEON_PP_TXFORMAT_1: 1883 case RADEON_PP_TXFORMAT_2: 1884 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1885 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1886 track->textures[i].use_pitch = 1; 1887 } else { 1888 track->textures[i].use_pitch = 0; 1889 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1890 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1891 } 1892 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1893 track->textures[i].tex_coord_type = 2; 1894 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { 1895 case RADEON_TXFORMAT_I8: 1896 case RADEON_TXFORMAT_RGB332: 1897 case RADEON_TXFORMAT_Y8: 1898 track->textures[i].cpp = 1; 1899 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1900 break; 1901 case RADEON_TXFORMAT_AI88: 1902 case RADEON_TXFORMAT_ARGB1555: 1903 case RADEON_TXFORMAT_RGB565: 1904 case RADEON_TXFORMAT_ARGB4444: 1905 case RADEON_TXFORMAT_VYUY422: 1906 case RADEON_TXFORMAT_YVYU422: 1907 case RADEON_TXFORMAT_SHADOW16: 1908 case RADEON_TXFORMAT_LDUDV655: 1909 case RADEON_TXFORMAT_DUDV88: 1910 track->textures[i].cpp = 2; 1911 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1912 break; 1913 case RADEON_TXFORMAT_ARGB8888: 1914 case RADEON_TXFORMAT_RGBA8888: 1915 case RADEON_TXFORMAT_SHADOW32: 1916 case RADEON_TXFORMAT_LDUDUV8888: 1917 track->textures[i].cpp = 4; 1918 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1919 break; 1920 case RADEON_TXFORMAT_DXT1: 1921 track->textures[i].cpp = 1; 1922 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 1923 break; 1924 case RADEON_TXFORMAT_DXT23: 1925 case RADEON_TXFORMAT_DXT45: 1926 track->textures[i].cpp = 1; 1927 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 1928 break; 1929 } 1930 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1931 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1932 track->tex_dirty = true; 1933 break; 1934 case RADEON_PP_CUBIC_FACES_0: 1935 case RADEON_PP_CUBIC_FACES_1: 1936 case RADEON_PP_CUBIC_FACES_2: 1937 tmp = idx_value; 1938 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1939 for (face = 0; face < 4; face++) { 1940 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1941 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1942 } 1943 track->tex_dirty = true; 1944 break; 1945 default: 1946 DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", 1947 reg, idx); 1948 return -EINVAL; 1949 } 1950 return 0; 1951} 1952 1953int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1954 struct radeon_cs_packet *pkt, 1955 struct radeon_bo *robj) 1956{ 1957 unsigned idx; 1958 u32 value; 1959 idx = pkt->idx + 1; 1960 value = radeon_get_ib_value(p, idx + 2); 1961 if ((value + 1) > radeon_bo_size(robj)) { 1962 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1963 "(need %u have %lu) !\n", 1964 value + 1, 1965 radeon_bo_size(robj)); 1966 return -EINVAL; 1967 } 1968 return 0; 1969} 1970 1971static int r100_packet3_check(struct 
radeon_cs_parser *p, 1972 struct radeon_cs_packet *pkt) 1973{ 1974 struct radeon_cs_reloc *reloc; 1975 struct r100_cs_track *track; 1976 unsigned idx; 1977 volatile uint32_t *ib; 1978 int r; 1979 1980 ib = p->ib.ptr; 1981 idx = pkt->idx + 1; 1982 track = (struct r100_cs_track *)p->track; 1983 switch (pkt->opcode) { 1984 case PACKET3_3D_LOAD_VBPNTR: 1985 r = r100_packet3_load_vbpntr(p, pkt, idx); 1986 if (r) 1987 return r; 1988 break; 1989 case PACKET3_INDX_BUFFER: 1990 r = r100_cs_packet_next_reloc(p, &reloc); 1991 if (r) { 1992 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1993 r100_cs_dump_packet(p, pkt); 1994 return r; 1995 } 1996 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); 1997 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1998 if (r) { 1999 return r; 2000 } 2001 break; 2002 case 0x23: 2003 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 2004 r = r100_cs_packet_next_reloc(p, &reloc); 2005 if (r) { 2006 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 2007 r100_cs_dump_packet(p, pkt); 2008 return r; 2009 } 2010 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); 2011 track->num_arrays = 1; 2012 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); 2013 2014 track->arrays[0].robj = reloc->robj; 2015 track->arrays[0].esize = track->vtx_size; 2016 2017 track->max_indx = radeon_get_ib_value(p, idx+1); 2018 2019 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); 2020 track->immd_dwords = pkt->count - 1; 2021 r = r100_cs_track_check(p->rdev, track); 2022 if (r) 2023 return r; 2024 break; 2025 case PACKET3_3D_DRAW_IMMD: 2026 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 2027 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 2028 return -EINVAL; 2029 } 2030 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); 2031 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2032 track->immd_dwords = pkt->count - 1; 2033 r = r100_cs_track_check(p->rdev, track); 2034 if (r) 2035 return r; 2036 break; 2037 /* triggers drawing using in-packet vertex data */ 2038 case PACKET3_3D_DRAW_IMMD_2: 2039 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 2040 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 2041 return -EINVAL; 2042 } 2043 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2044 track->immd_dwords = pkt->count; 2045 r = r100_cs_track_check(p->rdev, track); 2046 if (r) 2047 return r; 2048 break; 2049 /* triggers drawing using in-packet vertex data */ 2050 case PACKET3_3D_DRAW_VBUF_2: 2051 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2052 r = r100_cs_track_check(p->rdev, track); 2053 if (r) 2054 return r; 2055 break; 2056 /* triggers drawing of vertex buffers setup elsewhere */ 2057 case PACKET3_3D_DRAW_INDX_2: 2058 track->vap_vf_cntl = radeon_get_ib_value(p, idx); 2059 r = r100_cs_track_check(p->rdev, track); 2060 if (r) 2061 return r; 2062 break; 2063 /* triggers drawing using indices to vertex buffer */ 2064 case PACKET3_3D_DRAW_VBUF: 2065 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2066 r = r100_cs_track_check(p->rdev, track); 2067 if (r) 2068 return r; 2069 break; 2070 /* triggers drawing of vertex buffers setup elsewhere */ 2071 case PACKET3_3D_DRAW_INDX: 2072 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 2073 r = r100_cs_track_check(p->rdev, track); 2074 if (r) 2075 return r; 2076 break; 2077 /* triggers drawing using indices to vertex buffer */ 2078 case PACKET3_3D_CLEAR_HIZ: 2079 case PACKET3_3D_CLEAR_ZMASK: 2080 if (p->rdev->hyperz_filp != p->filp) 
2081 return -EINVAL; 2082 break; 2083 case PACKET3_NOP: 2084 break; 2085 default: 2086 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2087 return -EINVAL; 2088 } 2089 return 0; 2090} 2091 2092int r100_cs_parse(struct radeon_cs_parser *p) 2093{ 2094 struct radeon_cs_packet pkt; 2095 struct r100_cs_track *track; 2096 int r; 2097 2098 track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 2099 if (!track) 2100 return -ENOMEM; 2101 r100_cs_track_clear(p->rdev, track); 2102 p->track = track; 2103 do { 2104 r = r100_cs_packet_parse(p, &pkt, p->idx); 2105 if (r) { 2106 free(p->track, DRM_MEM_DRIVER); 2107 p->track = NULL; 2108 return r; 2109 } 2110 p->idx += pkt.count + 2; 2111 switch (pkt.type) { 2112 case PACKET_TYPE0: 2113 if (p->rdev->family >= CHIP_R200) 2114 r = r100_cs_parse_packet0(p, &pkt, 2115 p->rdev->config.r100.reg_safe_bm, 2116 p->rdev->config.r100.reg_safe_bm_size, 2117 &r200_packet0_check); 2118 else 2119 r = r100_cs_parse_packet0(p, &pkt, 2120 p->rdev->config.r100.reg_safe_bm, 2121 p->rdev->config.r100.reg_safe_bm_size, 2122 &r100_packet0_check); 2123 break; 2124 case PACKET_TYPE2: 2125 break; 2126 case PACKET_TYPE3: 2127 r = r100_packet3_check(p, &pkt); 2128 break; 2129 default: 2130 DRM_ERROR("Unknown packet type %d !\n", 2131 pkt.type); 2132 free(p->track, DRM_MEM_DRIVER); 2133 p->track = NULL; 2134 return -EINVAL; 2135 } 2136 if (r) { 2137 free(p->track, DRM_MEM_DRIVER); 2138 p->track = NULL; 2139 return r; 2140 } 2141 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2142 free(p->track, DRM_MEM_DRIVER); 2143 p->track = NULL; 2144 return 0; 2145} 2146 2147static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2148{ 2149 DRM_ERROR("pitch %d\n", t->pitch); 2150 DRM_ERROR("use_pitch %d\n", t->use_pitch); 2151 DRM_ERROR("width %d\n", t->width); 2152 DRM_ERROR("width_11 %d\n", t->width_11); 2153 DRM_ERROR("height %d\n", t->height); 2154 DRM_ERROR("height_11 %d\n", t->height_11); 2155 DRM_ERROR("num levels %d\n", t->num_levels); 2156 DRM_ERROR("depth %d\n", t->txdepth); 2157 DRM_ERROR("bpp %d\n", t->cpp); 2158 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2159 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2160 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2161 DRM_ERROR("compress format %d\n", t->compress_format); 2162} 2163 2164static int r100_track_compress_size(int compress_format, int w, int h) 2165{ 2166 int block_width, block_height, block_bytes; 2167 int wblocks, hblocks; 2168 int min_wblocks; 2169 int sz; 2170 2171 block_width = 4; 2172 block_height = 4; 2173 2174 switch (compress_format) { 2175 case R100_TRACK_COMP_DXT1: 2176 block_bytes = 8; 2177 min_wblocks = 4; 2178 break; 2179 default: 2180 case R100_TRACK_COMP_DXT35: 2181 block_bytes = 16; 2182 min_wblocks = 2; 2183 break; 2184 } 2185 2186 hblocks = (h + block_height - 1) / block_height; 2187 wblocks = (w + block_width - 1) / block_width; 2188 if (wblocks < min_wblocks) 2189 wblocks = min_wblocks; 2190 sz = wblocks * hblocks * block_bytes; 2191 return sz; 2192} 2193 2194static int r100_cs_track_cube(struct radeon_device *rdev, 2195 struct r100_cs_track *track, unsigned idx) 2196{ 2197 unsigned face, w, h; 2198 struct radeon_bo *cube_robj; 2199 unsigned long size; 2200 unsigned compress_format = track->textures[idx].compress_format; 2201 2202 for (face = 0; face < 5; face++) { 2203 cube_robj = track->textures[idx].cube_info[face].robj; 2204 w = track->textures[idx].cube_info[face].width; 2205 h = 
track->textures[idx].cube_info[face].height; 2206 2207 if (compress_format) { 2208 size = r100_track_compress_size(compress_format, w, h); 2209 } else 2210 size = w * h; 2211 size *= track->textures[idx].cpp; 2212 2213 size += track->textures[idx].cube_info[face].offset; 2214 2215 if (size > radeon_bo_size(cube_robj)) { 2216 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2217 size, radeon_bo_size(cube_robj)); 2218 r100_cs_track_texture_print(&track->textures[idx]); 2219 return -1; 2220 } 2221 } 2222 return 0; 2223} 2224 2225static int r100_cs_track_texture_check(struct radeon_device *rdev, 2226 struct r100_cs_track *track) 2227{ 2228 struct radeon_bo *robj; 2229 unsigned long size; 2230 unsigned u, i, w, h, d; 2231 int ret; 2232 2233 for (u = 0; u < track->num_texture; u++) { 2234 if (!track->textures[u].enabled) 2235 continue; 2236 if (track->textures[u].lookup_disable) 2237 continue; 2238 robj = track->textures[u].robj; 2239 if (robj == NULL) { 2240 DRM_ERROR("No texture bound to unit %u\n", u); 2241 return -EINVAL; 2242 } 2243 size = 0; 2244 for (i = 0; i <= track->textures[u].num_levels; i++) { 2245 if (track->textures[u].use_pitch) { 2246 if (rdev->family < CHIP_R300) 2247 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); 2248 else 2249 w = track->textures[u].pitch / (1 << i); 2250 } else { 2251 w = track->textures[u].width; 2252 if (rdev->family >= CHIP_RV515) 2253 w |= track->textures[u].width_11; 2254 w = w / (1 << i); 2255 if (track->textures[u].roundup_w) 2256 w = roundup_pow_of_two(w); 2257 } 2258 h = track->textures[u].height; 2259 if (rdev->family >= CHIP_RV515) 2260 h |= track->textures[u].height_11; 2261 h = h / (1 << i); 2262 if (track->textures[u].roundup_h) 2263 h = roundup_pow_of_two(h); 2264 if (track->textures[u].tex_coord_type == 1) { 2265 d = (1 << track->textures[u].txdepth) / (1 << i); 2266 if (!d) 2267 d = 1; 2268 } else { 2269 d = 1; 2270 } 2271 if (track->textures[u].compress_format) { 2272 2273 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; 2274 /* compressed textures are block based */ 2275 } else 2276 size += w * h * d; 2277 } 2278 size *= track->textures[u].cpp; 2279 2280 switch (track->textures[u].tex_coord_type) { 2281 case 0: 2282 case 1: 2283 break; 2284 case 2: 2285 if (track->separate_cube) { 2286 ret = r100_cs_track_cube(rdev, track, u); 2287 if (ret) 2288 return ret; 2289 } else 2290 size *= 6; 2291 break; 2292 default: 2293 DRM_ERROR("Invalid texture coordinate type %u for unit " 2294 "%u\n", track->textures[u].tex_coord_type, u); 2295 return -EINVAL; 2296 } 2297 if (size > radeon_bo_size(robj)) { 2298 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2299 "%lu\n", u, size, radeon_bo_size(robj)); 2300 r100_cs_track_texture_print(&track->textures[u]); 2301 return -EINVAL; 2302 } 2303 } 2304 return 0; 2305} 2306 2307int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) 2308{ 2309 unsigned i; 2310 unsigned long size; 2311 unsigned prim_walk; 2312 unsigned nverts; 2313 unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; 2314 2315 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && 2316 !track->blend_read_enable) 2317 num_cb = 0; 2318 2319 for (i = 0; i < num_cb; i++) { 2320 if (track->cb[i].robj == NULL) { 2321 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2322 return -EINVAL; 2323 } 2324 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2325 size += track->cb[i].offset; 2326 if (size > radeon_bo_size(track->cb[i].robj)) { 2327 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2328 "(need %lu have %lu) !\n", i, size, 2329 radeon_bo_size(track->cb[i].robj)); 2330 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2331 i, track->cb[i].pitch, track->cb[i].cpp, 2332 track->cb[i].offset, track->maxy); 2333 return -EINVAL; 2334 } 2335 } 2336 track->cb_dirty = false; 2337 2338 if (track->zb_dirty && track->z_enabled) { 2339 if (track->zb.robj == NULL) { 2340 DRM_ERROR("[drm] No buffer for z buffer !\n"); 2341 return -EINVAL; 2342 } 2343 size = track->zb.pitch * track->zb.cpp * track->maxy; 2344 size += track->zb.offset; 2345 if (size > radeon_bo_size(track->zb.robj)) { 2346 DRM_ERROR("[drm] Buffer too small for z buffer " 2347 "(need %lu have %lu) !\n", size, 2348 radeon_bo_size(track->zb.robj)); 2349 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2350 track->zb.pitch, track->zb.cpp, 2351 track->zb.offset, track->maxy); 2352 return -EINVAL; 2353 } 2354 } 2355 track->zb_dirty = false; 2356 2357 if (track->aa_dirty && track->aaresolve) { 2358 if (track->aa.robj == NULL) { 2359 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2360 return -EINVAL; 2361 } 2362 /* I believe the format comes from colorbuffer0. */ 2363 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2364 size += track->aa.offset; 2365 if (size > radeon_bo_size(track->aa.robj)) { 2366 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2367 "(need %lu have %lu) !\n", i, size, 2368 radeon_bo_size(track->aa.robj)); 2369 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2370 i, track->aa.pitch, track->cb[0].cpp, 2371 track->aa.offset, track->maxy); 2372 return -EINVAL; 2373 } 2374 } 2375 track->aa_dirty = false; 2376 2377 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 2378 if (track->vap_vf_cntl & (1 << 14)) { 2379 nverts = track->vap_alt_nverts; 2380 } else { 2381 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 2382 } 2383 switch (prim_walk) { 2384 case 1: 2385 for (i = 0; i < track->num_arrays; i++) { 2386 size = track->arrays[i].esize * track->max_indx * 4; 2387 if (track->arrays[i].robj == NULL) { 2388 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2389 "bound\n", prim_walk, i); 2390 return -EINVAL; 2391 } 2392 if (size > radeon_bo_size(track->arrays[i].robj)) { 2393 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2394 "need %lu dwords have %lu dwords\n", 2395 prim_walk, i, size >> 2, 2396 radeon_bo_size(track->arrays[i].robj) 2397 >> 2); 2398 DRM_ERROR("Max indices %u\n", track->max_indx); 2399 return -EINVAL; 2400 } 2401 } 2402 break; 2403 case 2: 2404 for (i = 0; i < track->num_arrays; i++) { 2405 size = track->arrays[i].esize * (nverts - 1) * 4; 2406 if (track->arrays[i].robj == NULL) { 2407 DRM_ERROR("(PW %u) Vertex array %u no buffer " 2408 "bound\n", prim_walk, i); 2409 return -EINVAL; 2410 } 2411 if (size > radeon_bo_size(track->arrays[i].robj)) { 2412 dev_err(rdev->dev, "(PW %u) Vertex array %u " 2413 "need %lu dwords have %lu dwords\n", 2414 prim_walk, i, size >> 2, 2415 radeon_bo_size(track->arrays[i].robj) 2416 >> 2); 2417 return -EINVAL; 2418 } 2419 } 
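		/* (Worked example, hypothetical numbers: with esize = 4 dwords,
		 * nverts = 100 and PRIM_WALK == 2, each bound array must hold
		 * at least 4 * (100 - 1) * 4 = 1584 bytes to pass the size
		 * check above.) */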
		break;
	case 3:
		size = track->vtx_size * nverts;
		if (size != track->immd_dwords) {
			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
				  track->immd_dwords, size);
			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
				  nverts, track->vtx_size);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
			  prim_walk);
		return -EINVAL;
	}

	if (track->tex_dirty) {
		track->tex_dirty = false;
		return r100_cs_track_texture_check(rdev, track);
	}
	return 0;
}

void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i, face;

	track->cb_dirty = true;
	track->zb_dirty = true;
	track->tex_dirty = true;
	track->aa_dirty = true;

	if (rdev->family < CHIP_R300) {
		track->num_cb = 1;
		if (rdev->family <= CHIP_RS200)
			track->num_texture = 3;
		else
			track->num_texture = 6;
		track->maxy = 2048;
		track->separate_cube = 1;
	} else {
		track->num_cb = 4;
		track->num_texture = 16;
		track->maxy = 4096;
		track->separate_cube = 0;
		track->aaresolve = false;
		track->aa.robj = NULL;
	}

	for (i = 0; i < track->num_cb; i++) {
		track->cb[i].robj = NULL;
		track->cb[i].pitch = 8192;
		track->cb[i].cpp = 16;
		track->cb[i].offset = 0;
	}
	track->z_enabled = true;
	track->zb.robj = NULL;
	track->zb.pitch = 8192;
	track->zb.cpp = 4;
	track->zb.offset = 0;
	track->vtx_size = 0x7F;
	track->immd_dwords = 0xFFFFFFFFUL;
	track->num_arrays = 11;
	track->max_indx = 0x00FFFFFFUL;
	for (i = 0; i < track->num_arrays; i++) {
		track->arrays[i].robj = NULL;
		track->arrays[i].esize = 0x7F;
	}
	for (i = 0; i < track->num_texture; i++) {
		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		track->textures[i].pitch = 16536;
		track->textures[i].width = 16536;
		track->textures[i].height = 16536;
		track->textures[i].width_11 = 1 << 11;
		track->textures[i].height_11 = 1 << 11;
		track->textures[i].num_levels = 12;
		if (rdev->family <= CHIP_RS200) {
			track->textures[i].tex_coord_type = 0;
			track->textures[i].txdepth = 0;
		} else {
			track->textures[i].txdepth = 16;
			track->textures[i].tex_coord_type = 1;
		}
		track->textures[i].cpp = 64;
		track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
		track->textures[i].enabled = false;
		track->textures[i].lookup_disable = false;
		track->textures[i].roundup_w = true;
		track->textures[i].roundup_h = true;
		if (track->separate_cube)
			for (face = 0; face < 5; face++) {
				track->textures[i].cube_info[face].robj = NULL;
				track->textures[i].cube_info[face].width = 16536;
				track->textures[i].cube_info[face].height = 16536;
				track->textures[i].cube_info[face].offset = 0;
			}
	}
}

/*
 * Global GPU functions
 */
static void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

static int r100_rbbm_fifo_wait_for_entry(struct radeon_device
*rdev, unsigned n) 2540{ 2541 unsigned i; 2542 uint32_t tmp; 2543 2544 for (i = 0; i < rdev->usec_timeout; i++) { 2545 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; 2546 if (tmp >= n) { 2547 return 0; 2548 } 2549 DRM_UDELAY(1); 2550 } 2551 return -1; 2552} 2553 2554int r100_gui_wait_for_idle(struct radeon_device *rdev) 2555{ 2556 unsigned i; 2557 uint32_t tmp; 2558 2559 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { 2560 DRM_ERROR("radeon: wait for empty RBBM fifo failed !" 2561 " Bad things might happen.\n"); 2562 } 2563 for (i = 0; i < rdev->usec_timeout; i++) { 2564 tmp = RREG32(RADEON_RBBM_STATUS); 2565 if (!(tmp & RADEON_RBBM_ACTIVE)) { 2566 return 0; 2567 } 2568 DRM_UDELAY(1); 2569 } 2570 return -1; 2571} 2572 2573int r100_mc_wait_for_idle(struct radeon_device *rdev) 2574{ 2575 unsigned i; 2576 uint32_t tmp; 2577 2578 for (i = 0; i < rdev->usec_timeout; i++) { 2579 /* read MC_STATUS */ 2580 tmp = RREG32(RADEON_MC_STATUS); 2581 if (tmp & RADEON_MC_IDLE) { 2582 return 0; 2583 } 2584 DRM_UDELAY(1); 2585 } 2586 return -1; 2587} 2588 2589bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2590{ 2591 u32 rbbm_status; 2592 2593 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2594 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2595 radeon_ring_lockup_update(ring); 2596 return false; 2597 } 2598 /* force CP activities */ 2599 radeon_ring_force_activity(rdev, ring); 2600 return radeon_ring_test_lockup(rdev, ring); 2601} 2602 2603/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ 2604void r100_enable_bm(struct radeon_device *rdev) 2605{ 2606 uint32_t tmp; 2607 /* Enable bus mastering */ 2608 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 2609 WREG32(RADEON_BUS_CNTL, tmp); 2610} 2611 2612void r100_bm_disable(struct radeon_device *rdev) 2613{ 2614 u32 tmp; 2615 2616 /* disable bus mastering */ 2617 tmp = RREG32(R_000030_BUS_CNTL); 2618 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); 2619 DRM_MDELAY(1); 2620 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); 2621 DRM_MDELAY(1); 2622 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2623 tmp = RREG32(RADEON_BUS_CNTL); 2624 DRM_MDELAY(1); 2625 pci_disable_busmaster(rdev->dev); 2626 DRM_MDELAY(1); 2627} 2628 2629int r100_asic_reset(struct radeon_device *rdev) 2630{ 2631 struct r100_mc_save save; 2632 u32 status, tmp; 2633 int ret = 0; 2634 2635 status = RREG32(R_000E40_RBBM_STATUS); 2636 if (!G_000E40_GUI_ACTIVE(status)) { 2637 return 0; 2638 } 2639 r100_mc_stop(rdev, &save); 2640 status = RREG32(R_000E40_RBBM_STATUS); 2641 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2642 /* stop CP */ 2643 WREG32(RADEON_CP_CSQ_CNTL, 0); 2644 tmp = RREG32(RADEON_CP_RB_CNTL); 2645 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); 2646 WREG32(RADEON_CP_RB_RPTR_WR, 0); 2647 WREG32(RADEON_CP_RB_WPTR, 0); 2648 WREG32(RADEON_CP_RB_CNTL, tmp); 2649 /* save PCI state */ 2650 pci_save_state(device_get_parent(rdev->dev)); 2651 /* disable bus mastering */ 2652 r100_bm_disable(rdev); 2653 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | 2654 S_0000F0_SOFT_RESET_RE(1) | 2655 S_0000F0_SOFT_RESET_PP(1) | 2656 S_0000F0_SOFT_RESET_RB(1)); 2657 RREG32(R_0000F0_RBBM_SOFT_RESET); 2658 DRM_MDELAY(500); 2659 WREG32(R_0000F0_RBBM_SOFT_RESET, 0); 2660 DRM_MDELAY(1); 2661 status = RREG32(R_000E40_RBBM_STATUS); 2662 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2663 /* reset CP */ 2664 WREG32(R_0000F0_RBBM_SOFT_RESET, 
S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	DRM_MDELAY(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	DRM_MDELAY(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
	    G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

void r100_set_common_regs(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	bool force_dac2 = false;
	u32 tmp;

	/* set these so they don't interfere with anything */
	WREG32(RADEON_OV0_SCALE_CNTL, 0);
	WREG32(RADEON_SUBPIC_CNTL, 0);
	WREG32(RADEON_VIPH_CONTROL, 0);
	WREG32(RADEON_I2C_CNTL_1, 0);
	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
	WREG32(RADEON_CAP1_TRIG_CNTL, 0);

	/* always set up dac2 on rn50 and some rv100 as lots
	 * of servers seem to wire it up to a VGA port but
	 * don't report it in the bios connector
	 * table.
	 */
	switch (dev->pci_device) {
	/* RN50 */
	case 0x515e:
	case 0x5969:
		force_dac2 = true;
		break;
	/* RV100 */
	case 0x5159:
	case 0x515a:
		/* DELL triple head servers */
		if ((dev->pci_subvendor == 0x1028 /* DELL */) &&
		    ((dev->pci_subdevice == 0x016c) ||
		     (dev->pci_subdevice == 0x016d) ||
		     (dev->pci_subdevice == 0x016e) ||
		     (dev->pci_subdevice == 0x016f) ||
		     (dev->pci_subdevice == 0x0170) ||
		     (dev->pci_subdevice == 0x017d) ||
		     (dev->pci_subdevice == 0x017e) ||
		     (dev->pci_subdevice == 0x0183) ||
		     (dev->pci_subdevice == 0x018a) ||
		     (dev->pci_subdevice == 0x019a)))
			force_dac2 = true;
		break;
	}

	if (force_dac2) {
		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);

		/* For CRT on DAC2, don't turn it on if BIOS didn't
		   enable it, even if it's detected.
		*/

		/* force it to crtc0 */
		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;

		/* set up the TV DAC */
		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
				 RADEON_TV_DAC_STD_MASK |
				 RADEON_TV_DAC_RDACPD |
				 RADEON_TV_DAC_GDACPD |
				 RADEON_TV_DAC_BDACPD |
				 RADEON_TV_DAC_BGADJ_MASK |
				 RADEON_TV_DAC_DACADJ_MASK);
		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
				RADEON_TV_DAC_NHOLD |
				RADEON_TV_DAC_STD_PS2 |
				(0x58 << 16));

		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
	}

	/* switch PM block to ACPI mode */
	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
	tmp &= ~RADEON_PM_MODE_SEL;
	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}

/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}

static u32 r100_get_accessible_vram(struct radeon_device *rdev)
{
	u32 aper_size;
	u8 byte;

	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);

	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, those with the 2nd generation multifunction PCI interface
	 */
	if (rdev->family == CHIP_RV280 ||
	    rdev->family >= CHIP_RV350) {
		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
		       ~RADEON_HDP_APER_CNTL);
		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
		return aper_size * 2;
	}

	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
	byte = pci_read_config(rdev->dev, 0xe, 1);
	if (byte & 0x80) {
		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
		DRM_INFO("Limiting VRAM to one aperture\n");
		return aper_size;
	}

	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
	 * has set it up. We don't write this as it's broken on some ASICs but
	 * we expect the BIOS to have done the right thing (might be too optimistic...)
	 */
	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
		return aper_size * 2;
	return aper_size;
}

void r100_vram_init_sizes(struct radeon_device *rdev)
{
	u64 config_aper_size;

	/* work out accessible VRAM */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
	/* FIXME we don't use the second aperture yet when we could use it */
	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
		rdev->mc.visible_vram_size = rdev->mc.aper_size;
	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	} else {
		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.real_vram_size == 0) {
			rdev->mc.real_vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		}
		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
		 * Novell bug 204882, along with lots of Ubuntu ones
		 */
		if (rdev->mc.aper_size > config_aper_size)
			config_aper_size = rdev->mc.aper_size;

		if (config_aper_size > rdev->mc.real_vram_size)
			rdev->mc.mc_vram_size = config_aper_size;
		else
			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	}
}

void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(RADEON_CONFIG_CNTL);
	if (state == false) {
		temp &= ~RADEON_CFG_VGA_RAM_EN;
		temp |= RADEON_CFG_VGA_IO_DIS;
	} else {
		temp &= ~RADEON_CFG_VGA_IO_DIS;
	}
	WREG32(RADEON_CONFIG_CNTL, temp);
}

static void r100_mc_init(struct radeon_device *rdev)
{
	u64 base;

	r100_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

/*
 * Indirect registers accessor
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
		(void)RREG32(RADEON_CRTC_GEN_CNTL);
	}
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		DRM_MDELAY(5);
	}

	/* This function is required to work around a hardware bug in some (all?)
	 * revisions of the R300. This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
	 * may not be correct.
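	 *
	 * (A sketch of the resulting access pattern, mirroring what
	 *  r100_pll_rreg() below actually does; all names are the file's own:
	 *
	 *	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	 *	r100_pll_errata_after_index(rdev);  - dummy reads, if needed
	 *	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	 *	r100_pll_errata_after_data(rdev);   - delay and/or CG read-back
	 *  )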
2941 */ 2942 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { 2943 uint32_t save, tmp; 2944 2945 save = RREG32(RADEON_CLOCK_CNTL_INDEX); 2946 tmp = save & ~(0x3f | RADEON_PLL_WR_EN); 2947 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); 2948 tmp = RREG32(RADEON_CLOCK_CNTL_DATA); 2949 WREG32(RADEON_CLOCK_CNTL_INDEX, save); 2950 } 2951} 2952 2953uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2954{ 2955 uint32_t data; 2956 2957 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2958 r100_pll_errata_after_index(rdev); 2959 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2960 r100_pll_errata_after_data(rdev); 2961 return data; 2962} 2963 2964void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2965{ 2966 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2967 r100_pll_errata_after_index(rdev); 2968 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2969 r100_pll_errata_after_data(rdev); 2970} 2971 2972static void r100_set_safe_registers(struct radeon_device *rdev) 2973{ 2974 if (ASIC_IS_RN50(rdev)) { 2975 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2976 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm); 2977 } else if (rdev->family < CHIP_R200) { 2978 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 2979 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm); 2980 } else { 2981 r200_set_safe_registers(rdev); 2982 } 2983} 2984 2985/* 2986 * Debugfs info 2987 */ 2988#if defined(CONFIG_DEBUG_FS) 2989static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) 2990{ 2991 struct drm_info_node *node = (struct drm_info_node *) m->private; 2992 struct drm_device *dev = node->minor->dev; 2993 struct radeon_device *rdev = dev->dev_private; 2994 uint32_t reg, value; 2995 unsigned i; 2996 2997 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); 2998 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); 2999 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 3000 for (i = 0; i < 64; i++) { 3001 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); 3002 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; 3003 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); 3004 value = RREG32(RADEON_RBBM_CMDFIFO_DATA); 3005 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); 3006 } 3007 return 0; 3008} 3009 3010static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) 3011{ 3012 struct drm_info_node *node = (struct drm_info_node *) m->private; 3013 struct drm_device *dev = node->minor->dev; 3014 struct radeon_device *rdev = dev->dev_private; 3015 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3016 uint32_t rdp, wdp; 3017 unsigned count, i, j; 3018 3019 radeon_ring_free_size(rdev, ring); 3020 rdp = RREG32(RADEON_CP_RB_RPTR); 3021 wdp = RREG32(RADEON_CP_RB_WPTR); 3022 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; 3023 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 3024 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 3025 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 3026 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 3027 seq_printf(m, "%u dwords in ring\n", count); 3028 for (j = 0; j <= count; j++) { 3029 i = (rdp + j) & ring->ptr_mask; 3030 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 3031 } 3032 return 0; 3033} 3034 3035 3036static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) 3037{ 3038 struct drm_info_node *node = (struct drm_info_node *) m->private; 3039 struct drm_device *dev = node->minor->dev; 3040 struct radeon_device *rdev = dev->dev_private; 3041 uint32_t 
csq_stat, csq2_stat, tmp; 3042 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; 3043 unsigned i; 3044 3045 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 3046 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); 3047 csq_stat = RREG32(RADEON_CP_CSQ_STAT); 3048 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); 3049 r_rptr = (csq_stat >> 0) & 0x3ff; 3050 r_wptr = (csq_stat >> 10) & 0x3ff; 3051 ib1_rptr = (csq_stat >> 20) & 0x3ff; 3052 ib1_wptr = (csq2_stat >> 0) & 0x3ff; 3053 ib2_rptr = (csq2_stat >> 10) & 0x3ff; 3054 ib2_wptr = (csq2_stat >> 20) & 0x3ff; 3055 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); 3056 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); 3057 seq_printf(m, "Ring rptr %u\n", r_rptr); 3058 seq_printf(m, "Ring wptr %u\n", r_wptr); 3059 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); 3060 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); 3061 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); 3062 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); 3063 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms 3064 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ 3065 seq_printf(m, "Ring fifo:\n"); 3066 for (i = 0; i < 256; i++) { 3067 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3068 tmp = RREG32(RADEON_CP_CSQ_DATA); 3069 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); 3070 } 3071 seq_printf(m, "Indirect1 fifo:\n"); 3072 for (i = 256; i <= 512; i++) { 3073 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3074 tmp = RREG32(RADEON_CP_CSQ_DATA); 3075 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); 3076 } 3077 seq_printf(m, "Indirect2 fifo:\n"); 3078 for (i = 640; i < ib1_wptr; i++) { 3079 WREG32(RADEON_CP_CSQ_ADDR, i << 2); 3080 tmp = RREG32(RADEON_CP_CSQ_DATA); 3081 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); 3082 } 3083 return 0; 3084} 3085 3086static int r100_debugfs_mc_info(struct seq_file *m, void *data) 3087{ 3088 struct drm_info_node *node = (struct drm_info_node *) m->private; 3089 struct drm_device *dev = node->minor->dev; 3090 struct radeon_device *rdev = dev->dev_private; 3091 uint32_t tmp; 3092 3093 tmp = RREG32(RADEON_CONFIG_MEMSIZE); 3094 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); 3095 tmp = RREG32(RADEON_MC_FB_LOCATION); 3096 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); 3097 tmp = RREG32(RADEON_BUS_CNTL); 3098 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); 3099 tmp = RREG32(RADEON_MC_AGP_LOCATION); 3100 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); 3101 tmp = RREG32(RADEON_AGP_BASE); 3102 seq_printf(m, "AGP_BASE 0x%08x\n", tmp); 3103 tmp = RREG32(RADEON_HOST_PATH_CNTL); 3104 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); 3105 tmp = RREG32(0x01D0); 3106 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); 3107 tmp = RREG32(RADEON_AIC_LO_ADDR); 3108 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); 3109 tmp = RREG32(RADEON_AIC_HI_ADDR); 3110 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); 3111 tmp = RREG32(0x01E4); 3112 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); 3113 return 0; 3114} 3115 3116static struct drm_info_list r100_debugfs_rbbm_list[] = { 3117 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, 3118}; 3119 3120static struct drm_info_list r100_debugfs_cp_list[] = { 3121 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, 3122 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, 3123}; 3124 3125static struct drm_info_list r100_debugfs_mc_info_list[] = { 3126 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, 3127}; 3128#endif 3129 3130int r100_debugfs_rbbm_init(struct radeon_device *rdev) 3131{ 3132#if defined(CONFIG_DEBUG_FS) 3133 return 
radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}

int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}

int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}

int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;
	int flags = 0;

	if (rdev->family <= CHIP_RS200) {
		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & (RADEON_TILING_MACRO))
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	/* when we aren't tiling, the pitch seems to need to be further divided down.
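	   (concretely, the block below divides pitch by 16 for byte-swapped,
	   untiled surfaces on RN50, before the generic per-family divide;
	   e.g. a hypothetical 1024-byte pitch would first become 64)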
- tested on power5 + rn50 server */ 3188 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { 3189 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) 3190 if (ASIC_IS_RN50(rdev)) 3191 pitch /= 16; 3192 } 3193 3194 /* r100/r200 divide by 16 */ 3195 if (rdev->family < CHIP_R300) 3196 flags |= pitch / 16; 3197 else 3198 flags |= pitch / 8; 3199 3200 3201 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 3202 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 3203 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 3204 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); 3205 return 0; 3206} 3207 3208void r100_clear_surface_reg(struct radeon_device *rdev, int reg) 3209{ 3210 int surf_index = reg * 16; 3211 WREG32(RADEON_SURFACE0_INFO + surf_index, 0); 3212} 3213 3214void r100_bandwidth_update(struct radeon_device *rdev) 3215{ 3216 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; 3217 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; 3218 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 3219 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 3220 fixed20_12 memtcas_ff[8] = { 3221 dfixed_init(1), 3222 dfixed_init(2), 3223 dfixed_init(3), 3224 dfixed_init(0), 3225 dfixed_init_half(1), 3226 dfixed_init_half(2), 3227 dfixed_init(0), 3228 }; 3229 fixed20_12 memtcas_rs480_ff[8] = { 3230 dfixed_init(0), 3231 dfixed_init(1), 3232 dfixed_init(2), 3233 dfixed_init(3), 3234 dfixed_init(0), 3235 dfixed_init_half(1), 3236 dfixed_init_half(2), 3237 dfixed_init_half(3), 3238 }; 3239 fixed20_12 memtcas2_ff[8] = { 3240 dfixed_init(0), 3241 dfixed_init(1), 3242 dfixed_init(2), 3243 dfixed_init(3), 3244 dfixed_init(4), 3245 dfixed_init(5), 3246 dfixed_init(6), 3247 dfixed_init(7), 3248 }; 3249 fixed20_12 memtrbs[8] = { 3250 dfixed_init(1), 3251 dfixed_init_half(1), 3252 dfixed_init(2), 3253 dfixed_init_half(2), 3254 dfixed_init(3), 3255 dfixed_init_half(3), 3256 dfixed_init(4), 3257 dfixed_init_half(4) 3258 }; 3259 fixed20_12 memtrbs_r4xx[8] = { 3260 dfixed_init(4), 3261 dfixed_init(5), 3262 dfixed_init(6), 3263 dfixed_init(7), 3264 dfixed_init(8), 3265 dfixed_init(9), 3266 dfixed_init(10), 3267 dfixed_init(11) 3268 }; 3269 fixed20_12 min_mem_eff; 3270 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 3271 fixed20_12 cur_latency_mclk, cur_latency_sclk; 3272 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, 3273 disp_drain_rate2, read_return_rate; 3274 fixed20_12 time_disp1_drop_priority; 3275 int c; 3276 int cur_size = 16; /* in octawords */ 3277 int critical_point = 0, critical_point2; 3278/* uint32_t read_return_rate, time_disp1_drop_priority; */ 3279 int stop_req, max_stop_req; 3280 struct drm_display_mode *mode1 = NULL; 3281 struct drm_display_mode *mode2 = NULL; 3282 uint32_t pixel_bytes1 = 0; 3283 uint32_t pixel_bytes2 = 0; 3284 3285 radeon_update_display_priority(rdev); 3286 3287 if (rdev->mode_info.crtcs[0]->base.enabled) { 3288 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 3289 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 3290 } 3291 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3292 if (rdev->mode_info.crtcs[1]->base.enabled) { 3293 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 3294 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; 3295 } 3296 } 3297 3298 min_mem_eff.full = dfixed_const_8(0); 3299 /* get modes */ 3300 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 3301 uint32_t mc_init_misc_lat_timer 
= RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bandwidth for the current mode
	 */
	sclk_ff = rdev->pm.sclk;
	mclk_ff = rdev->pm.mclk;

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = dfixed_const(temp);
	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = dfixed_const(1000);
		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = dfixed_div(pix_clk, temp_ff);
		temp_ff.full = dfixed_const(pixel_bytes1);
		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = dfixed_const(1000);
		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
		temp_ff.full = dfixed_const(pixel_bytes2);
		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
			  "If you have flickering problems, try to lower resolution, refresh rate, or color depth\n");
	}

	/* Get values from the EXT_MEM_CNTL register...converting its contents.
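	 * (The register actually read below is RADEON_MEM_TIMING_CNTL.
	 * Illustrative decode with a hypothetical value of 0x76 on an
	 * RV100/IGP part: mem_trcd = ((0x76 >> 2) & 0x3) + 1 = 2,
	 * mem_trp = (0x76 & 0x3) + 1 = 3, and
	 * mem_tras = ((0x76 & 0x70) >> 4) + 1 = 8 memory clocks.)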
*/ 3347 temp = RREG32(RADEON_MEM_TIMING_CNTL); 3348 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ 3349 mem_trcd = ((temp >> 2) & 0x3) + 1; 3350 mem_trp = ((temp & 0x3)) + 1; 3351 mem_tras = ((temp & 0x70) >> 4) + 1; 3352 } else if (rdev->family == CHIP_R300 || 3353 rdev->family == CHIP_R350) { /* r300, r350 */ 3354 mem_trcd = (temp & 0x7) + 1; 3355 mem_trp = ((temp >> 8) & 0x7) + 1; 3356 mem_tras = ((temp >> 11) & 0xf) + 4; 3357 } else if (rdev->family == CHIP_RV350 || 3358 rdev->family <= CHIP_RV380) { 3359 /* rv3x0 */ 3360 mem_trcd = (temp & 0x7) + 3; 3361 mem_trp = ((temp >> 8) & 0x7) + 3; 3362 mem_tras = ((temp >> 11) & 0xf) + 6; 3363 } else if (rdev->family == CHIP_R420 || 3364 rdev->family == CHIP_R423 || 3365 rdev->family == CHIP_RV410) { 3366 /* r4xx */ 3367 mem_trcd = (temp & 0xf) + 3; 3368 if (mem_trcd > 15) 3369 mem_trcd = 15; 3370 mem_trp = ((temp >> 8) & 0xf) + 3; 3371 if (mem_trp > 15) 3372 mem_trp = 15; 3373 mem_tras = ((temp >> 12) & 0x1f) + 6; 3374 if (mem_tras > 31) 3375 mem_tras = 31; 3376 } else { /* RV200, R200 */ 3377 mem_trcd = (temp & 0x7) + 1; 3378 mem_trp = ((temp >> 8) & 0x7) + 1; 3379 mem_tras = ((temp >> 12) & 0xf) + 4; 3380 } 3381 /* convert to FF */ 3382 trcd_ff.full = dfixed_const(mem_trcd); 3383 trp_ff.full = dfixed_const(mem_trp); 3384 tras_ff.full = dfixed_const(mem_tras); 3385 3386 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 3387 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 3388 data = (temp & (7 << 20)) >> 20; 3389 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { 3390 if (rdev->family == CHIP_RS480) /* don't think rs400 */ 3391 tcas_ff = memtcas_rs480_ff[data]; 3392 else 3393 tcas_ff = memtcas_ff[data]; 3394 } else 3395 tcas_ff = memtcas2_ff[data]; 3396 3397 if (rdev->family == CHIP_RS400 || 3398 rdev->family == CHIP_RS480) { 3399 /* extra cas latency stored in bits 23-25 0-4 clocks */ 3400 data = (temp >> 23) & 0x7; 3401 if (data < 5) 3402 tcas_ff.full += dfixed_const(data); 3403 } 3404 3405 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 3406 /* on the R300, Tcas is included in Trbs. 3407 */ 3408 temp = RREG32(RADEON_MEM_CNTL); 3409 data = (R300_MEM_NUM_CHANNELS_MASK & temp); 3410 if (data == 1) { 3411 if (R300_MEM_USE_CD_CH_ONLY & temp) { 3412 temp = RREG32(R300_MC_IND_INDEX); 3413 temp &= ~R300_MC_IND_ADDR_MASK; 3414 temp |= R300_MC_READ_CNTL_CD_mcind; 3415 WREG32(R300_MC_IND_INDEX, temp); 3416 temp = RREG32(R300_MC_IND_DATA); 3417 data = (R300_MEM_RBS_POSITION_C_MASK & temp); 3418 } else { 3419 temp = RREG32(R300_MC_READ_CNTL_AB); 3420 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3421 } 3422 } else { 3423 temp = RREG32(R300_MC_READ_CNTL_AB); 3424 data = (R300_MEM_RBS_POSITION_A_MASK & temp); 3425 } 3426 if (rdev->family == CHIP_RV410 || 3427 rdev->family == CHIP_R420 || 3428 rdev->family == CHIP_R423) 3429 trbs_ff = memtrbs_r4xx[data]; 3430 else 3431 trbs_ff = memtrbs[data]; 3432 tcas_ff.full += trbs_ff.full; 3433 } 3434 3435 sclk_eff_ff.full = sclk_ff.full; 3436 3437 if (rdev->flags & RADEON_IS_AGP) { 3438 fixed20_12 agpmode_ff; 3439 agpmode_ff.full = dfixed_const(radeon_agpmode); 3440 temp_ff.full = dfixed_const_666(16); 3441 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); 3442 } 3443 /* TODO PCIE lanes may affect this - agpmode == 16?? 
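	 * (as coded, effective sclk is derated by agpmode * 16.666, so a
	 * hypothetical AGP 4x setup loses roughly 66.7 sclk units to bus
	 * overhead; whether the same constant is right for PCIE lane
	 * counts is exactly the open question)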
*/ 3444 3445 if (ASIC_IS_R300(rdev)) { 3446 sclk_delay_ff.full = dfixed_const(250); 3447 } else { 3448 if ((rdev->family == CHIP_RV100) || 3449 rdev->flags & RADEON_IS_IGP) { 3450 if (rdev->mc.vram_is_ddr) 3451 sclk_delay_ff.full = dfixed_const(41); 3452 else 3453 sclk_delay_ff.full = dfixed_const(33); 3454 } else { 3455 if (rdev->mc.vram_width == 128) 3456 sclk_delay_ff.full = dfixed_const(57); 3457 else 3458 sclk_delay_ff.full = dfixed_const(41); 3459 } 3460 } 3461 3462 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); 3463 3464 if (rdev->mc.vram_is_ddr) { 3465 if (rdev->mc.vram_width == 32) { 3466 k1.full = dfixed_const(40); 3467 c = 3; 3468 } else { 3469 k1.full = dfixed_const(20); 3470 c = 1; 3471 } 3472 } else { 3473 k1.full = dfixed_const(40); 3474 c = 3; 3475 } 3476 3477 temp_ff.full = dfixed_const(2); 3478 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); 3479 temp_ff.full = dfixed_const(c); 3480 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); 3481 temp_ff.full = dfixed_const(4); 3482 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); 3483 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); 3484 mc_latency_mclk.full += k1.full; 3485 3486 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); 3487 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); 3488 3489 /* 3490 HW cursor time assuming worst case of full size colour cursor. 3491 */ 3492 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 3493 temp_ff.full += trcd_ff.full; 3494 if (temp_ff.full < tras_ff.full) 3495 temp_ff.full = tras_ff.full; 3496 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); 3497 3498 temp_ff.full = dfixed_const(cur_size); 3499 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); 3500 /* 3501 Find the total latency for the display data. 3502 */ 3503 disp_latency_overhead.full = dfixed_const(8); 3504 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); 3505 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 3506 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 3507 3508 if (mc_latency_mclk.full > mc_latency_sclk.full) 3509 disp_latency.full = mc_latency_mclk.full; 3510 else 3511 disp_latency.full = mc_latency_sclk.full; 3512 3513 /* setup Max GRPH_STOP_REQ default value */ 3514 if (ASIC_IS_RV100(rdev)) 3515 max_stop_req = 0x5c; 3516 else 3517 max_stop_req = 0x7c; 3518 3519 if (mode1) { 3520 /* CRTC1 3521 Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 3522 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] 3523 */ 3524 stop_req = mode1->hdisplay * pixel_bytes1 / 16; 3525 3526 if (stop_req > max_stop_req) 3527 stop_req = max_stop_req; 3528 3529 /* 3530 Find the drain rate of the display buffer. 3531 */ 3532 temp_ff.full = dfixed_const((16/pixel_bytes1)); 3533 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); 3534 3535 /* 3536 Find the critical point of the display buffer. 3537 */ 3538 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); 3539 crit_point_ff.full += dfixed_const_half(0); 3540 3541 critical_point = dfixed_trunc(crit_point_ff); 3542 3543 if (rdev->disp_priority == 2) { 3544 critical_point = 0; 3545 } 3546 3547 /* 3548 The critical point should never be above max_stop_req-4. Setting 3549 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 
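	   (Illustration with hypothetical numbers: disp_drain_rate = 27 and
	   disp_latency = 3 give crit_point_ff = 27 * 3 + 0.5, truncated to a
	   critical_point of 81; with max_stop_req = 0x7c (124) that still
	   passes the max_stop_req - 4 test below and is programmed as-is.)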
3550 */ 3551 if (max_stop_req - critical_point < 4) 3552 critical_point = 0; 3553 3554 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { 3555 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ 3556 critical_point = 0x10; 3557 } 3558 3559 temp = RREG32(RADEON_GRPH_BUFFER_CNTL); 3560 temp &= ~(RADEON_GRPH_STOP_REQ_MASK); 3561 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3562 temp &= ~(RADEON_GRPH_START_REQ_MASK); 3563 if ((rdev->family == CHIP_R350) && 3564 (stop_req > 0x15)) { 3565 stop_req -= 0x10; 3566 } 3567 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3568 temp |= RADEON_GRPH_BUFFER_SIZE; 3569 temp &= ~(RADEON_GRPH_CRITICAL_CNTL | 3570 RADEON_GRPH_CRITICAL_AT_SOF | 3571 RADEON_GRPH_STOP_CNTL); 3572 /* 3573 Write the result into the register. 3574 */ 3575 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3576 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3577 3578#if 0 3579 if ((rdev->family == CHIP_RS400) || 3580 (rdev->family == CHIP_RS480)) { 3581 /* attempt to program RS400 disp regs correctly ??? */ 3582 temp = RREG32(RS400_DISP1_REG_CNTL); 3583 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | 3584 RS400_DISP1_STOP_REQ_LEVEL_MASK); 3585 WREG32(RS400_DISP1_REQ_CNTL1, (temp | 3586 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | 3587 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); 3588 temp = RREG32(RS400_DMIF_MEM_CNTL1); 3589 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | 3590 RS400_DISP1_CRITICAL_POINT_STOP_MASK); 3591 WREG32(RS400_DMIF_MEM_CNTL1, (temp | 3592 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | 3593 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); 3594 } 3595#endif 3596 3597 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", 3598 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ 3599 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); 3600 } 3601 3602 if (mode2) { 3603 u32 grph2_cntl; 3604 stop_req = mode2->hdisplay * pixel_bytes2 / 16; 3605 3606 if (stop_req > max_stop_req) 3607 stop_req = max_stop_req; 3608 3609 /* 3610 Find the drain rate of the display buffer. 
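	   (i.e. disp_drain_rate2 = pix_clk2 / (16 / pixel_bytes2); a
	   hypothetical 32 bpp mode at a 108 MHz pixel clock drains
	   108 / (16 / 4) = 27 units of data per unit time)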
3611 */ 3612 temp_ff.full = dfixed_const((16/pixel_bytes2)); 3613 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); 3614 3615 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3616 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3617 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); 3618 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); 3619 if ((rdev->family == CHIP_R350) && 3620 (stop_req > 0x15)) { 3621 stop_req -= 0x10; 3622 } 3623 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); 3624 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; 3625 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | 3626 RADEON_GRPH_CRITICAL_AT_SOF | 3627 RADEON_GRPH_STOP_CNTL); 3628 3629 if ((rdev->family == CHIP_RS100) || 3630 (rdev->family == CHIP_RS200)) 3631 critical_point2 = 0; 3632 else { 3633 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3634 temp_ff.full = dfixed_const(temp); 3635 temp_ff.full = dfixed_mul(mclk_ff, temp_ff); 3636 if (sclk_ff.full < temp_ff.full) 3637 temp_ff.full = sclk_ff.full; 3638 3639 read_return_rate.full = temp_ff.full; 3640 3641 if (mode1) { 3642 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3643 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); 3644 } else { 3645 time_disp1_drop_priority.full = 0; 3646 } 3647 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3648 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); 3649 crit_point_ff.full += dfixed_const_half(0); 3650 3651 critical_point2 = dfixed_trunc(crit_point_ff); 3652 3653 if (rdev->disp_priority == 2) { 3654 critical_point2 = 0; 3655 } 3656 3657 if (max_stop_req - critical_point2 < 4) 3658 critical_point2 = 0; 3659 3660 } 3661 3662 if (critical_point2 == 0 && rdev->family == CHIP_R300) { 3663 /* some R300 cards have problem with this set to 0 */ 3664 critical_point2 = 0x10; 3665 } 3666 3667 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | 3668 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); 3669 3670 if ((rdev->family == CHIP_RS400) || 3671 (rdev->family == CHIP_RS480)) { 3672#if 0 3673 /* attempt to program RS400 disp2 regs correctly ??? 
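	 * (this block stays compiled out; the driver instead relies on the
	 * hard-coded RS400_DISP2_REQ_CNTL1, RS400_DISP2_REQ_CNTL2, DMIF and
	 * DISP1 values written just after the #endif below)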

int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET0(scratch, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (ring->rptr_save_reg) {
		u32 next_rptr = ring->wptr + 2 + 3;
		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, ib->length_dw);
}
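
/*
 * A note on the CP packets used by the tests above and below: PACKET0(reg, n)
 * emits a type-0 packet header whose following n + 1 dwords are written into
 * consecutive registers starting at reg, and PACKET2(0) is a type-2 NOP used
 * only as filler.  A hypothetical two-register write would look like this
 * (sketch only, RADEON_SOME_REG is a made-up name):
 */
#if 0
	radeon_ring_write(ring, PACKET0(RADEON_SOME_REG, 1));
	radeon_ring_write(ring, first_value);	/* goes to RADEON_SOME_REG */
	radeon_ring_write(ring, second_value);	/* goes to RADEON_SOME_REG + 4 */
#endif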

int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET0(scratch, 0);
	ib.ptr[1] = 0xDEADBEEF;
	ib.ptr[2] = PACKET2(0);
	ib.ptr[3] = PACKET2(0);
	ib.ptr[4] = PACKET2(0);
	ib.ptr[5] = PACKET2(0);
	ib.ptr[6] = PACKET2(0);
	ib.ptr[7] = PACKET2(0);
	ib.length_dw = 8;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP.  We shouldn't need to do that, but better safe
	 * than sorry.
	 */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
	       S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
	       (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
	       S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
	       C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
		       S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
		       (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
		       S_0003F8_CRTC2_DISPLAY_DIS(1) |
		       S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
		       C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}
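
/*
 * r100_mc_stop() and r100_mc_resume() are meant to be used as a pair that
 * brackets any reprogramming of the memory controller, as r100_mc_program()
 * below does.  A minimal sketch of the idiom (not compiled):
 */
#if 0
	struct r100_mc_save save;

	r100_mc_stop(rdev, &save);	/* quiesce CP and display clients */
	/* ... reprogram MC_FB_LOCATION / MC_AGP_LOCATION here ... */
	r100_mc_resume(rdev, &save);	/* restore CRTC state */
#endif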

void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
			       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program MC; this should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

static void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
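
/*
 * MC_FB_LOCATION and MC_AGP_LOCATION pack an address range into a single
 * register: bits 15:0 hold start >> 16 and bits 31:16 hold top >> 16, so
 * apertures are 64 KB aligned.  A worked example with made-up numbers
 * (sketch only, not compiled):
 */
#if 0
	/* 64 MB of VRAM at address 0: start = 0x0000000, end = 0x3FFFFFF */
	u64 vram_start = 0x0000000;
	u64 vram_end   = 0x3FFFFFF;

	/* S_000148_MC_FB_START(0x0000) | S_000148_MC_FB_TOP(0x03FF)
	 * yields 0x03FF0000. */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(vram_start >> 16) |
	       S_000148_MC_FB_TOP(vram_end >> 16));
#endif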

int r100_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r100_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r100_cp_fini_microcode(rdev);
	free(rdev->bios, DRM_MEM_DRIVER);
	rdev->bios = NULL;
}

/*
 * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel.  However, doing our init sequence with the CP and
 * WB stuff set up causes GPU hangs on the RN50 at least.  So at startup
 * do some quick sanity checks and restore sane values to avoid this
 * problem.
 */
void r100_restore_sanity(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	}
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp) {
		WREG32(RADEON_CP_RB_CNTL, 0);
	}
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp) {
		WREG32(RADEON_SCRATCH_UMSK, 0);
	}
}
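
/*
 * r100_init() below is the one-time probe path, and its ordering matters:
 * debugfs and scratch/surface setup come first, then BIOS detection and a
 * pre-post reset, then clocks, AGP, VRAM, fences, IRQs and the memory
 * manager, and only then GART setup and the accelerated startup path, so
 * that each step depends only on state the earlier steps have established.
 */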

int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
	r100_restore_sanity(rdev);
	/* TODO: disabling VGA properly will need to use the VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check whether the card is posted */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize VRAM */
	r100_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		return bus_read_4(rdev->rmmio, reg);
	else {
		unsigned long flags;
		uint32_t ret;

		DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
		DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);

		return ret;
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect)
		bus_write_4(rdev->rmmio, reg, v);
	else {
		unsigned long flags;

		DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
		bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
		DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);
	}
}

u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg < rdev->rio_mem_size)
		return bus_read_4(rdev->rio_mem, reg);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
	}
}

void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg < rdev->rio_mem_size)
		bus_write_4(rdev->rio_mem, reg, v);
	else {
		/* XXX No locking? -- dumbbell@ */
		bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
		bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);
	}
}
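
/*
 * The index/data pair used above is the usual way to reach registers that
 * lie beyond the mapped MMIO BAR: write the register offset to
 * RADEON_MM_INDEX, then read or write RADEON_MM_DATA.  A minimal sketch of
 * an indirect read, assuming a made-up offset outside rmmio_size (not
 * compiled; the real accessor also takes the mmio_idx_lock):
 */
#if 0
	u32 val;

	/* Equivalent to r100_mm_rreg(rdev, offset, true): */
	bus_write_4(rdev->rmmio, RADEON_MM_INDEX, offset);
	val = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
#endif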