1/* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24#include <linux/module.h> 25#include <linux/pci.h> 26#include <linux/reboot.h> 27 28#define SMU_13_0_PARTIAL_PPTABLE 29#define SWSMU_CODE_LAYER_L3 30 31#include "amdgpu.h" 32#include "amdgpu_smu.h" 33#include "atomfirmware.h" 34#include "amdgpu_atomfirmware.h" 35#include "amdgpu_atombios.h" 36#include "smu_v13_0.h" 37#include "soc15_common.h" 38#include "atom.h" 39#include "amdgpu_ras.h" 40#include "smu_cmn.h" 41 42#include "asic_reg/thm/thm_13_0_2_offset.h" 43#include "asic_reg/thm/thm_13_0_2_sh_mask.h" 44#include "asic_reg/mp/mp_13_0_2_offset.h" 45#include "asic_reg/mp/mp_13_0_2_sh_mask.h" 46#include "asic_reg/smuio/smuio_13_0_2_offset.h" 47#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h" 48 49/* 50 * DO NOT use these for err/warn/info/debug messages. 
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");

/* MP1 SMN C2P message registers used for driver <-> SMU mailbox traffic */
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_82 0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0

/* divisor applied when converting SVI telemetry VID to voltage (see convert_to_vddc) */
#define SMU13_VOLTAGE_SCALE 4

#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3

/* PCIe link controller status registers (SMN address space) */
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1

/* LC_LINK_WIDTH_RD register field value -> PCIe lane count */
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};

/* PMFW-encoded PCIe link speed/width -> decoded values */
const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

/*
 * smu_v13_0_init_microcode - request the SMC firmware image and cache its
 * version.  When firmware loading goes through PSP, the image is also
 * registered in adev->firmware.ucode[] so PSP can upload it.
 * Returns 0 on success or a negative errno; on failure the firmware
 * reference is released again.
 */
int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char fw_name[30];
	char ucode_prefix[15];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* derive the "smu_13_0_x" style file prefix from the MP1 IP version */
	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *)
		adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	/* with PSP load, hand the SMC image to PSP and account its size */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

/*
 * smu_v13_0_fini_microcode - drop the SMC firmware reference taken by
 * smu_v13_0_init_microcode and clear the cached version.
 */
void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

/*
 * smu_v13_0_load_microcode - direct (non-PSP) SMC firmware upload path.
 * The implementation is compiled out (#if 0) and kept for reference;
 * the function currently always returns 0.
 */
int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	/* copy the image into MP1 SRAM, skipping the first and last dword */
	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	/* pulse MP1 reset to start the freshly loaded firmware */
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	/* poll until the firmware reports interrupts enabled, or time out */
	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

int
smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* the pptable ucode entry is only consumed on the PSP load path */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	/* these MP1 versions do not use this separate pptable ucode entry */
	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	/* register the pptable as its own ucode entry for PSP upload */
	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

/*
 * smu_v13_0_check_fw_status - verify the SMU firmware is up by reading
 * the MP1 firmware-flags register (different SMN offset for 13.0.4/11).
 * Returns 0 when the INTERRUPTS_ENABLED flag is set, -EIO otherwise.
 */
int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

/*
 * smu_v13_0_check_fw_version - query the running firmware's version and
 * driver-interface version, record fw_version where the SMU message is
 * the authoritative source, and warn (without failing) on a
 * driver/firmware interface mismatch.
 */
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	/* version dword layout: program | major | minor | debug */
	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
		adev->pm.fw_version = smu_version;

	/* only for dGPU w/ SMU13*/
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a verbal message instead
	 * of halt driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

/*
 * smu_v13_0_set_pptable_v2_0 - point *table/*size at the single soft
 * pptable embedded in a v2.0 SMC firmware header.  Always returns 0.
 */
static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

/*
 * smu_v13_0_set_pptable_v2_1 - search the soft pptable entry list of a
 * v2.1 SMC firmware header for pptable_id and return its location and
 * size.  Returns -EINVAL when no entry matches.
 */
static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	/* loop ran to completion without a match */
	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

/*
 * smu_v13_0_get_pptable_from_vbios - fetch the powerplay data table
 * from the VBIOS via atombios.  *table points into the atom data table;
 * *size (when non-NULL) receives its length.
 */
static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

/*
 * smu_v13_0_get_pptable_from_firmware - locate a soft pptable inside
 * the loaded SMC firmware image, dispatching on the header's v2.x
 * minor version.  Only header major version 2 is supported.
 */
int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * smu_v13_0_setup_pptable - pick the pptable source (vbios vs SMC
 * firmware) and install it as the power play table, unless one was
 * already set.
 */
int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	/* do not clobber a table that was installed earlier */
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

/*
 * smu_v13_0_init_smc_tables - allocate the CPU-side shadow buffers for
 * the SMC tables (driver pptable, max-sustainable clocks, the three
 * overdrive tables when the ASIC supports overdrive, combo pptable).
 * All allocations are unwound on failure via the err*_out labels.
 */
int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err5_out;
	}

	return 0;

err5_out:
	kfree(smu_table->user_overdrive_table);
err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

/*
 * smu_v13_0_fini_smc_tables - free every buffer allocated by
 * smu_v13_0_init_smc_tables plus the lazily allocated metrics,
 * watermarks, ecc and hardcode tables, and the dpm contexts.
 * Pointers are NULLed so the teardown is safe to repeat.
 */
int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

/*
 * smu_v13_0_init_power - allocate the smu_13_0_power_context.
 * Fails with -EINVAL if a context is already installed.
 */
int smu_v13_0_init_power(struct
smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);

	return 0;
}

/*
 * smu_v13_0_fini_power - release the power context allocated by
 * smu_v13_0_init_power.  Returns -EINVAL if none is installed.
 */
int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

/*
 * smu_v13_0_get_vbios_bootup_values - populate
 * smu->smu_table.boot_values from the VBIOS firmwareinfo table
 * (v3.1/v3.3/v3.4 layouts; content revisions >= 4 fall through to the
 * v3.4 layout) and, when present, refine boot clocks from smu_info.
 */
int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		/* v3.1 has no pplib pptable id field */
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	/* best effort: refine boot clocks from the smu_info table if available */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			/* smu_info v3.1 carries no extra bootup clocks */
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}


/*
 * smu_v13_0_notify_memory_pool_location - tell the SMU where the DRAM
 * log pool lives: address high dword, then low dword, then size.
 * A zero-sized or CPU-unmapped pool is silently skipped (returns 0).
 */
int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

/*
 * smu_v13_0_set_min_deep_sleep_dcefclk - program the minimum DCEFCLK
 * the SMU may drop to in deep sleep.
 */
int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

/*
 * smu_v13_0_set_driver_table_location - hand the driver table's GPU
 * address to the SMU (high dword first, then low).  No-op when the
 * table has no MC address yet.
 */
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * smu_v13_0_set_tool_table_location - hand the PM status log (tools)
 * table's GPU address to the SMU (high dword first, then low).
 */
int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * smu_v13_0_init_display_count - report the number of active displays
 * to the SMU.  No-op when PM is disabled.
 */
int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

/*
 * smu_v13_0_set_allowed_mask - push the 64-bit allowed-feature bitmap
 * to the SMU as two 32-bit halves (high dword first).  Fails with
 * -EINVAL when the bitmap is empty or holds fewer than 64 features.
 */
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

/*
 * smu_v13_0_gfx_off_control - allow or disallow GFXOFF on the MP1
 * versions that support it; silently ignored on all other ASICs or
 * when the GFXOFF pp_feature bit is cleared.
 */
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

/*
 * smu_v13_0_system_features_control - enable or disable all SMU
 * features with a single message.
 */
int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

/*
 * smu_v13_0_notify_display_change - tell the SMU that no DC (display
 * core) support is present, when that is the case.
 */
int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!amdgpu_device_has_dc_support(smu->adev))
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);

	return ret;
}

/*
 * smu_v13_0_get_max_sustainable_clock - query the max sustainable
 * frequency for one clock domain: the DC-mode limit is read first and,
 * if it is zero, the AC limit is returned instead.  Returns 0 without
 * touching *clock when the required messages are unsupported.
 */
static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	/* clock id is carried in the upper 16 bits of the argument */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

/*
 * smu_v13_0_init_max_sustainable_clocks - fill the max-sustainable
 * clock table: defaults derived from the boot values (10KHz units
 * divided by 100), then refined per enabled DPM feature via the SMU.
 */
int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	/* display-related clocks are only meaningful with DCEFCLK DPM enabled */
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	/* uclock is capped by the sustainable soc clock */
	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

/*
 * smu_v13_0_get_current_power_limit - read the current PPT limit for
 * the active power source (AC or DC).  Requires the PPT feature.
 */
int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/* power source is carried in the upper 16 bits of the argument */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

/*
 * smu_v13_0_set_power_limit - program a new default PPT limit and
 * remember it in smu->current_power_limit.  Only the default limit
 * type is handled here.
 */
int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

/* Ask the SMU firmware to forward IH host interrupts again. */
static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

/*
 * smu_v13_0_process_pending_interrupt - re-enable host interrupt
 * delivery when AC/DC switching is GPIO-driven and the ACDC feature
 * is enabled.
 */
static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

/*
 * smu_v13_0_enable_thermal_alert - enable the SMU interrupt source and
 * process any interrupt that may already be pending.  No-op when no
 * interrupt types are registered.
 */
int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->irq_source.num_types)
		return 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int
smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	if (!smu->irq_source.num_types)
		return 0;

	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

/* SVI telemetry VID code -> voltage, in SMU13_VOLTAGE_SCALE units. */
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

/* Read the current GFX VDDC from the SVI0 telemetry plane. */
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

/*
 * Service a display clock/voltage request from the display driver by
 * programming a hard minimum on the matching SMU clock domain. The
 * requested frequency arrives in kHz and is sent to the SMU in MHz.
 */
int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		/* DAL may forbid uclk switching while displays are active */
		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

/* Report AUTO when the SMU fan-control feature is active, else MANUAL. */
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

/* Start/stop SMU-driven (automatic) fan control. */
static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

/* Put the fan controller into a static (driver-programmed) PWM mode. */
static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

/*
 * Force a fixed fan speed. @speed is a 0-255 PWM value, scaled against
 * the controller's 100% duty cycle (FMAX_DUTY100) before programming.
 */
int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = min_t(uint32_t, speed, 255);

	/* take control back from the SMU before forcing a duty cycle */
	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

/* Map an AMD_FAN_CTRL_* mode onto pwm/auto fan programming. */
int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

/* Force a fixed fan speed in RPM by programming the tach period. */
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

/* Select the XGMI link pstate: D0 when @pstate != 0, else D3. */
int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ?
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 1255 NULL); 1256 return ret; 1257} 1258 1259static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, 1260 struct amdgpu_irq_src *source, 1261 unsigned tyep, 1262 enum amdgpu_interrupt_state state) 1263{ 1264 struct smu_context *smu = adev->powerplay.pp_handle; 1265 uint32_t low, high; 1266 uint32_t val = 0; 1267 1268 switch (state) { 1269 case AMDGPU_IRQ_STATE_DISABLE: 1270 /* For THM irqs */ 1271 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); 1272 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); 1273 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); 1274 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); 1275 1276 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0); 1277 1278 /* For MP1 SW irqs */ 1279 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 1280 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 1281 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 1282 1283 break; 1284 case AMDGPU_IRQ_STATE_ENABLE: 1285 /* For THM irqs */ 1286 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, 1287 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); 1288 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, 1289 smu->thermal_range.software_shutdown_temp); 1290 1291 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); 1292 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 1293 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 1294 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); 1295 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); 1296 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); 1297 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); 1298 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 1299 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); 1300 1301 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); 1302 val |= (1 
<< THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); 1303 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); 1304 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val); 1305 1306 /* For MP1 SW irqs */ 1307 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 1308 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 1309 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 1310 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 1311 1312 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 1313 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 1314 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 1315 1316 break; 1317 default: 1318 break; 1319 } 1320 1321 return 0; 1322} 1323 1324static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu) 1325{ 1326 return smu_cmn_send_smc_msg(smu, 1327 SMU_MSG_ReenableAcDcInterrupt, 1328 NULL); 1329} 1330 1331#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 1332#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 1333#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 1334 1335static int smu_v13_0_irq_process(struct amdgpu_device *adev, 1336 struct amdgpu_irq_src *source, 1337 struct amdgpu_iv_entry *entry) 1338{ 1339 struct smu_context *smu = adev->powerplay.pp_handle; 1340 uint32_t client_id = entry->client_id; 1341 uint32_t src_id = entry->src_id; 1342 /* 1343 * ctxid is used to distinguish different 1344 * events for SMCToHost interrupt. 
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;
	uint32_t high;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			/* over-temp alert: run delayed SWCTF handling */
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				adev->pm.ac_power = true;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				adev->pm.ac_power = false;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				/* rate-limited logging in workqueue context */
				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
				/* fan fault: tighten the soft CTF threshold */
				high = smu->thermal_range.software_shutdown_temp +
					smu->thermal_range.software_shutdown_temp_offset;
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     high);
				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
					  high,
					  smu->thermal_range.software_shutdown_temp_offset);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
				/* fan recovered: restore the soft CTF threshold */
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     smu->thermal_range.software_shutdown_temp);
				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			default:
				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
					ctxid, client_id);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

/*
 * Register THM (thermal alerts), SMUIO (HW CTF GPIO) and MP1
 * (SMU-to-host) interrupt sources. Skipped under SR-IOV.
 */
int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				SMU_IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

/* Export the cached max sustainable clocks (MHz) to DC, in kHz. */
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

/* Ask the SMU to generate an Azalia (HD audio) D3 PME. */
int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

/*
 * Wait for mode-1 reset recovery: the message only completes once the
 * SMU has finished the reset. @event_arg is currently unused.
 */
static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

/* Dispatch a wait on an SMU event; only RESET_COMPLETE is supported. */
int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Query the min/max attainable frequency (MHz) for @clk_type. When
 * DPM is disabled for the clock, fall back to the VBIOS boot value
 * (10KHz units, hence the /100). Either output pointer may be NULL.
 */
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		/* DC mode caps the max below the AC limit */
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param,
						      min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

/*
 * Program soft min/max frequency limits for @clk_type; a zero min or
 * max leaves that bound untouched. No-op when DPM is disabled for the
 * clock. Frequencies are encoded in the low 16 bits of the message.
 */
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

/*
 * Program hard min/max frequency limits for @clk_type. At least one
 * bound must be non-zero; a zero bound is skipped. No-op when DPM is
 * disabled for the clock.
 * (min/max are unsigned, so the "<= 0" checks reduce to "== 0".)
 */
int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Apply an amd_dpm_forced_level performance policy by deriving per-
 * domain soft frequency limits and pushing them to the SMU.
 */
int smu_v13_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;

	/* translate the policy into per-domain min/max pairs;
	 * a pair left at 0/0 means "do not touch that domain"
	 */
	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Unset those settings for SMU 13.0.2. As soft limits settings
	 * for those clock domains are not supported.
	 */
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
		vclk_min = vclk_max = 0;
		dclk_min = dclk_max = 0;
		fclk_min = fclk_max = 0;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	/* vclk/dclk are per-VCN-instance; skip harvested instances */
	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

/* Tell the SMU which power source (AC/DC) is active. */
int smu_v13_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

/*
 * Read the DPM frequency (MHz) at @level for @clk_type. Bit 31 of the
 * reply (fine-grained DPM flag) is masked off before returning.
 */
int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type, uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	*value = *value & 0x7fffffff;

	return ret;
}

/* Number of DPM levels for @clk_type (level index 0xff queries the count). */
static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
	/* SMU v13.0.2 FW returns 0 based max level, increment by one for it */
	if
	    ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
		++(*value);

	return ret;
}

/*
 * Check whether @clk_type uses fine-grained DPM: BIT31 of the level
 * query reply distinguishes fine-grained (1) from discrete (0) DPM.
 */
static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

	/*
	 * BIT31: 1 - Fine grained DPM, 0 - Dicrete DPM
	 * now, we un-support it
	 */
	*is_fine_grained_dpm = value & 0x80000000;

	return 0;
}

/*
 * Fill @single_dpm_table (level count, per-level frequencies, min and
 * max) for @clk_type by querying the SMU level by level.
 */
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_13_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v13_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
		ret = smu_v13_0_get_fine_grained_status(smu,
							clk_type,
							&single_dpm_table->is_fine_grained);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
			return ret;
		}
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v13_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

/* Raw PCIe link width field from PCIE_LC_LINK_WIDTH_CNTL. */
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

/* Current PCIe link width in lanes (0 on out-of-range readback). */
int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

/* Raw PCIe data-rate field from PCIE_LC_SPEED_CNTL. */
int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

/* Current PCIe link speed (0 on out-of-range readback). */
int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}

/* Power up/down every non-harvested VCN instance (instance id in bits 31:16). */
int smu_v13_0_set_vcn_enable(struct smu_context *smu,
			     bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
						      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
						      i << 16U, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/* Power up/down the JPEG engine. */
int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu, enable ?
					       SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
					       0, NULL);
}

/* Run the DC board timing calibration (BTC). */
int smu_v13_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}

/* Allow/forbid GFX power optimizations (GPO). */
int smu_v13_0_gpo_control(struct smu_context *smu,
			  bool enablement)
{
	int res;

	res = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_AllowGpo,
					      enablement ? 1 : 0,
					      NULL);
	if (res)
		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);

	return res;
}

/* Toggle each supported deep-sleep (DS) clock feature in turn. */
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ?
"enable" : "disable"); 2150 return ret; 2151 } 2152 } 2153 2154 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { 2155 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); 2156 if (ret) { 2157 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); 2158 return ret; 2159 } 2160 } 2161 2162 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { 2163 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); 2164 if (ret) { 2165 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); 2166 return ret; 2167 } 2168 } 2169 2170 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) { 2171 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement); 2172 if (ret) { 2173 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable"); 2174 return ret; 2175 } 2176 } 2177 2178 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) { 2179 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement); 2180 if (ret) { 2181 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable"); 2182 return ret; 2183 } 2184 } 2185 2186 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) { 2187 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement); 2188 if (ret) { 2189 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? 
"enable" : "disable"); 2190 return ret; 2191 } 2192 } 2193 2194 return ret; 2195} 2196 2197int smu_v13_0_gfx_ulv_control(struct smu_context *smu, 2198 bool enablement) 2199{ 2200 int ret = 0; 2201 2202 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) 2203 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); 2204 2205 return ret; 2206} 2207 2208static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, 2209 enum smu_baco_seq baco_seq) 2210{ 2211 struct smu_baco_context *smu_baco = &smu->smu_baco; 2212 int ret; 2213 2214 ret = smu_cmn_send_smc_msg_with_param(smu, 2215 SMU_MSG_ArmD3, 2216 baco_seq, 2217 NULL); 2218 if (ret) 2219 return ret; 2220 2221 if (baco_seq == BACO_SEQ_BAMACO || 2222 baco_seq == BACO_SEQ_BACO) 2223 smu_baco->state = SMU_BACO_STATE_ENTER; 2224 else 2225 smu_baco->state = SMU_BACO_STATE_EXIT; 2226 2227 return 0; 2228} 2229 2230static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) 2231{ 2232 struct smu_baco_context *smu_baco = &smu->smu_baco; 2233 2234 return smu_baco->state; 2235} 2236 2237static int smu_v13_0_baco_set_state(struct smu_context *smu, 2238 enum smu_baco_state state) 2239{ 2240 struct smu_baco_context *smu_baco = &smu->smu_baco; 2241 struct amdgpu_device *adev = smu->adev; 2242 int ret = 0; 2243 2244 if (smu_v13_0_baco_get_state(smu) == state) 2245 return 0; 2246 2247 if (state == SMU_BACO_STATE_ENTER) { 2248 ret = smu_cmn_send_smc_msg_with_param(smu, 2249 SMU_MSG_EnterBaco, 2250 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
2251 BACO_SEQ_BAMACO : BACO_SEQ_BACO, 2252 NULL); 2253 } else { 2254 ret = smu_cmn_send_smc_msg(smu, 2255 SMU_MSG_ExitBaco, 2256 NULL); 2257 if (ret) 2258 return ret; 2259 2260 /* clear vbios scratch 6 and 7 for coming asic reinit */ 2261 WREG32(adev->bios_scratch_reg_offset + 6, 0); 2262 WREG32(adev->bios_scratch_reg_offset + 7, 0); 2263 } 2264 2265 if (!ret) 2266 smu_baco->state = state; 2267 2268 return ret; 2269} 2270 2271int smu_v13_0_get_bamaco_support(struct smu_context *smu) 2272{ 2273 struct smu_baco_context *smu_baco = &smu->smu_baco; 2274 int bamaco_support = 0; 2275 2276 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) 2277 return 0; 2278 2279 if (smu_baco->maco_support) 2280 bamaco_support |= MACO_SUPPORT; 2281 2282 /* return true if ASIC is in BACO state already */ 2283 if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) 2284 return bamaco_support |= BACO_SUPPORT; 2285 2286 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 2287 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) 2288 return 0; 2289 2290 return (bamaco_support |= BACO_SUPPORT); 2291} 2292 2293int smu_v13_0_baco_enter(struct smu_context *smu) 2294{ 2295 struct amdgpu_device *adev = smu->adev; 2296 int ret; 2297 2298 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2299 return smu_v13_0_baco_set_armd3_sequence(smu, 2300 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
2301 BACO_SEQ_BAMACO : BACO_SEQ_BACO); 2302 } else { 2303 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); 2304 if (!ret) 2305 usleep_range(10000, 11000); 2306 2307 return ret; 2308 } 2309} 2310 2311int smu_v13_0_baco_exit(struct smu_context *smu) 2312{ 2313 struct amdgpu_device *adev = smu->adev; 2314 int ret; 2315 2316 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2317 /* Wait for PMFW handling for the Dstate change */ 2318 usleep_range(10000, 11000); 2319 ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2320 } else { 2321 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); 2322 } 2323 2324 if (!ret) 2325 adev->gfx.is_poweron = false; 2326 2327 return ret; 2328} 2329 2330int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu) 2331{ 2332 uint16_t index; 2333 struct amdgpu_device *adev = smu->adev; 2334 2335 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2336 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu, 2337 ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL); 2338 } 2339 2340 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2341 SMU_MSG_EnableGfxImu); 2342 return smu_cmn_send_msg_without_waiting(smu, index, 2343 ENABLE_IMU_ARG_GFXOFF_ENABLE); 2344} 2345 2346int smu_v13_0_od_edit_dpm_table(struct smu_context *smu, 2347 enum PP_OD_DPM_TABLE_COMMAND type, 2348 long input[], uint32_t size) 2349{ 2350 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 2351 int ret = 0; 2352 2353 /* Only allowed in manual mode */ 2354 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 2355 return -EINVAL; 2356 2357 switch (type) { 2358 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2359 if (size != 2) { 2360 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2361 return -EINVAL; 2362 } 2363 2364 if (input[0] == 0) { 2365 if (input[1] < smu->gfx_default_hard_min_freq) { 2366 dev_warn(smu->adev->dev, 2367 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2368 input[1], 
smu->gfx_default_hard_min_freq); 2369 return -EINVAL; 2370 } 2371 smu->gfx_actual_hard_min_freq = input[1]; 2372 } else if (input[0] == 1) { 2373 if (input[1] > smu->gfx_default_soft_max_freq) { 2374 dev_warn(smu->adev->dev, 2375 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2376 input[1], smu->gfx_default_soft_max_freq); 2377 return -EINVAL; 2378 } 2379 smu->gfx_actual_soft_max_freq = input[1]; 2380 } else { 2381 return -EINVAL; 2382 } 2383 break; 2384 case PP_OD_RESTORE_DEFAULT_TABLE: 2385 if (size != 0) { 2386 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2387 return -EINVAL; 2388 } 2389 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2390 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2391 break; 2392 case PP_OD_COMMIT_DPM_TABLE: 2393 if (size != 0) { 2394 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2395 return -EINVAL; 2396 } 2397 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2398 dev_err(smu->adev->dev, 2399 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2400 smu->gfx_actual_hard_min_freq, 2401 smu->gfx_actual_soft_max_freq); 2402 return -EINVAL; 2403 } 2404 2405 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2406 smu->gfx_actual_hard_min_freq, 2407 NULL); 2408 if (ret) { 2409 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2410 return ret; 2411 } 2412 2413 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2414 smu->gfx_actual_soft_max_freq, 2415 NULL); 2416 if (ret) { 2417 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2418 return ret; 2419 } 2420 break; 2421 default: 2422 return -ENOSYS; 2423 } 2424 2425 return ret; 2426} 2427 2428int smu_v13_0_set_default_dpm_tables(struct smu_context *smu) 2429{ 2430 struct smu_table_context *smu_table = &smu->smu_table; 2431 2432 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 
2433 smu_table->clocks_table, false); 2434} 2435 2436void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu) 2437{ 2438 struct amdgpu_device *adev = smu->adev; 2439 2440 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 2441 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 2442 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 2443} 2444 2445int smu_v13_0_mode1_reset(struct smu_context *smu) 2446{ 2447 int ret = 0; 2448 2449 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); 2450 if (!ret) 2451 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 2452 2453 return ret; 2454} 2455 2456int smu_v13_0_update_pcie_parameters(struct smu_context *smu, 2457 uint8_t pcie_gen_cap, 2458 uint8_t pcie_width_cap) 2459{ 2460 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 2461 struct smu_13_0_pcie_table *pcie_table = 2462 &dpm_context->dpm_tables.pcie_table; 2463 int num_of_levels = pcie_table->num_of_link_levels; 2464 uint32_t smu_pcie_arg; 2465 int ret, i; 2466 2467 if (!num_of_levels) 2468 return 0; 2469 2470 if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { 2471 if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) 2472 pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; 2473 2474 if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) 2475 pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; 2476 2477 /* Force all levels to use the same settings */ 2478 for (i = 0; i < num_of_levels; i++) { 2479 pcie_table->pcie_gen[i] = pcie_gen_cap; 2480 pcie_table->pcie_lane[i] = pcie_width_cap; 2481 } 2482 } else { 2483 for (i = 0; i < num_of_levels; i++) { 2484 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 2485 pcie_table->pcie_gen[i] = pcie_gen_cap; 2486 if (pcie_table->pcie_lane[i] > pcie_width_cap) 2487 pcie_table->pcie_lane[i] = pcie_width_cap; 2488 } 2489 } 2490 2491 for (i = 0; i < num_of_levels; i++) { 2492 smu_pcie_arg = i << 16; 2493 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 2494 
smu_pcie_arg |= pcie_table->pcie_lane[i]; 2495 2496 ret = smu_cmn_send_smc_msg_with_param(smu, 2497 SMU_MSG_OverridePcieParameters, 2498 smu_pcie_arg, 2499 NULL); 2500 if (ret) 2501 return ret; 2502 } 2503 2504 return 0; 2505} 2506 2507int smu_v13_0_disable_pmfw_state(struct smu_context *smu) 2508{ 2509 int ret; 2510 struct amdgpu_device *adev = smu->adev; 2511 2512 WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0); 2513 2514 ret = RREG32_PCIE(MP1_Public | 2515 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 2516 2517 return ret == 0 ? 0 : -EINVAL; 2518} 2519 2520int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable) 2521{ 2522 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL); 2523} 2524 2525int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, 2526 struct freq_band_range *exclusion_ranges) 2527{ 2528 WifiBandEntryTable_t wifi_bands; 2529 int valid_entries = 0; 2530 int ret, i; 2531 2532 memset(&wifi_bands, 0, sizeof(wifi_bands)); 2533 for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) { 2534 if (!exclusion_ranges[i].start && !exclusion_ranges[i].end) 2535 break; 2536 2537 /* PMFW expects the inputs to be in Mhz unit */ 2538 wifi_bands.WifiBandEntry[valid_entries].LowFreq = 2539 DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ); 2540 wifi_bands.WifiBandEntry[valid_entries++].HighFreq = 2541 DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ); 2542 } 2543 wifi_bands.WifiBandEntryNum = valid_entries; 2544 2545 /* 2546 * Per confirm with PMFW team, WifiBandEntryNum = 0 2547 * is a valid setting. 2548 * 2549 * Considering the scenarios below: 2550 * - At first the wifi device adds an exclusion range e.g. (2400,2500) to 2551 * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1 2552 * and pass the WifiBandEntry (2400, 2500) to PMFW. 2553 * 2554 * - Later the wifi device removes the wifiband list added above and 2555 * our driver gets notified again. 
At this time, driver will set 2556 * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW. 2557 * 2558 * - PMFW may still need to do some uclk shadow update(e.g. switching 2559 * from shadow clock back to primary clock) on receiving this. 2560 */ 2561 ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true); 2562 if (ret) 2563 dev_warn(smu->adev->dev, "Failed to set wifiband!"); 2564 2565 return ret; 2566} 2567