1/* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24#include <linux/module.h> 25#include <linux/pci.h> 26#include <linux/reboot.h> 27 28#define SWSMU_CODE_LAYER_L3 29 30#include "amdgpu.h" 31#include "amdgpu_smu.h" 32#include "atomfirmware.h" 33#include "amdgpu_atomfirmware.h" 34#include "amdgpu_atombios.h" 35#include "smu_v14_0.h" 36#include "soc15_common.h" 37#include "atom.h" 38#include "amdgpu_ras.h" 39#include "smu_cmn.h" 40 41#include "asic_reg/mp/mp_14_0_2_offset.h" 42#include "asic_reg/mp/mp_14_0_2_sh_mask.h" 43 44#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 45#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 46#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 47#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 48 49/* 50 * DO NOT use these for err/warn/info/debug messages. 
51 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 52 * They are more MGPU friendly. 53 */ 54#undef pr_err 55#undef pr_warn 56#undef pr_info 57#undef pr_debug 58 59MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); 60MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); 61 62#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 63 64int smu_v14_0_init_microcode(struct smu_context *smu) 65{ 66 struct amdgpu_device *adev = smu->adev; 67 char fw_name[30]; 68 char ucode_prefix[15]; 69 int err = 0; 70 const struct smc_firmware_header_v1_0 *hdr; 71 const struct common_firmware_header *header; 72 struct amdgpu_firmware_info *ucode = NULL; 73 74 /* doesn't need to load smu firmware in IOV mode */ 75 if (amdgpu_sriov_vf(adev)) 76 return 0; 77 78 amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); 79 80 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); 81 82 err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); 83 if (err) 84 goto out; 85 86 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 87 amdgpu_ucode_print_smc_hdr(&hdr->header); 88 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 89 90 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 91 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 92 ucode->ucode_id = AMDGPU_UCODE_ID_SMC; 93 ucode->fw = adev->pm.fw; 94 header = (const struct common_firmware_header *)ucode->fw->data; 95 adev->firmware.fw_size += 96 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 97 } 98 99out: 100 if (err) 101 amdgpu_ucode_release(&adev->pm.fw); 102 return err; 103} 104 105void smu_v14_0_fini_microcode(struct smu_context *smu) 106{ 107 struct amdgpu_device *adev = smu->adev; 108 109 amdgpu_ucode_release(&adev->pm.fw); 110 adev->pm.fw_version = 0; 111} 112 113int smu_v14_0_load_microcode(struct smu_context *smu) 114{ 115 struct amdgpu_device *adev = smu->adev; 116 const uint32_t *src; 117 const struct smc_firmware_header_v1_0 *hdr; 118 uint32_t addr_start = MP1_SRAM; 119 uint32_t 
i; 120 uint32_t smc_fw_size; 121 uint32_t mp1_fw_flags; 122 123 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 124 src = (const uint32_t *)(adev->pm.fw->data + 125 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 126 smc_fw_size = hdr->header.ucode_size_bytes; 127 128 for (i = 1; i < smc_fw_size/4 - 1; i++) { 129 WREG32_PCIE(addr_start, src[i]); 130 addr_start += 4; 131 } 132 133 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 134 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK); 135 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 136 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); 137 138 for (i = 0; i < adev->usec_timeout; i++) { 139 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || 140 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 141 mp1_fw_flags = RREG32_PCIE(MP1_Public | 142 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); 143 else 144 mp1_fw_flags = RREG32_PCIE(MP1_Public | 145 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 146 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 147 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 148 break; 149 udelay(1); 150 } 151 152 if (i == adev->usec_timeout) 153 return -ETIME; 154 155 return 0; 156} 157 158int smu_v14_0_init_pptable_microcode(struct smu_context *smu) 159{ 160 struct amdgpu_device *adev = smu->adev; 161 struct amdgpu_firmware_info *ucode = NULL; 162 uint32_t size = 0, pptable_id = 0; 163 int ret = 0; 164 void *table; 165 166 /* doesn't need to load smu firmware in IOV mode */ 167 if (amdgpu_sriov_vf(adev)) 168 return 0; 169 170 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 171 return 0; 172 173 if (!adev->scpm_enabled) 174 return 0; 175 176 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || 177 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) 178 return 0; 179 180 /* override pptable_id from driver parameter */ 181 if (amdgpu_smu_pptable_id >= 0) { 182 pptable_id = amdgpu_smu_pptable_id; 
183 dev_info(adev->dev, "override pptable id %d\n", pptable_id); 184 } else { 185 pptable_id = smu->smu_table.boot_values.pp_table_id; 186 } 187 188 /* "pptable_id == 0" means vbios carries the pptable. */ 189 if (!pptable_id) 190 return 0; 191 192 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 193 if (ret) 194 return ret; 195 196 smu->pptable_firmware.data = table; 197 smu->pptable_firmware.size = size; 198 199 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE]; 200 ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE; 201 ucode->fw = &smu->pptable_firmware; 202 adev->firmware.fw_size += 203 ALIGN(smu->pptable_firmware.size, PAGE_SIZE); 204 205 return 0; 206} 207 208int smu_v14_0_check_fw_status(struct smu_context *smu) 209{ 210 struct amdgpu_device *adev = smu->adev; 211 uint32_t mp1_fw_flags; 212 213 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || 214 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 215 mp1_fw_flags = RREG32_PCIE(MP1_Public | 216 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); 217 else 218 mp1_fw_flags = RREG32_PCIE(MP1_Public | 219 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 220 221 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 222 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 223 return 0; 224 225 return -EIO; 226} 227 228int smu_v14_0_check_fw_version(struct smu_context *smu) 229{ 230 struct amdgpu_device *adev = smu->adev; 231 uint32_t if_version = 0xff, smu_version = 0xff; 232 uint8_t smu_program, smu_major, smu_minor, smu_debug; 233 int ret = 0; 234 235 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 236 if (ret) 237 return ret; 238 239 smu_program = (smu_version >> 24) & 0xff; 240 smu_major = (smu_version >> 16) & 0xff; 241 smu_minor = (smu_version >> 8) & 0xff; 242 smu_debug = (smu_version >> 0) & 0xff; 243 if (smu->is_apu) 244 adev->pm.fw_version = smu_version; 245 246 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 247 case 
IP_VERSION(14, 0, 0): 248 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 249 break; 250 case IP_VERSION(14, 0, 1): 251 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; 252 break; 253 case IP_VERSION(14, 0, 2): 254 case IP_VERSION(14, 0, 3): 255 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 256 break; 257 default: 258 dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", 259 amdgpu_ip_version(adev, MP1_HWIP, 0)); 260 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV; 261 break; 262 } 263 264 if (adev->pm.fw) 265 dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", 266 smu_program, smu_version, smu_major, smu_minor, smu_debug); 267 268 /* 269 * 1. if_version mismatch is not critical as our fw is designed 270 * to be backward compatible. 271 * 2. New fw usually brings some optimizations. But that's visible 272 * only on the paired driver. 273 * Considering above, we just leave user a verbal message instead 274 * of halt driver loading. 
275 */ 276 if (if_version != smu->smc_driver_if_version) { 277 dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 278 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 279 smu->smc_driver_if_version, if_version, 280 smu_program, smu_version, smu_major, smu_minor, smu_debug); 281 dev_info(adev->dev, "SMU driver if version not matched\n"); 282 } 283 284 return ret; 285} 286 287static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 288{ 289 struct amdgpu_device *adev = smu->adev; 290 uint32_t ppt_offset_bytes; 291 const struct smc_firmware_header_v2_0 *v2; 292 293 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; 294 295 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); 296 *size = le32_to_cpu(v2->ppt_size_bytes); 297 *table = (uint8_t *)v2 + ppt_offset_bytes; 298 299 return 0; 300} 301 302static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table, 303 uint32_t *size, uint32_t pptable_id) 304{ 305 struct amdgpu_device *adev = smu->adev; 306 const struct smc_firmware_header_v2_1 *v2_1; 307 struct smc_soft_pptable_entry *entries; 308 uint32_t pptable_count = 0; 309 int i = 0; 310 311 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; 312 entries = (struct smc_soft_pptable_entry *) 313 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); 314 pptable_count = le32_to_cpu(v2_1->pptable_count); 315 for (i = 0; i < pptable_count; i++) { 316 if (le32_to_cpu(entries[i].id) == pptable_id) { 317 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); 318 *size = le32_to_cpu(entries[i].ppt_size_bytes); 319 break; 320 } 321 } 322 323 if (i == pptable_count) 324 return -EINVAL; 325 326 return 0; 327} 328 329static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size) 330{ 331 struct amdgpu_device *adev = smu->adev; 332 uint16_t atom_table_size; 333 uint8_t frev, crev; 334 int ret, index; 
335 336 dev_info(adev->dev, "use vbios provided pptable\n"); 337 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 338 powerplayinfo); 339 340 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, 341 (uint8_t **)table); 342 if (ret) 343 return ret; 344 345 if (size) 346 *size = atom_table_size; 347 348 return 0; 349} 350 351int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu, 352 void **table, 353 uint32_t *size, 354 uint32_t pptable_id) 355{ 356 const struct smc_firmware_header_v1_0 *hdr; 357 struct amdgpu_device *adev = smu->adev; 358 uint16_t version_major, version_minor; 359 int ret; 360 361 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 362 if (!hdr) 363 return -EINVAL; 364 365 dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id); 366 367 version_major = le16_to_cpu(hdr->header.header_version_major); 368 version_minor = le16_to_cpu(hdr->header.header_version_minor); 369 if (version_major != 2) { 370 dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n", 371 version_major, version_minor); 372 return -EINVAL; 373 } 374 375 switch (version_minor) { 376 case 0: 377 ret = smu_v14_0_set_pptable_v2_0(smu, table, size); 378 break; 379 case 1: 380 ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id); 381 break; 382 default: 383 ret = -EINVAL; 384 break; 385 } 386 387 return ret; 388} 389 390int smu_v14_0_setup_pptable(struct smu_context *smu) 391{ 392 struct amdgpu_device *adev = smu->adev; 393 uint32_t size = 0, pptable_id = 0; 394 void *table; 395 int ret = 0; 396 397 /* override pptable_id from driver parameter */ 398 if (amdgpu_smu_pptable_id >= 0) { 399 pptable_id = amdgpu_smu_pptable_id; 400 dev_info(adev->dev, "override pptable id %d\n", pptable_id); 401 } else { 402 pptable_id = smu->smu_table.boot_values.pp_table_id; 403 } 404 405 /* force using vbios pptable in sriov mode */ 406 if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 
1)) 407 ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size); 408 else 409 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 410 411 if (ret) 412 return ret; 413 414 if (!smu->smu_table.power_play_table) 415 smu->smu_table.power_play_table = table; 416 if (!smu->smu_table.power_play_table_size) 417 smu->smu_table.power_play_table_size = size; 418 419 return 0; 420} 421 422int smu_v14_0_init_smc_tables(struct smu_context *smu) 423{ 424 struct smu_table_context *smu_table = &smu->smu_table; 425 struct smu_table *tables = smu_table->tables; 426 int ret = 0; 427 428 smu_table->driver_pptable = 429 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); 430 if (!smu_table->driver_pptable) { 431 ret = -ENOMEM; 432 goto err0_out; 433 } 434 435 smu_table->max_sustainable_clocks = 436 kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL); 437 if (!smu_table->max_sustainable_clocks) { 438 ret = -ENOMEM; 439 goto err1_out; 440 } 441 442 if (tables[SMU_TABLE_OVERDRIVE].size) { 443 smu_table->overdrive_table = 444 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 445 if (!smu_table->overdrive_table) { 446 ret = -ENOMEM; 447 goto err2_out; 448 } 449 450 smu_table->boot_overdrive_table = 451 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 452 if (!smu_table->boot_overdrive_table) { 453 ret = -ENOMEM; 454 goto err3_out; 455 } 456 } 457 458 smu_table->combo_pptable = 459 kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); 460 if (!smu_table->combo_pptable) { 461 ret = -ENOMEM; 462 goto err4_out; 463 } 464 465 return 0; 466 467err4_out: 468 kfree(smu_table->boot_overdrive_table); 469err3_out: 470 kfree(smu_table->overdrive_table); 471err2_out: 472 kfree(smu_table->max_sustainable_clocks); 473err1_out: 474 kfree(smu_table->driver_pptable); 475err0_out: 476 return ret; 477} 478 479int smu_v14_0_fini_smc_tables(struct smu_context *smu) 480{ 481 struct smu_table_context *smu_table = &smu->smu_table; 482 struct smu_dpm_context 
*smu_dpm = &smu->smu_dpm; 483 484 kfree(smu_table->gpu_metrics_table); 485 kfree(smu_table->combo_pptable); 486 kfree(smu_table->boot_overdrive_table); 487 kfree(smu_table->overdrive_table); 488 kfree(smu_table->max_sustainable_clocks); 489 kfree(smu_table->driver_pptable); 490 smu_table->gpu_metrics_table = NULL; 491 smu_table->combo_pptable = NULL; 492 smu_table->boot_overdrive_table = NULL; 493 smu_table->overdrive_table = NULL; 494 smu_table->max_sustainable_clocks = NULL; 495 smu_table->driver_pptable = NULL; 496 kfree(smu_table->hardcode_pptable); 497 smu_table->hardcode_pptable = NULL; 498 499 kfree(smu_table->ecc_table); 500 kfree(smu_table->metrics_table); 501 kfree(smu_table->watermarks_table); 502 smu_table->ecc_table = NULL; 503 smu_table->metrics_table = NULL; 504 smu_table->watermarks_table = NULL; 505 smu_table->metrics_time = 0; 506 507 kfree(smu_dpm->dpm_context); 508 kfree(smu_dpm->golden_dpm_context); 509 kfree(smu_dpm->dpm_current_power_state); 510 kfree(smu_dpm->dpm_request_power_state); 511 smu_dpm->dpm_context = NULL; 512 smu_dpm->golden_dpm_context = NULL; 513 smu_dpm->dpm_context_size = 0; 514 smu_dpm->dpm_current_power_state = NULL; 515 smu_dpm->dpm_request_power_state = NULL; 516 517 return 0; 518} 519 520int smu_v14_0_init_power(struct smu_context *smu) 521{ 522 struct smu_power_context *smu_power = &smu->smu_power; 523 524 if (smu_power->power_context || smu_power->power_context_size != 0) 525 return -EINVAL; 526 527 smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context), 528 GFP_KERNEL); 529 if (!smu_power->power_context) 530 return -ENOMEM; 531 smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context); 532 533 return 0; 534} 535 536int smu_v14_0_fini_power(struct smu_context *smu) 537{ 538 struct smu_power_context *smu_power = &smu->smu_power; 539 540 if (!smu_power->power_context || smu_power->power_context_size == 0) 541 return -EINVAL; 542 543 kfree(smu_power->power_context); 544 smu_power->power_context = 
NULL; 545 smu_power->power_context_size = 0; 546 547 return 0; 548} 549 550int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu) 551{ 552 int ret, index; 553 uint16_t size; 554 uint8_t frev, crev; 555 struct atom_common_table_header *header; 556 struct atom_firmware_info_v3_4 *v_3_4; 557 struct atom_firmware_info_v3_3 *v_3_3; 558 struct atom_firmware_info_v3_1 *v_3_1; 559 struct atom_smu_info_v3_6 *smu_info_v3_6; 560 struct atom_smu_info_v4_0 *smu_info_v4_0; 561 562 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 563 firmwareinfo); 564 565 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 566 (uint8_t **)&header); 567 if (ret) 568 return ret; 569 570 if (header->format_revision != 3) { 571 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu14\n"); 572 return -EINVAL; 573 } 574 575 switch (header->content_revision) { 576 case 0: 577 case 1: 578 case 2: 579 v_3_1 = (struct atom_firmware_info_v3_1 *)header; 580 smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 581 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 582 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 583 smu->smu_table.boot_values.socclk = 0; 584 smu->smu_table.boot_values.dcefclk = 0; 585 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 586 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 587 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 588 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 589 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 590 smu->smu_table.boot_values.pp_table_id = 0; 591 break; 592 case 3: 593 v_3_3 = (struct atom_firmware_info_v3_3 *)header; 594 smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 595 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 596 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 597 smu->smu_table.boot_values.socclk = 0; 598 
smu->smu_table.boot_values.dcefclk = 0; 599 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 600 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 601 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 602 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 603 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 604 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 605 break; 606 case 4: 607 default: 608 v_3_4 = (struct atom_firmware_info_v3_4 *)header; 609 smu->smu_table.boot_values.revision = v_3_4->firmware_revision; 610 smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; 611 smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; 612 smu->smu_table.boot_values.socclk = 0; 613 smu->smu_table.boot_values.dcefclk = 0; 614 smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; 615 smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; 616 smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; 617 smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; 618 smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; 619 smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id; 620 break; 621 } 622 623 smu->smu_table.boot_values.format_revision = header->format_revision; 624 smu->smu_table.boot_values.content_revision = header->content_revision; 625 626 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 627 smu_info); 628 if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 629 (uint8_t **)&header)) { 630 631 if ((frev == 3) && (crev == 6)) { 632 smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header; 633 634 smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz; 635 smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz; 636 smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz; 637 smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz; 638 } else if ((frev 
== 3) && (crev == 1)) { 639 return 0; 640 } else if ((frev == 4) && (crev == 0)) { 641 smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header; 642 643 smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz; 644 smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz; 645 smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz; 646 smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz; 647 smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz; 648 } else { 649 dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n", 650 (uint32_t)frev, (uint32_t)crev); 651 } 652 } 653 654 return 0; 655} 656 657 658int smu_v14_0_notify_memory_pool_location(struct smu_context *smu) 659{ 660 struct smu_table_context *smu_table = &smu->smu_table; 661 struct smu_table *memory_pool = &smu_table->memory_pool; 662 int ret = 0; 663 uint64_t address; 664 uint32_t address_low, address_high; 665 666 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) 667 return ret; 668 669 address = memory_pool->mc_address; 670 address_high = (uint32_t)upper_32_bits(address); 671 address_low = (uint32_t)lower_32_bits(address); 672 673 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, 674 address_high, NULL); 675 if (ret) 676 return ret; 677 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, 678 address_low, NULL); 679 if (ret) 680 return ret; 681 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, 682 (uint32_t)memory_pool->size, NULL); 683 if (ret) 684 return ret; 685 686 return ret; 687} 688 689int smu_v14_0_set_driver_table_location(struct smu_context *smu) 690{ 691 struct smu_table *driver_table = &smu->smu_table.driver_table; 692 int ret = 0; 693 694 if (driver_table->mc_address) { 695 ret = smu_cmn_send_smc_msg_with_param(smu, 696 SMU_MSG_SetDriverDramAddrHigh, 697 upper_32_bits(driver_table->mc_address), 698 NULL); 699 if (!ret) 700 ret = 
smu_cmn_send_smc_msg_with_param(smu, 701 SMU_MSG_SetDriverDramAddrLow, 702 lower_32_bits(driver_table->mc_address), 703 NULL); 704 } 705 706 return ret; 707} 708 709int smu_v14_0_set_tool_table_location(struct smu_context *smu) 710{ 711 int ret = 0; 712 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; 713 714 if (tool_table->mc_address) { 715 ret = smu_cmn_send_smc_msg_with_param(smu, 716 SMU_MSG_SetToolsDramAddrHigh, 717 upper_32_bits(tool_table->mc_address), 718 NULL); 719 if (!ret) 720 ret = smu_cmn_send_smc_msg_with_param(smu, 721 SMU_MSG_SetToolsDramAddrLow, 722 lower_32_bits(tool_table->mc_address), 723 NULL); 724 } 725 726 return ret; 727} 728 729int smu_v14_0_set_allowed_mask(struct smu_context *smu) 730{ 731 struct smu_feature *feature = &smu->smu_feature; 732 int ret = 0; 733 uint32_t feature_mask[2]; 734 735 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || 736 feature->feature_num < 64) 737 return -EINVAL; 738 739 bitmap_to_arr32(feature_mask, feature->allowed, 64); 740 741 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 742 feature_mask[1], NULL); 743 if (ret) 744 return ret; 745 746 return smu_cmn_send_smc_msg_with_param(smu, 747 SMU_MSG_SetAllowedFeaturesMaskLow, 748 feature_mask[0], 749 NULL); 750} 751 752int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) 753{ 754 int ret = 0; 755 struct amdgpu_device *adev = smu->adev; 756 757 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 758 case IP_VERSION(14, 0, 0): 759 case IP_VERSION(14, 0, 1): 760 case IP_VERSION(14, 0, 2): 761 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 762 return 0; 763 if (enable) 764 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 765 else 766 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 767 break; 768 default: 769 break; 770 } 771 772 return ret; 773} 774 775int smu_v14_0_system_features_control(struct smu_context *smu, 776 bool en) 777{ 778 return smu_cmn_send_smc_msg(smu, 
(en ? SMU_MSG_EnableAllSmuFeatures : 779 SMU_MSG_DisableAllSmuFeatures), NULL); 780} 781 782int smu_v14_0_notify_display_change(struct smu_context *smu) 783{ 784 int ret = 0; 785 786 if (!smu->pm_enabled) 787 return ret; 788 789 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 790 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 791 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 792 793 return ret; 794} 795 796int smu_v14_0_get_current_power_limit(struct smu_context *smu, 797 uint32_t *power_limit) 798{ 799 int power_src; 800 int ret = 0; 801 802 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 803 return -EINVAL; 804 805 power_src = smu_cmn_to_asic_specific_index(smu, 806 CMN2ASIC_MAPPING_PWR, 807 smu->adev->pm.ac_power ? 808 SMU_POWER_SOURCE_AC : 809 SMU_POWER_SOURCE_DC); 810 if (power_src < 0) 811 return -EINVAL; 812 813 ret = smu_cmn_send_smc_msg_with_param(smu, 814 SMU_MSG_GetPptLimit, 815 power_src << 16, 816 power_limit); 817 if (ret) 818 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 819 820 return ret; 821} 822 823int smu_v14_0_set_power_limit(struct smu_context *smu, 824 enum smu_ppt_limit_type limit_type, 825 uint32_t limit) 826{ 827 int ret = 0; 828 829 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 830 return -EINVAL; 831 832 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 833 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 834 return -EOPNOTSUPP; 835 } 836 837 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); 838 if (ret) { 839 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); 840 return ret; 841 } 842 843 smu->current_power_limit = limit; 844 845 return 0; 846} 847 848static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, 849 struct amdgpu_irq_src *source, 850 unsigned tyep, 851 enum amdgpu_interrupt_state state) 852{ 853 uint32_t val = 0; 854 855 switch (state) { 856 case 
AMDGPU_IRQ_STATE_DISABLE: 857 /* For THM irqs */ 858 // TODO 859 860 /* For MP1 SW irqs */ 861 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || 862 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { 863 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); 864 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 865 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); 866 } else { 867 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 868 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 869 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 870 } 871 872 break; 873 case AMDGPU_IRQ_STATE_ENABLE: 874 /* For THM irqs */ 875 // TODO 876 877 /* For MP1 SW irqs */ 878 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || 879 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { 880 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); 881 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 882 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 883 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); 884 885 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); 886 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 887 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); 888 } else { 889 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 890 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 891 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 892 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 893 894 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 895 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 896 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 897 } 898 899 break; 900 default: 901 break; 902 } 903 904 return 0; 905} 906 907#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 908#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 
/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 909 910static int smu_v14_0_irq_process(struct amdgpu_device *adev, 911 struct amdgpu_irq_src *source, 912 struct amdgpu_iv_entry *entry) 913{ 914 struct smu_context *smu = adev->powerplay.pp_handle; 915 uint32_t client_id = entry->client_id; 916 uint32_t src_id = entry->src_id; 917 918 if (client_id == SOC15_IH_CLIENTID_THM) { 919 switch (src_id) { 920 case THM_11_0__SRCID__THM_DIG_THERM_L2H: 921 schedule_delayed_work(&smu->swctf_delayed_work, 922 msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); 923 break; 924 case THM_11_0__SRCID__THM_DIG_THERM_H2L: 925 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); 926 break; 927 default: 928 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", 929 src_id); 930 break; 931 } 932 } 933 934 return 0; 935} 936 937static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = { 938 .set = smu_v14_0_set_irq_state, 939 .process = smu_v14_0_irq_process, 940}; 941 942int smu_v14_0_register_irq_handler(struct smu_context *smu) 943{ 944 struct amdgpu_device *adev = smu->adev; 945 struct amdgpu_irq_src *irq_src = &smu->irq_source; 946 int ret = 0; 947 948 if (amdgpu_sriov_vf(adev)) 949 return 0; 950 951 irq_src->num_types = 1; 952 irq_src->funcs = &smu_v14_0_irq_funcs; 953 954 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 955 THM_11_0__SRCID__THM_DIG_THERM_L2H, 956 irq_src); 957 if (ret) 958 return ret; 959 960 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 961 THM_11_0__SRCID__THM_DIG_THERM_H2L, 962 irq_src); 963 if (ret) 964 return ret; 965 966 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 967 SMU_IH_INTERRUPT_ID_TO_DRIVER, 968 irq_src); 969 if (ret) 970 return ret; 971 972 return ret; 973} 974 975static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu, 976 uint64_t event_arg) 977{ 978 int ret = 0; 979 980 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n"); 981 ret = smu_cmn_send_smc_msg(smu, 
SMU_MSG_GfxDriverResetRecovery, NULL); 982 983 return ret; 984} 985 986int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, 987 uint64_t event_arg) 988{ 989 int ret = -EINVAL; 990 991 switch (event) { 992 case SMU_EVENT_RESET_COMPLETE: 993 ret = smu_v14_0_wait_for_reset_complete(smu, event_arg); 994 break; 995 default: 996 break; 997 } 998 999 return ret; 1000} 1001 1002int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 1003 uint32_t *min, uint32_t *max) 1004{ 1005 int ret = 0, clk_id = 0; 1006 uint32_t param = 0; 1007 uint32_t clock_limit; 1008 1009 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 1010 switch (clk_type) { 1011 case SMU_MCLK: 1012 case SMU_UCLK: 1013 clock_limit = smu->smu_table.boot_values.uclk; 1014 break; 1015 case SMU_GFXCLK: 1016 case SMU_SCLK: 1017 clock_limit = smu->smu_table.boot_values.gfxclk; 1018 break; 1019 case SMU_SOCCLK: 1020 clock_limit = smu->smu_table.boot_values.socclk; 1021 break; 1022 default: 1023 clock_limit = 0; 1024 break; 1025 } 1026 1027 /* clock in Mhz unit */ 1028 if (min) 1029 *min = clock_limit / 100; 1030 if (max) 1031 *max = clock_limit / 100; 1032 1033 return 0; 1034 } 1035 1036 clk_id = smu_cmn_to_asic_specific_index(smu, 1037 CMN2ASIC_MAPPING_CLK, 1038 clk_type); 1039 if (clk_id < 0) { 1040 ret = -EINVAL; 1041 goto failed; 1042 } 1043 param = (clk_id & 0xffff) << 16; 1044 1045 if (max) { 1046 if (smu->adev->pm.ac_power) 1047 ret = smu_cmn_send_smc_msg_with_param(smu, 1048 SMU_MSG_GetMaxDpmFreq, 1049 param, 1050 max); 1051 else 1052 ret = smu_cmn_send_smc_msg_with_param(smu, 1053 SMU_MSG_GetDcModeMaxDpmFreq, 1054 param, 1055 max); 1056 if (ret) 1057 goto failed; 1058 } 1059 1060 if (min) { 1061 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); 1062 if (ret) 1063 goto failed; 1064 } 1065 1066failed: 1067 return ret; 1068} 1069 1070int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, 1071 enum smu_clk_type 
clk_type, 1072 uint32_t min, 1073 uint32_t max) 1074{ 1075 int ret = 0, clk_id = 0; 1076 uint32_t param; 1077 1078 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1079 return 0; 1080 1081 clk_id = smu_cmn_to_asic_specific_index(smu, 1082 CMN2ASIC_MAPPING_CLK, 1083 clk_type); 1084 if (clk_id < 0) 1085 return clk_id; 1086 1087 if (max > 0) { 1088 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1089 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, 1090 param, NULL); 1091 if (ret) 1092 goto out; 1093 } 1094 1095 if (min > 0) { 1096 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1097 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, 1098 param, NULL); 1099 if (ret) 1100 goto out; 1101 } 1102 1103out: 1104 return ret; 1105} 1106 1107int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, 1108 enum smu_clk_type clk_type, 1109 uint32_t min, 1110 uint32_t max) 1111{ 1112 int ret = 0, clk_id = 0; 1113 uint32_t param; 1114 1115 if (min <= 0 && max <= 0) 1116 return -EINVAL; 1117 1118 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1119 return 0; 1120 1121 clk_id = smu_cmn_to_asic_specific_index(smu, 1122 CMN2ASIC_MAPPING_CLK, 1123 clk_type); 1124 if (clk_id < 0) 1125 return clk_id; 1126 1127 if (max > 0) { 1128 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1129 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 1130 param, NULL); 1131 if (ret) 1132 return ret; 1133 } 1134 1135 if (min > 0) { 1136 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1137 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 1138 param, NULL); 1139 if (ret) 1140 return ret; 1141 } 1142 1143 return ret; 1144} 1145 1146int smu_v14_0_set_performance_level(struct smu_context *smu, 1147 enum amd_dpm_forced_level level) 1148{ 1149 struct smu_14_0_dpm_context *dpm_context = 1150 smu->smu_dpm.dpm_context; 1151 struct smu_14_0_dpm_table *gfx_table = 1152 &dpm_context->dpm_tables.gfx_table; 1153 struct 
smu_14_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_14_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_14_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_14_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_14_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	/* a bound left at 0 means "do not touch this clock domain" below */
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;

	/* map the requested level onto per-domain [min, max] ranges */
	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		/* pin every domain to its highest DPM level */
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		/* pin every domain to its lowest DPM level */
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		/* restore the full range so firmware picks levels freely */
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		/* nothing to program; caller takes over clock control */
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/* push each requested range to firmware and mirror it in pstate_table */
	if (sclk_min && sclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	/* VCLK/DCLK are per-VCN-instance; skip harvested instances */
	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

/*
 * Tell the SMU whether the board runs on AC or DC power so it can apply the
 * matching power limits. Returns -EINVAL if the power source has no ASIC
 * mapping.
 */
int smu_v14_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

/*
 * Read the frequency of DPM level @level for @clk_type from firmware.
 * Writes the frequency into *@value (bit 31, the fine-grained flag, is
 * masked off). Returns 0 with *@value untouched when DPM is disabled.
 */
static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint16_t level,
					   uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return
0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	/* argument layout: clock id in the upper, level index in the lower 16 bits */
	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	/* bit 31 is the fine-grained-DPM flag, not part of the frequency */
	*value = *value & 0x7fffffff;

	return ret;
}

/*
 * Query how many DPM levels firmware exposes for @clk_type.
 * Level index 0xff is the firmware convention for "return the level count".
 */
static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);

	return ret;
}

/*
 * Query whether @clk_type uses fine-grained (continuous) rather than
 * discrete DPM. Leaves *@is_fine_grained_dpm untouched and returns 0 when
 * DPM is disabled for the clock.
 */
static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

	/*
	 * BIT31: 1 - fine grained DPM, 0 - discrete DPM.
	 * Fine grained DPM is not handled beyond recording this flag.
	 */
	*is_fine_grained_dpm = value & 0x80000000;

	return 0;
}

/*
 * Populate @single_dpm_table (level count, fine-grained flag, per-level
 * frequencies, min/max) for @clk_type by querying firmware.
 */
int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_14_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v14_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	ret =
smu_v14_0_get_fine_grained_status(smu,
					      clk_type,
					      &single_dpm_table->is_fine_grained);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v14_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		/*
		 * NOTE(review): when count == 1, only ->min is assigned here
		 * and ->max keeps its prior value — confirm whether callers
		 * rely on ->max for single-level tables.
		 */
		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

/*
 * Power VCN instances up or down through the SMU, skipping harvested
 * instances. MP1 14.0.0/14.0.1 use per-instance messages (Vcn0/Vcn1);
 * other versions use a single message with the instance in bits 31:16.
 * Stops and returns on the first failure.
 */
int smu_v14_0_set_vcn_enable(struct smu_context *smu,
			     bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
		    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
			if (i == 0)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
								      i << 16U, NULL);
			else if (i == 1)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
								      i << 16U, NULL);
		} else {
			ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
							      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
							      i << 16U, NULL);
		}

		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Power JPEG instances up or down through the SMU, skipping harvested
 * instances. MP1 14.0.0/14.0.1 use per-instance messages; a second
 * instance (Jpeg1) is only addressed on 14.0.1. Other versions use a
 * single message with the instance in bits 31:16.
 */
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
		    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
			if (i == 0)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
								      i << 16U, NULL);
			else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
								      i << 16U, NULL);
		} else {
			ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
							      SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
							      i << 16U, NULL);
		}

		if (ret)
			return ret;
	}

	return ret;
}

/* Trigger the DC board-timing calibration (BTC) run in firmware. */
int smu_v14_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}

/* Allow (1) or disallow (0) the GPO power optimization in firmware. */
int smu_v14_0_gpo_control(struct smu_context *smu,
			  bool enablement)
{
	int res;

	res = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_AllowGpo,
					      enablement ?
1 : 0,
					      NULL);
	if (res)
		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);

	return res;
}

/*
 * Enable or disable every supported deep-sleep (DS) clock feature, one
 * feature bit at a time. Unsupported features are skipped silently; the
 * first failure aborts the sequence and is returned.
 */
int smu_v14_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ?
"enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}

/*
 * Toggle the GFX ultra-low-voltage feature, if firmware supports it.
 * A no-op (returning 0) when the feature is unsupported.
 */
int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}

/*
 * Arm the requested D3 entry sequence in firmware and mirror the resulting
 * BACO state locally (BACO/BAMACO sequences count as "entered").
 */
int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_baco_seq baco_seq)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_ArmD3,
					      baco_seq,
					      NULL);
	if (ret)
		return ret;

	if (baco_seq == BACO_SEQ_BAMACO ||
	    baco_seq == BACO_SEQ_BACO)
		smu_baco->state = SMU_BACO_STATE_ENTER;
	else
		smu_baco->state = SMU_BACO_STATE_EXIT;

	return 0;
}

/*
 * Report the BACO/MACO capabilities as a bitmask (BACO_SUPPORT |
 * MACO_SUPPORT), or 0 when running under SR-IOV or without platform
 * support.
 */
int smu_v14_0_get_bamaco_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int bamaco_support = 0;

	if (amdgpu_sriov_vf(smu->adev) ||
	    !smu_baco->platform_support)
		return 0;

	if (smu_baco->maco_support)
		bamaco_support |= MACO_SUPPORT;

	/* return true if ASIC is in BACO state already */
	if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return (bamaco_support |= BACO_SUPPORT);

if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return 0;

	return (bamaco_support |= BACO_SUPPORT);
}

/* Return the locally cached BACO state (not read back from hardware). */
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}

/*
 * Transition into or out of BACO. Entering picks BACO vs BAMACO from the
 * runtime-PM mode; exiting additionally clears VBIOS scratch registers 6/7
 * so the coming ASIC re-init starts clean. The cached state is updated only
 * on success.
 */
int smu_v14_0_baco_set_state(struct smu_context *smu,
			     enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_v14_0_baco_get_state(smu) == state)
		return 0;

	if (state == SMU_BACO_STATE_ENTER) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnterBaco,
						      (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
						      BACO_SEQ_BAMACO : BACO_SEQ_BACO,
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_ExitBaco,
					   NULL);
		if (ret)
			return ret;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}

/*
 * Enter BACO and give firmware a short settle time before returning.
 */
int smu_v14_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v14_0_baco_set_state(smu,
				       SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

/* Leave BACO. */
int smu_v14_0_baco_exit(struct smu_context *smu)
{
	return smu_v14_0_baco_set_state(smu,
					SMU_BACO_STATE_EXIT);
}

/*
 * Ask the IMU to power up GFX (with GFXOFF allowed). With PSP-loaded
 * firmware we wait for the response; otherwise the message is fired
 * without waiting.
 */
int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	uint16_t index;
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
						       ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
	}

	index =
smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_EnableGfxImu);
	return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
}

/* Pull the default DPM clocks table from firmware into the driver cache. */
int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
				    smu_table->clocks_table, false);
}

/*
 * Handle the overdrive sysfs commands for the sclk table.
 *
 * PP_OD_EDIT_SCLK_VDDC_TABLE expects two values: input[0] selects the bound
 * (0 = hard min, 1 = soft max) and input[1] is the frequency in MHz, range-
 * checked against the defaults. PP_OD_RESTORE_DEFAULT_TABLE resets both
 * bounds; PP_OD_COMMIT_DPM_TABLE pushes them to firmware. All of this is
 * only permitted in manual performance-level mode.
 */
int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
				enum PP_OD_DPM_TABLE_COMMAND type,
				long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	int ret = 0;

	/* Only allowed in manual mode */
	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			/* new hard minimum must not undercut the default */
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			/* new soft maximum must not exceed the default */
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		break;
	case
PP_OD_COMMIT_DPM_TABLE: 1814 if (size != 0) { 1815 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1816 return -EINVAL; 1817 } 1818 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 1819 dev_err(smu->adev->dev, 1820 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 1821 smu->gfx_actual_hard_min_freq, 1822 smu->gfx_actual_soft_max_freq); 1823 return -EINVAL; 1824 } 1825 1826 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1827 smu->gfx_actual_hard_min_freq, 1828 NULL); 1829 if (ret) { 1830 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 1831 return ret; 1832 } 1833 1834 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1835 smu->gfx_actual_soft_max_freq, 1836 NULL); 1837 if (ret) { 1838 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 1839 return ret; 1840 } 1841 break; 1842 default: 1843 return -ENOSYS; 1844 } 1845 1846 return ret; 1847} 1848 1849