1/* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include "smu_types.h" 25#define SWSMU_CODE_LAYER_L2 26 27#include "amdgpu.h" 28#include "amdgpu_smu.h" 29#include "smu_v14_0.h" 30#include "smu14_driver_if_v14_0_0.h" 31#include "smu_v14_0_0_ppt.h" 32#include "smu_v14_0_0_ppsmc.h" 33#include "smu_v14_0_0_pmfw.h" 34#include "smu_cmn.h" 35 36/* 37 * DO NOT use these for err/warn/info/debug messages. 38 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 39 * They are more MGPU friendly. 
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* MP1 SMN C2P mailbox registers used for driver <-> PMFW messaging. */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX	0

/* Turn a FEATURE_*_BIT index into a 64-bit feature mask. */
#define FEATURE_MASK(feature) (1ULL << feature)

/*
 * Union of all clock-DPM related PMFW feature bits; used by
 * smu_v14_0_0_is_dpm_running() to decide whether any DPM is active.
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
	FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \
	FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VPE_DPM_BIT))

/*
 * Driver-generic SMU message -> ASIC PPSMC message ID mapping.
 * Third MSG_MAP() argument marks the message valid (all are here).
 */
static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
	MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
	MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
	MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
	MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
	MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
	MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
	MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
	MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
	MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
	MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
	MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
	MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
	MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
	MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
	MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1),
	MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 1),
	MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 1),
	MSG_MAP(PowerUpVpe, PPSMC_MSG_PowerUpVpe, 1),
	MSG_MAP(PowerDownVpe, PPSMC_MSG_PowerDownVpe, 1),
	MSG_MAP(PowerUpUmsch, PPSMC_MSG_PowerUpUmsch, 1),
	MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
	MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
	MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
};

/*
 * Driver-generic feature bit -> PMFW feature bit mapping.
 * FEA_MAP_REVERSE/FEA_MAP_HALF_REVERSE handle names whose DPM_
 * prefix/suffix ordering differs between driver and firmware headers.
 */
static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(LOW_POWER_DCNCLKS),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(ATHUB_PG),
};

/* Driver table ID -> PMFW table ID mapping (only the valid tables). */
static struct cmn2asic_mapping smu_v14_0_0_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

/*
 * smu_v14_0_0_init_smc_tables - register SMU tables and allocate the
 * driver-side shadow buffers for them.
 *
 * The DPMCLOCKS buffer is sized for the larger of the two clock-table
 * layouts (v14.0.0 vs v14.0.1) since both IP revisions share this file.
 * Returns 0 on success, -ENOMEM on any allocation failure (earlier
 * allocations are unwound via the goto ladder).
 */
static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;	/* force a refresh on first metrics read */

	smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->clocks_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

/*
 * smu_v14_0_0_fini_smc_tables - free all buffers allocated by
 * smu_v14_0_0_init_smc_tables(). Pointers are NULLed to guard against
 * double free. Always returns 0.
 */
static int smu_v14_0_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

/*
 * smu_v14_0_0_system_features_control - notify PMFW on feature disable.
 *
 * On disable (@en == false) outside of S0ix entry, tell MP1 to prepare
 * for driver unload. Enable is a no-op here (features are enabled by
 * firmware at boot). Returns the message result, or 0 when nothing is sent.
 */
static int smu_v14_0_0_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return ret;
}

/*
 * smu_v14_0_0_get_smu_metrics_data - read one field out of the cached
 * SmuMetrics_t table.
 *
 * Refreshes the cached metrics table (subject to smu_cmn's staleness
 * check, bypass=false) and translates @member to the corresponding
 * firmware metric, converting units where needed. Unknown members
 * yield UINT_MAX. Returns 0 on success or the table-refresh error.
 */
static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		/* DCLK is not reported by this firmware's metrics table */
		*value = 0;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_FCLK:
		*value = metrics->FclkFrequency;
		break;
	case METRICS_AVERAGE_VPECLK:
		*value = metrics->VpeclkFrequency;
		break;
	case METRICS_AVERAGE_IPUCLK:
		*value =
metrics->IpuclkFrequency;
		break;
	case METRICS_AVERAGE_MPIPUCLK:
		*value = metrics->MpipuclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/*
		 * Newer PMFW (> 0x5d4600) reports GfxActivity directly;
		 * older firmware reports it scaled up by 100.
		 */
		if ((smu->smc_fw_version > 0x5d4600))
			*value = metrics->GfxActivity;
		else
			*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->VcnActivity / 100;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
	case METRICS_CURR_SOCKETPOWER:
		/*
		 * Pack as 8.8 fixed point: whole watts in the upper byte,
		 * centiwatts below (SocketPower presumably in mW -- TODO
		 * confirm against the PMFW interface spec).
		 */
		*value = (metrics->SocketPower / 1000 << 8) +
		(metrics->SocketPower % 1000 / 10);
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* firmware reports centi-degrees C; convert to driver units */
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_RESIDENCY_PROCHOT:
		*value = metrics->ThrottleResidency_PROCHOT;
		break;
	case METRICS_THROTTLER_RESIDENCY_SPL:
		*value = metrics->ThrottleResidency_SPL;
		break;
	case METRICS_THROTTLER_RESIDENCY_FPPT:
		*value = metrics->ThrottleResidency_FPPT;
		break;
	case METRICS_THROTTLER_RESIDENCY_SPPT:
		*value = metrics->ThrottleResidency_SPPT;
		break;
	case METRICS_THROTTLER_RESIDENCY_THM_CORE:
		*value = metrics->ThrottleResidency_THM_CORE;
		break;
	case METRICS_THROTTLER_RESIDENCY_THM_GFX:
		*value = metrics->ThrottleResidency_THM_GFX;
		break;
	case METRICS_THROTTLER_RESIDENCY_THM_SOC:
		*value = metrics->ThrottleResidency_THM_SOC;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* voltage telemetry not available on this ASIC */
		*value = 0;
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = 0;
		break;
	case METRICS_SS_APU_SHARE:
		/* return the percentage of APU power with respect to APU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if (metrics->StapmOpnLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/* return the percentage of dGPU power with respect to dGPU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
			*value = (metrics->dGpuPower * 100) /
			(metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * smu_v14_0_0_read_sensor - amd_pp sensor read entry point.
 *
 * Translates the generic sensor ID to the matching metrics-table member
 * and reports the value (always 4 bytes). Clock sensors are converted
 * from MHz to the expected 10 kHz units (* 100). Unsupported sensors
 * return -EOPNOTSUPP.
 */
static int smu_v14_0_0_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_LOAD:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_VCNACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_CURR_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret =
smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_UCLK,
						       (uint32_t *)data);
		/* MHz -> 10 kHz units expected by the sensor interface */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDSOC,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_SS_APU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = smu_v14_0_0_get_smu_metrics_data(smu,
						       METRICS_SS_DGPU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/*
 * smu_v14_0_0_is_dpm_running - report whether any DPM feature is enabled.
 *
 * Reads the firmware's enabled-feature mask and tests it against
 * SMC_DPM_FEATURE. Returns false if the mask cannot be read.
 */
static bool smu_v14_0_0_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/*
 * smu_v14_0_0_set_watermarks_table - fill the Watermarks_t shadow table
 * from the DC-provided clock ranges and push it to firmware.
 *
 * Reader sets map to the WM_DCFCLK row (drain = DCFCLK, fill = MCLK),
 * writer sets to the WM_SOCCLK row (fill = SOCCLK, drain = MCLK).
 * The table is only uploaded once (WATERMARKS_EXIST set but not yet
 * WATERMARKS_LOADED). Returns 0 on success or a validation/upload error.
 */
static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu,
					    struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
	    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
		return -EINVAL;

	for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
		table->WatermarkRow[WM_DCFCLK][i].MinClock =
			clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxClock =
			clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MinMclk =
			clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
			clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

		table->WatermarkRow[WM_DCFCLK][i].WmSetting =
			clock_ranges->reader_wm_sets[i].wm_inst;
	}

	for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
		table->WatermarkRow[WM_SOCCLK][i].MinClock =
			clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxClock =
			clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MinMclk =
			clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
			clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

		table->WatermarkRow[WM_SOCCLK][i].WmSetting =
			clock_ranges->writer_wm_sets[i].wm_inst;
	}

	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

/*
 * smu_v14_0_0_get_gpu_metrics - export a gpu_metrics_v3_0 snapshot.
 *
 * Pulls a fresh SmuMetrics_t (bypass cache) and copies it field by field
 * into the preallocated gpu_metrics_table. Returns the table size on
 * success, or a negative error from the metrics fetch.
 */
static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v3_0 *gpu_metrics =
		(struct gpu_metrics_v3_0 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
&metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 16);
	gpu_metrics->temperature_skin = metrics.SkinTemp;

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_vcn_activity = metrics.VcnActivity;
	memcpy(&gpu_metrics->average_ipu_activity[0],
	       &metrics.IpuBusy[0],
	       sizeof(uint16_t) * 8);
	memcpy(&gpu_metrics->average_core_c0_activity[0],
	       &metrics.CoreC0Residency[0],
	       sizeof(uint16_t) * 16);
	gpu_metrics->average_dram_reads = metrics.DRAMReads;
	gpu_metrics->average_dram_writes = metrics.DRAMWrites;
	gpu_metrics->average_ipu_reads = metrics.IpuReads;
	gpu_metrics->average_ipu_writes = metrics.IpuWrites;

	gpu_metrics->average_socket_power = metrics.SocketPower;
	gpu_metrics->average_ipu_power = metrics.IpuPower;
	gpu_metrics->average_apu_power = metrics.ApuPower;
	gpu_metrics->average_gfx_power = metrics.GfxPower;
	gpu_metrics->average_dgpu_power = metrics.dGpuPower;
	gpu_metrics->average_all_core_power = metrics.AllCorePower;
	gpu_metrics->average_sys_power = metrics.Psys;
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 16);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_mpipu_frequency = metrics.MpipuclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 16);
	gpu_metrics->current_core_maxfreq = metrics.InfrastructureCpuMaxFreq;
	gpu_metrics->current_gfx_maxfreq = metrics.InfrastructureGfxMaxFreq;

	gpu_metrics->throttle_residency_prochot = metrics.ThrottleResidency_PROCHOT;
	gpu_metrics->throttle_residency_spl = metrics.ThrottleResidency_SPL;
	gpu_metrics->throttle_residency_fppt = metrics.ThrottleResidency_FPPT;
	gpu_metrics->throttle_residency_sppt = metrics.ThrottleResidency_SPPT;
	gpu_metrics->throttle_residency_thm_core = metrics.ThrottleResidency_THM_CORE;
	gpu_metrics->throttle_residency_thm_gfx = metrics.ThrottleResidency_THM_GFX;
	gpu_metrics->throttle_residency_thm_soc = metrics.ThrottleResidency_THM_SOC;

	gpu_metrics->time_filter_alphavalue = metrics.FilterAlphaValue;
	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v3_0);
}

/*
 * smu_v14_0_0_mode2_reset - request a mode-2 (engine-level) GPU reset
 * from the firmware. Returns the message result; logs on failure.
 */
static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					      SMU_RESET_MODE_2, NULL);

	if (ret)
		dev_err(smu->adev->dev, "Failed to mode2 reset!\n");

	return ret;
}

/*
 * smu_v14_0_1_get_dpm_freq_by_index - look up the frequency of DPM level
 * @dpm_level for @clk_type from the v14.0.1 clock table layout.
 *
 * VCN instance 0 and 1 have separate VCLK/DCLK level arrays. Returns
 * -EINVAL for an unknown clock, a missing table, or an out-of-range level.
 */
static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t dpm_level,
					     uint32_t *freq)
{
	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks0[dpm_level];
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->DClocks0[dpm_level];
		break;
	case SMU_VCLK1:
		if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks1[dpm_level];
		break;
	case
SMU_DCLK1: 634 if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled) 635 return -EINVAL; 636 *freq = clk_table->DClocks1[dpm_level]; 637 break; 638 case SMU_UCLK: 639 case SMU_MCLK: 640 if (dpm_level >= clk_table->NumMemPstatesEnabled) 641 return -EINVAL; 642 *freq = clk_table->MemPstateTable[dpm_level].MemClk; 643 break; 644 case SMU_FCLK: 645 if (dpm_level >= clk_table->NumFclkLevelsEnabled) 646 return -EINVAL; 647 *freq = clk_table->FclkClocks_Freq[dpm_level]; 648 break; 649 default: 650 return -EINVAL; 651 } 652 653 return 0; 654} 655 656static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, 657 enum smu_clk_type clk_type, 658 uint32_t dpm_level, 659 uint32_t *freq) 660{ 661 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 662 663 if (!clk_table || clk_type >= SMU_CLK_COUNT) 664 return -EINVAL; 665 666 switch (clk_type) { 667 case SMU_SOCCLK: 668 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 669 return -EINVAL; 670 *freq = clk_table->SocClocks[dpm_level]; 671 break; 672 case SMU_VCLK: 673 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 674 return -EINVAL; 675 *freq = clk_table->VClocks[dpm_level]; 676 break; 677 case SMU_DCLK: 678 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 679 return -EINVAL; 680 *freq = clk_table->DClocks[dpm_level]; 681 break; 682 case SMU_UCLK: 683 case SMU_MCLK: 684 if (dpm_level >= clk_table->NumMemPstatesEnabled) 685 return -EINVAL; 686 *freq = clk_table->MemPstateTable[dpm_level].MemClk; 687 break; 688 case SMU_FCLK: 689 if (dpm_level >= clk_table->NumFclkLevelsEnabled) 690 return -EINVAL; 691 *freq = clk_table->FclkClocks_Freq[dpm_level]; 692 break; 693 default: 694 return -EINVAL; 695 } 696 697 return 0; 698} 699 700static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, 701 enum smu_clk_type clk_type, 702 uint32_t dpm_level, 703 uint32_t *freq) 704{ 705 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 706 smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, 
freq); 707 else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 708 smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); 709 710 return 0; 711} 712 713static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, 714 enum smu_clk_type clk_type) 715{ 716 enum smu_feature_mask feature_id = 0; 717 718 switch (clk_type) { 719 case SMU_MCLK: 720 case SMU_UCLK: 721 case SMU_FCLK: 722 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 723 break; 724 case SMU_GFXCLK: 725 case SMU_SCLK: 726 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 727 break; 728 case SMU_SOCCLK: 729 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 730 break; 731 case SMU_VCLK: 732 case SMU_DCLK: 733 case SMU_VCLK1: 734 case SMU_DCLK1: 735 feature_id = SMU_FEATURE_VCN_DPM_BIT; 736 break; 737 default: 738 return true; 739 } 740 741 return smu_cmn_feature_is_enabled(smu, feature_id); 742} 743 744static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, 745 enum smu_clk_type clk_type, 746 uint32_t *min, 747 uint32_t *max) 748{ 749 DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; 750 uint32_t clock_limit; 751 uint32_t max_dpm_level, min_dpm_level; 752 int ret = 0; 753 754 if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) { 755 switch (clk_type) { 756 case SMU_MCLK: 757 case SMU_UCLK: 758 clock_limit = smu->smu_table.boot_values.uclk; 759 break; 760 case SMU_FCLK: 761 clock_limit = smu->smu_table.boot_values.fclk; 762 break; 763 case SMU_GFXCLK: 764 case SMU_SCLK: 765 clock_limit = smu->smu_table.boot_values.gfxclk; 766 break; 767 case SMU_SOCCLK: 768 clock_limit = smu->smu_table.boot_values.socclk; 769 break; 770 case SMU_VCLK: 771 case SMU_VCLK1: 772 clock_limit = smu->smu_table.boot_values.vclk; 773 break; 774 case SMU_DCLK: 775 case SMU_DCLK1: 776 clock_limit = smu->smu_table.boot_values.dclk; 777 break; 778 default: 779 clock_limit = 0; 780 break; 781 } 782 783 /* clock in Mhz unit */ 784 if (min) 785 *min = clock_limit / 100; 786 if (max) 787 *max = 
clock_limit / 100; 788 789 return 0; 790 } 791 792 if (max) { 793 switch (clk_type) { 794 case SMU_GFXCLK: 795 case SMU_SCLK: 796 *max = clk_table->MaxGfxClk; 797 break; 798 case SMU_MCLK: 799 case SMU_UCLK: 800 case SMU_FCLK: 801 max_dpm_level = 0; 802 break; 803 case SMU_SOCCLK: 804 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 805 break; 806 case SMU_VCLK: 807 case SMU_DCLK: 808 max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1; 809 break; 810 case SMU_VCLK1: 811 case SMU_DCLK1: 812 max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1; 813 break; 814 default: 815 ret = -EINVAL; 816 goto failed; 817 } 818 819 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 820 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); 821 if (ret) 822 goto failed; 823 } 824 } 825 826 if (min) { 827 switch (clk_type) { 828 case SMU_GFXCLK: 829 case SMU_SCLK: 830 *min = clk_table->MinGfxClk; 831 break; 832 case SMU_MCLK: 833 case SMU_UCLK: 834 min_dpm_level = clk_table->NumMemPstatesEnabled - 1; 835 break; 836 case SMU_FCLK: 837 min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; 838 break; 839 case SMU_SOCCLK: 840 min_dpm_level = 0; 841 break; 842 case SMU_VCLK: 843 case SMU_DCLK: 844 case SMU_VCLK1: 845 case SMU_DCLK1: 846 min_dpm_level = 0; 847 break; 848 default: 849 ret = -EINVAL; 850 goto failed; 851 } 852 853 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 854 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); 855 if (ret) 856 goto failed; 857 } 858 } 859 860failed: 861 return ret; 862} 863 864static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, 865 enum smu_clk_type clk_type, 866 uint32_t *min, 867 uint32_t *max) 868{ 869 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 870 uint32_t clock_limit; 871 uint32_t max_dpm_level, min_dpm_level; 872 int ret = 0; 873 874 if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) { 875 switch (clk_type) { 876 case SMU_MCLK: 877 case SMU_UCLK: 
878 clock_limit = smu->smu_table.boot_values.uclk; 879 break; 880 case SMU_FCLK: 881 clock_limit = smu->smu_table.boot_values.fclk; 882 break; 883 case SMU_GFXCLK: 884 case SMU_SCLK: 885 clock_limit = smu->smu_table.boot_values.gfxclk; 886 break; 887 case SMU_SOCCLK: 888 clock_limit = smu->smu_table.boot_values.socclk; 889 break; 890 case SMU_VCLK: 891 clock_limit = smu->smu_table.boot_values.vclk; 892 break; 893 case SMU_DCLK: 894 clock_limit = smu->smu_table.boot_values.dclk; 895 break; 896 default: 897 clock_limit = 0; 898 break; 899 } 900 901 /* clock in Mhz unit */ 902 if (min) 903 *min = clock_limit / 100; 904 if (max) 905 *max = clock_limit / 100; 906 907 return 0; 908 } 909 910 if (max) { 911 switch (clk_type) { 912 case SMU_GFXCLK: 913 case SMU_SCLK: 914 *max = clk_table->MaxGfxClk; 915 break; 916 case SMU_MCLK: 917 case SMU_UCLK: 918 case SMU_FCLK: 919 max_dpm_level = 0; 920 break; 921 case SMU_SOCCLK: 922 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 923 break; 924 case SMU_VCLK: 925 case SMU_DCLK: 926 max_dpm_level = clk_table->VcnClkLevelsEnabled - 1; 927 break; 928 default: 929 ret = -EINVAL; 930 goto failed; 931 } 932 933 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 934 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); 935 if (ret) 936 goto failed; 937 } 938 } 939 940 if (min) { 941 switch (clk_type) { 942 case SMU_GFXCLK: 943 case SMU_SCLK: 944 *min = clk_table->MinGfxClk; 945 break; 946 case SMU_MCLK: 947 case SMU_UCLK: 948 min_dpm_level = clk_table->NumMemPstatesEnabled - 1; 949 break; 950 case SMU_FCLK: 951 min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; 952 break; 953 case SMU_SOCCLK: 954 min_dpm_level = 0; 955 break; 956 case SMU_VCLK: 957 case SMU_DCLK: 958 min_dpm_level = 0; 959 break; 960 default: 961 ret = -EINVAL; 962 goto failed; 963 } 964 965 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 966 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); 967 
if (ret) 968 goto failed; 969 } 970 } 971 972failed: 973 return ret; 974} 975 976static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, 977 enum smu_clk_type clk_type, 978 uint32_t *min, 979 uint32_t *max) 980{ 981 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 982 smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); 983 else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 984 smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); 985 986 return 0; 987} 988 989static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, 990 enum smu_clk_type clk_type, 991 uint32_t *value) 992{ 993 MetricsMember_t member_type; 994 995 switch (clk_type) { 996 case SMU_SOCCLK: 997 member_type = METRICS_AVERAGE_SOCCLK; 998 break; 999 case SMU_VCLK: 1000 member_type = METRICS_AVERAGE_VCLK; 1001 break; 1002 case SMU_DCLK: 1003 member_type = METRICS_AVERAGE_DCLK; 1004 break; 1005 case SMU_MCLK: 1006 member_type = METRICS_AVERAGE_UCLK; 1007 break; 1008 case SMU_FCLK: 1009 member_type = METRICS_AVERAGE_FCLK; 1010 break; 1011 case SMU_GFXCLK: 1012 case SMU_SCLK: 1013 member_type = METRICS_AVERAGE_GFXCLK; 1014 break; 1015 default: 1016 return -EINVAL; 1017 } 1018 1019 return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value); 1020} 1021 1022static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu, 1023 enum smu_clk_type clk_type, 1024 uint32_t *count) 1025{ 1026 DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; 1027 1028 switch (clk_type) { 1029 case SMU_SOCCLK: 1030 *count = clk_table->NumSocClkLevelsEnabled; 1031 break; 1032 case SMU_VCLK: 1033 case SMU_DCLK: 1034 *count = clk_table->Vcn0ClkLevelsEnabled; 1035 break; 1036 case SMU_VCLK1: 1037 case SMU_DCLK1: 1038 *count = clk_table->Vcn1ClkLevelsEnabled; 1039 break; 1040 case SMU_MCLK: 1041 *count = clk_table->NumMemPstatesEnabled; 1042 break; 1043 case SMU_FCLK: 1044 *count = clk_table->NumFclkLevelsEnabled; 1045 break; 1046 
default: 1047 break; 1048 } 1049 1050 return 0; 1051} 1052 1053static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, 1054 enum smu_clk_type clk_type, 1055 uint32_t *count) 1056{ 1057 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1058 1059 switch (clk_type) { 1060 case SMU_SOCCLK: 1061 *count = clk_table->NumSocClkLevelsEnabled; 1062 break; 1063 case SMU_VCLK: 1064 *count = clk_table->VcnClkLevelsEnabled; 1065 break; 1066 case SMU_DCLK: 1067 *count = clk_table->VcnClkLevelsEnabled; 1068 break; 1069 case SMU_MCLK: 1070 *count = clk_table->NumMemPstatesEnabled; 1071 break; 1072 case SMU_FCLK: 1073 *count = clk_table->NumFclkLevelsEnabled; 1074 break; 1075 default: 1076 break; 1077 } 1078 1079 return 0; 1080} 1081 1082static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, 1083 enum smu_clk_type clk_type, 1084 uint32_t *count) 1085{ 1086 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 1087 smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); 1088 else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 1089 smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); 1090 1091 return 0; 1092} 1093 1094static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, 1095 enum smu_clk_type clk_type, char *buf) 1096{ 1097 int i, size = 0, ret = 0; 1098 uint32_t cur_value = 0, value = 0, count = 0; 1099 uint32_t min, max; 1100 1101 smu_cmn_get_sysfs_buf(&buf, &size); 1102 1103 switch (clk_type) { 1104 case SMU_OD_SCLK: 1105 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 1106 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 1107 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 1108 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 1109 (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 1110 break; 1111 case SMU_OD_RANGE: 1112 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1113 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 1114 smu->gfx_default_hard_min_freq, 1115 smu->gfx_default_soft_max_freq); 1116 break; 1117 case SMU_SOCCLK: 1118 case SMU_VCLK: 1119 case SMU_DCLK: 1120 case SMU_VCLK1: 1121 case SMU_DCLK1: 1122 case SMU_MCLK: 1123 case SMU_FCLK: 1124 ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); 1125 if (ret) 1126 break; 1127 1128 ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count); 1129 if (ret) 1130 break; 1131 1132 for (i = 0; i < count; i++) { 1133 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value); 1134 if (ret) 1135 break; 1136 1137 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 1138 cur_value == value ? "*" : ""); 1139 } 1140 break; 1141 case SMU_GFXCLK: 1142 case SMU_SCLK: 1143 ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); 1144 if (ret) 1145 break; 1146 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 1147 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 1148 if (cur_value == max) 1149 i = 2; 1150 else if (cur_value == min) 1151 i = 0; 1152 else 1153 i = 1; 1154 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 1155 i == 0 ? "*" : ""); 1156 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1157 i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */ 1158 i == 1 ? "*" : ""); 1159 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 1160 i == 2 ? 
"*" : ""); 1161 break; 1162 default: 1163 break; 1164 } 1165 1166 return size; 1167} 1168 1169static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, 1170 enum smu_clk_type clk_type, 1171 uint32_t min, 1172 uint32_t max) 1173{ 1174 enum smu_message_type msg_set_min, msg_set_max; 1175 int ret = 0; 1176 1177 if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) 1178 return -EINVAL; 1179 1180 switch (clk_type) { 1181 case SMU_GFXCLK: 1182 case SMU_SCLK: 1183 msg_set_min = SMU_MSG_SetHardMinGfxClk; 1184 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 1185 break; 1186 case SMU_FCLK: 1187 msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 1188 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 1189 break; 1190 case SMU_SOCCLK: 1191 msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 1192 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 1193 break; 1194 case SMU_VCLK: 1195 case SMU_DCLK: 1196 msg_set_min = SMU_MSG_SetHardMinVcn0; 1197 msg_set_max = SMU_MSG_SetSoftMaxVcn0; 1198 break; 1199 case SMU_VCLK1: 1200 case SMU_DCLK1: 1201 msg_set_min = SMU_MSG_SetHardMinVcn1; 1202 msg_set_max = SMU_MSG_SetSoftMaxVcn1; 1203 break; 1204 default: 1205 return -EINVAL; 1206 } 1207 1208 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); 1209 if (ret) 1210 return ret; 1211 1212 return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, 1213 max, NULL); 1214} 1215 1216static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, 1217 enum smu_clk_type clk_type, 1218 uint32_t mask) 1219{ 1220 uint32_t soft_min_level = 0, soft_max_level = 0; 1221 uint32_t min_freq = 0, max_freq = 0; 1222 int ret = 0; 1223 1224 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1225 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 1226 1227 switch (clk_type) { 1228 case SMU_SOCCLK: 1229 case SMU_FCLK: 1230 case SMU_VCLK: 1231 case SMU_DCLK: 1232 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); 1233 if (ret) 1234 break; 1235 1236 ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); 1237 if (ret) 1238 break; 1239 1240 ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1241 break; 1242 default: 1243 ret = -EINVAL; 1244 break; 1245 } 1246 1247 return ret; 1248} 1249 1250static int smu_v14_0_0_set_performance_level(struct smu_context *smu, 1251 enum amd_dpm_forced_level level) 1252{ 1253 struct amdgpu_device *adev = smu->adev; 1254 uint32_t sclk_min = 0, sclk_max = 0; 1255 uint32_t fclk_min = 0, fclk_max = 0; 1256 uint32_t socclk_min = 0, socclk_max = 0; 1257 int ret = 0; 1258 1259 switch (level) { 1260 case AMD_DPM_FORCED_LEVEL_HIGH: 1261 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); 1262 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); 1263 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); 1264 sclk_min = sclk_max; 1265 fclk_min = fclk_max; 1266 socclk_min = socclk_max; 1267 break; 1268 case AMD_DPM_FORCED_LEVEL_LOW: 1269 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); 1270 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); 1271 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); 1272 sclk_max = sclk_min; 1273 fclk_max = fclk_min; 1274 socclk_max = socclk_min; 1275 break; 1276 case AMD_DPM_FORCED_LEVEL_AUTO: 1277 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); 1278 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); 1279 smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); 1280 break; 1281 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1282 case 
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1283 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1284 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1285 /* Temporarily do nothing since the optimal clocks haven't been provided yet */ 1286 break; 1287 case AMD_DPM_FORCED_LEVEL_MANUAL: 1288 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1289 return 0; 1290 default: 1291 dev_err(adev->dev, "Invalid performance level %d\n", level); 1292 return -EINVAL; 1293 } 1294 1295 if (sclk_min && sclk_max) { 1296 ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 1297 SMU_SCLK, 1298 sclk_min, 1299 sclk_max); 1300 if (ret) 1301 return ret; 1302 1303 smu->gfx_actual_hard_min_freq = sclk_min; 1304 smu->gfx_actual_soft_max_freq = sclk_max; 1305 } 1306 1307 if (fclk_min && fclk_max) { 1308 ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 1309 SMU_FCLK, 1310 fclk_min, 1311 fclk_max); 1312 if (ret) 1313 return ret; 1314 } 1315 1316 if (socclk_min && socclk_max) { 1317 ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 1318 SMU_SOCCLK, 1319 socclk_min, 1320 socclk_max); 1321 if (ret) 1322 return ret; 1323 } 1324 1325 return ret; 1326} 1327 1328static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1329{ 1330 DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; 1331 1332 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1333 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1334 smu->gfx_actual_hard_min_freq = 0; 1335 smu->gfx_actual_soft_max_freq = 0; 1336 1337 return 0; 1338} 1339 1340static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1341{ 1342 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1343 1344 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1345 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1346 smu->gfx_actual_hard_min_freq = 0; 1347 smu->gfx_actual_soft_max_freq = 0; 1348 1349 return 0; 1350} 1351 1352static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1353{ 1354 
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 1355 smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); 1356 else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 1357 smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu); 1358 1359 return 0; 1360} 1361 1362static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 1363 bool enable) 1364{ 1365 return smu_cmn_send_smc_msg_with_param(smu, enable ? 1366 SMU_MSG_PowerUpVpe : SMU_MSG_PowerDownVpe, 1367 0, NULL); 1368} 1369 1370static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, 1371 bool enable) 1372{ 1373 return smu_cmn_send_smc_msg_with_param(smu, enable ? 1374 SMU_MSG_PowerUpUmsch : SMU_MSG_PowerDownUmsch, 1375 0, NULL); 1376} 1377 1378static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) 1379{ 1380 DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; 1381 uint8_t idx; 1382 1383 /* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */ 1384 for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { 1385 clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0; 1386 clock_table->SocClocks[idx].Vol = 0; 1387 } 1388 1389 for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { 1390 clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0; 1391 clock_table->VPEClocks[idx].Vol = 0; 1392 } 1393 1394 return 0; 1395} 1396 1397static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) 1398{ 1399 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1400 uint8_t idx; 1401 1402 /* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */ 1403 for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { 1404 clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? 
clk_table->SocClocks[idx]:0; 1405 clock_table->SocClocks[idx].Vol = 0; 1406 } 1407 1408 for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { 1409 clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0; 1410 clock_table->VPEClocks[idx].Vol = 0; 1411 } 1412 1413 return 0; 1414} 1415 1416static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) 1417{ 1418 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 1419 smu_14_0_0_get_dpm_table(smu, clock_table); 1420 else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 1421 smu_14_0_1_get_dpm_table(smu, clock_table); 1422 1423 return 0; 1424} 1425 1426static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { 1427 .check_fw_status = smu_v14_0_check_fw_status, 1428 .check_fw_version = smu_v14_0_check_fw_version, 1429 .init_smc_tables = smu_v14_0_0_init_smc_tables, 1430 .fini_smc_tables = smu_v14_0_0_fini_smc_tables, 1431 .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, 1432 .system_features_control = smu_v14_0_0_system_features_control, 1433 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1434 .send_smc_msg = smu_cmn_send_smc_msg, 1435 .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, 1436 .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, 1437 .set_default_dpm_table = smu_v14_0_set_default_dpm_tables, 1438 .read_sensor = smu_v14_0_0_read_sensor, 1439 .is_dpm_running = smu_v14_0_0_is_dpm_running, 1440 .set_watermarks_table = smu_v14_0_0_set_watermarks_table, 1441 .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics, 1442 .get_enabled_mask = smu_cmn_get_enabled_mask, 1443 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1444 .set_driver_table_location = smu_v14_0_set_driver_table_location, 1445 .gfx_off_control = smu_v14_0_gfx_off_control, 1446 .mode2_reset = smu_v14_0_0_mode2_reset, 1447 .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, 1448 .od_edit_dpm_table = 
smu_v14_0_od_edit_dpm_table, 1449 .print_clk_levels = smu_v14_0_0_print_clk_levels, 1450 .force_clk_levels = smu_v14_0_0_force_clk_levels, 1451 .set_performance_level = smu_v14_0_0_set_performance_level, 1452 .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, 1453 .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, 1454 .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, 1455 .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, 1456 .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, 1457}; 1458 1459static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) 1460{ 1461 struct amdgpu_device *adev = smu->adev; 1462 1463 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 1464 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 1465 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 1466} 1467 1468void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu) 1469{ 1470 1471 smu->ppt_funcs = &smu_v14_0_0_ppt_funcs; 1472 smu->message_map = smu_v14_0_0_message_map; 1473 smu->feature_map = smu_v14_0_0_feature_mask_map; 1474 smu->table_map = smu_v14_0_0_table_map; 1475 smu->is_apu = true; 1476 1477 smu_v14_0_0_set_smu_mailbox_registers(smu); 1478} 1479