2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/firmware.h>
27 #include "smu8_fusion.h"
29 #include "cz_smumgr.h"
30 #include "smu_ucode_xfer_cz.h"
31 #include "amdgpu_ucode.h"
33 #include "smu/smu_8_0_d.h"
34 #include "smu/smu_8_0_sh_mask.h"
35 #include "gca/gfx_8_0_d.h"
36 #include "gca/gfx_8_0_sh_mask.h"
/* Read back the SMU message argument register: after a message completes,
 * the SMC places its 32-bit result here.
 */
38 uint32_t cz_get_argument(struct amdgpu_device *adev)
40 	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
/* Fetch the CZ-specific private SMU state hung off adev->smu.priv
 * (allocated in cz_smu_init()).
 */
43 static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
45 	struct cz_smu_private_data *priv =
46 		(struct cz_smu_private_data *)(adev->smu.priv);
/* Post a message to the SMC without waiting for its result.
 * First polls the SRBM2P response register's CONTENT field until the SMC has
 * consumed any previous message (bounded by adev->usec_timeout), then clears
 * the response register and writes the new message ID.
 * Presumably returns non-zero on timeout and 0 on success -- the return
 * statements fall outside this excerpt; verify against the full source.
 */
51 int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
56 	for (i = 0; i < adev->usec_timeout; i++) {
57 		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
58 				SMU_MP1_SRBM2P_RESP_0, CONTENT);
64 	/* timeout means wrong logic*/
65 	if (i == adev->usec_timeout)
	/* Response slot is free: clear it, then hand the message to the SMC. */
68 	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
69 	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
/* Send a message to the SMC and wait for its completion.
 * Posts the message via cz_send_msg_to_smc_async(), then polls the SRBM2P
 * response register until the SMC answers (bounded by adev->usec_timeout)
 * and checks that the reply is PPSMC_Result_OK.
 */
74 int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
77 	u32 content = 0, tmp = 0;
79 	if (cz_send_msg_to_smc_async(adev, msg))
82 	for (i = 0; i < adev->usec_timeout; i++) {
83 		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
84 				SMU_MP1_SRBM2P_RESP_0, CONTENT);
90 	/* timeout means wrong logic*/
91 	if (i == adev->usec_timeout)
	/* Anything other than PPSMC_Result_OK is treated as a failure. */
94 	if (PPSMC_Result_OK != tmp) {
95 		dev_err(adev->dev, "SMC Failed to send Message.\n");
/* Write a 32-bit parameter into the SMU argument register, then post the
 * message without waiting for completion.
 */
102 int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
103 						u16 msg, u32 parameter)
105 	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
106 	return cz_send_msg_to_smc_async(adev, msg);
/* Write a 32-bit parameter into the SMU argument register, then send the
 * message synchronously (waits for and validates the SMC response).
 */
109 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
110 						u16 msg, u32 parameter)
112 	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
113 	return cz_send_msg_to_smc(adev, msg);
/* Program the indirect-access index register to point at a dword inside SMU
 * SRAM.  Rejects addresses whose 4-byte access would cross @limit
 * (hence the "+ 3" bound check).
 */
116 static int cz_set_smc_sram_address(struct amdgpu_device *adev,
117 						u32 smc_address, u32 limit)
121 	if ((smc_address + 3) > limit)
124 	WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
/* Read one dword from SMU SRAM at @smc_address via the MP0PUB indirect
 * index/data register pair.  @limit bounds the access.
 */
129 int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
130 						u32 *value, u32 limit)
134 	ret = cz_set_smc_sram_address(adev, smc_address, limit);
138 	*value = RREG32(mmMP0PUB_IND_DATA_0);
/* Write one dword into SMU SRAM at @smc_address via the MP0PUB indirect
 * index/data register pair.  @limit bounds the access.
 */
143 int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
144 						u32 value, u32 limit)
148 	ret = cz_set_smc_sram_address(adev, smc_address, limit);
152 	WREG32(mmMP0PUB_IND_DATA_0, value);
/* Ask the SMU to load the driver-supplied firmwares described by the TOC
 * buffer: clear the UcodeLoadStatus word in the SMU firmware header, hand
 * the TOC's GPU address to the SMC, then execute the InitJobs / aram /
 * power-profiling / initialize job entries recorded during TOC construction.
 * NOTE(review): the results of the individual cz_send_msg_* calls are not
 * checked here -- confirm that is intentional against the full source.
 */
157 static int cz_smu_request_load_fw(struct amdgpu_device *adev)
159 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
161 	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
162 		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
	/* Reset the load-status word so completion can be detected later. */
164 	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);
166 	/*prepare toc buffers*/
167 	cz_send_msg_to_smc_with_parameter(adev,
168 				PPSMC_MSG_DriverDramAddrHi,
169 				priv->toc_buffer.mc_addr_high);
170 	cz_send_msg_to_smc_with_parameter(adev,
171 				PPSMC_MSG_DriverDramAddrLo,
172 				priv->toc_buffer.mc_addr_low);
173 	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);
	/* Run the pre-built TOC job entries in order. */
176 	cz_send_msg_to_smc_with_parameter(adev,
177 				PPSMC_MSG_ExecuteJob,
178 				priv->toc_entry_aram);
180 	cz_send_msg_to_smc_with_parameter(adev,
181 				PPSMC_MSG_ExecuteJob,
182 				priv->toc_entry_power_profiling_index);
184 	cz_send_msg_to_smc_with_parameter(adev,
185 				PPSMC_MSG_ExecuteJob,
186 				priv->toc_entry_initialize_index);
/*
 *Check if the FW has been loaded, SMU will not return if loading
 * has not finished.  Points the indirect index register at the
 * UcodeLoadStatus word of the SMU firmware header and polls the data
 * register until every bit in @fw_mask is set (bounded by
 * adev->usec_timeout); logs an error on timeout.
 */
195 static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
199 	uint32_t index = SMN_MP1_SRAM_START_ADDR +
200 			SMU8_FIRMWARE_HEADER_LOCATION +
201 			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
203 	WREG32(mmMP0PUB_IND_INDEX, index);
205 	for (i = 0; i < adev->usec_timeout; i++) {
206 		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
211 	if (i >= adev->usec_timeout) {
213 			"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
214 			fw_mask, RREG32(mmMP0PUB_IND_DATA));
/*
 * interfaces for different ip blocks to check firmware loading status
 * 0 for success otherwise failed
 *
 * Maps an AMDGPU_UCODE_ID onto the corresponding AMDGPU_*_UCODE_LOADED
 * flag recorded in adev->smu.fw_flags (set by cz_smu_start()).
 */
225 static int cz_smu_check_finished(struct amdgpu_device *adev,
226 							enum AMDGPU_UCODE_ID id)
229 	case AMDGPU_UCODE_ID_SDMA0:
230 		if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
233 	case AMDGPU_UCODE_ID_SDMA1:
234 		if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
237 	case AMDGPU_UCODE_ID_CP_CE:
238 		if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
241 	case AMDGPU_UCODE_ID_CP_PFP:
242 		if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
244 	case AMDGPU_UCODE_ID_CP_ME:
245 		if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
248 	case AMDGPU_UCODE_ID_CP_MEC1:
249 		if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
252 	case AMDGPU_UCODE_ID_CP_MEC2:
253 		if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
256 	case AMDGPU_UCODE_ID_RLC_G:
257 		if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
260 	case AMDGPU_UCODE_ID_MAXIMUM:
/* Manually load the MEC (compute micro engine) firmware: halt both MEC
 * micro engines, configure the instruction-cache base control (VMID 0, ATC
 * off, cache policy 0, MTYPE 1), then program the firmware's GPU address
 * into the IC base lo/hi registers.
 */
268 static int cz_load_mec_firmware(struct amdgpu_device *adev)
270 	struct amdgpu_firmware_info *ucode =
271 		&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	/* No MEC firmware was fetched -- nothing to load. */
275 	if (ucode->fw == NULL)
278 	/* Disable MEC parsing/prefetching */
279 	tmp = RREG32(mmCP_MEC_CNTL);
280 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
281 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
282 	WREG32(mmCP_MEC_CNTL, tmp);
284 	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
285 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
286 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
287 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
288 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
289 	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);
	/* Point the instruction cache at the firmware's GPU (MC) address. */
291 	reg_data = lower_32_bits(ucode->mc_addr) &
292 			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
293 	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);
295 	reg_data = upper_32_bits(ucode->mc_addr) &
296 			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
297 	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);
/* Start the SMU firmware-loading sequence: request the SMU to load all
 * firmwares described by the TOC, wait until the expected mask of ucodes
 * reports loaded, hand-load MEC on Carrizo, and finally record the loaded
 * set in adev->smu.fw_flags for cz_smu_check_finished().
 */
302 int cz_smu_start(struct amdgpu_device *adev)
306 	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
307 				UCODE_ID_SDMA0_MASK |
308 				UCODE_ID_SDMA1_MASK |
309 				UCODE_ID_CP_CE_MASK |
310 				UCODE_ID_CP_ME_MASK |
311 				UCODE_ID_CP_PFP_MASK |
312 				UCODE_ID_CP_MEC_JT1_MASK |
313 				UCODE_ID_CP_MEC_JT2_MASK;
315 	cz_smu_request_load_fw(adev);
316 	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
320 	/* manually load MEC firmware for CZ */
321 	if (adev->asic_type == CHIP_CARRIZO) {
322 		ret = cz_load_mec_firmware(adev);
324 			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
329 	/* setup fw load flag */
330 	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
331 				AMDGPU_SDMA1_UCODE_LOADED |
332 				AMDGPU_CPCE_UCODE_LOADED |
333 				AMDGPU_CPPFP_UCODE_LOADED |
334 				AMDGPU_CPME_UCODE_LOADED |
335 				AMDGPU_CPMEC1_UCODE_LOADED |
336 				AMDGPU_CPMEC2_UCODE_LOADED |
337 				AMDGPU_CPRLC_UCODE_LOADED;
/* Translate an SMU UCODE_ID_* firmware type into the driver's
 * AMDGPU_UCODE_ID enum.  Both MEC jump tables map to MEC1; unknown types
 * log an error and fall through to AMDGPU_UCODE_ID_MAXIMUM.
 */
342 static uint32_t cz_convert_fw_type(uint32_t fw_type)
344 	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
348 		result = AMDGPU_UCODE_ID_SDMA0;
351 		result = AMDGPU_UCODE_ID_SDMA1;
354 		result = AMDGPU_UCODE_ID_CP_CE;
356 	case UCODE_ID_CP_PFP:
357 		result = AMDGPU_UCODE_ID_CP_PFP;
360 		result = AMDGPU_UCODE_ID_CP_ME;
	/* Both MEC jump tables live inside the MEC1 firmware image. */
362 	case UCODE_ID_CP_MEC_JT1:
363 	case UCODE_ID_CP_MEC_JT2:
364 		result = AMDGPU_UCODE_ID_CP_MEC1;
367 		result = AMDGPU_UCODE_ID_RLC_G;
370 		DRM_ERROR("UCode type is out of range!");
/* Translate a CZ_SCRATCH_ENTRY_* enum into the task argument byte that the
 * SMU expects in a SMU_Task: ucode entries map to UCODE_ID_*, the
 * power-profiling entry to TASK_ARG_INIT_MM_PWR_LOG, register-restore data
 * entries to TASK_ARG_REG_MMIO, and the clock table to
 * TASK_ARG_INIT_CLK_TABLE.
 */
376 static uint8_t cz_smu_translate_firmware_enum_to_arg(
377 			enum cz_scratch_entry firmware_enum)
381 	switch (firmware_enum) {
382 	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
383 		ret = UCODE_ID_SDMA0;
385 	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
386 		ret = UCODE_ID_SDMA1;
388 	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
389 		ret = UCODE_ID_CP_CE;
391 	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
392 		ret = UCODE_ID_CP_PFP;
394 	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
395 		ret = UCODE_ID_CP_ME;
397 	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
398 		ret = UCODE_ID_CP_MEC_JT1;
400 	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
401 		ret = UCODE_ID_CP_MEC_JT2;
403 	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
404 		ret = UCODE_ID_GMCON_RENG;
406 	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
407 		ret = UCODE_ID_RLC_G;
409 	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
410 		ret = UCODE_ID_RLC_SCRATCH;
412 	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
413 		ret = UCODE_ID_RLC_SRM_ARAM;
415 	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
416 		ret = UCODE_ID_RLC_SRM_DRAM;
418 	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
419 		ret = UCODE_ID_DMCU_ERAM;
421 	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
422 		ret = UCODE_ID_DMCU_IRAM;
424 	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
425 		ret = TASK_ARG_INIT_MM_PWR_LOG;
	/* All register save/restore data blobs share one task argument. */
427 	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
428 	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
429 	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
430 	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
431 	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
432 	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
433 		ret = TASK_ARG_REG_MMIO;
435 	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
436 		ret = TASK_ARG_INIT_CLK_TABLE;
/* Fill a cz_buffer_entry describing one driver-loaded firmware image:
 * resolve the firmware's GPU address and size from adev->firmware.ucode[].
 * For the MEC jump tables the entry points into the MEC image at jt_offset
 * with jt_size (both stored in dwords in the header, hence the << 2).
 */
443 static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
444 					enum cz_scratch_entry firmware_enum,
445 					struct cz_buffer_entry *entry)
449 	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
450 	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
451 	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
452 	const struct gfx_firmware_header_v1_0 *header;
	/* Firmware was never fetched -- cannot build an entry for it. */
454 	if (ucode->fw == NULL)
457 	gpu_addr = ucode->mc_addr;
458 	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
459 	data_size = le32_to_cpu(header->header.ucode_size_bytes);
461 	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
462 	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
463 		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
464 		data_size = le32_to_cpu(header->jt_size) << 2;
467 	entry->mc_addr_low = lower_32_bits(gpu_addr);
468 	entry->mc_addr_high = upper_32_bits(gpu_addr);
469 	entry->data_size = data_size;
470 	entry->firmware_ID = firmware_enum;
/* Carve @size_in_byte bytes out of the shared SMU scratch buffer and record
 * the slice (GPU address, kernel address, size, type) in @entry.
 * NOTE(review): the GPU address is offset by size_in_byte while the kernel
 * address uses the post-increment smu_buffer_used_bytes -- for the first
 * entry these differ from a plain bump allocator; confirm against the full
 * source whether this asymmetry is intentional.
 */
475 static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
476 						enum cz_scratch_entry scratch_type,
477 						uint32_t size_in_byte,
478 						struct cz_buffer_entry *entry)
480 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
481 	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
482 						priv->smu_buffer.mc_addr_low;
483 	mc_addr += size_in_byte;
485 	priv->smu_buffer_used_bytes += size_in_byte;
486 	entry->data_size = size_in_byte;
487 	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
488 	entry->mc_addr_low = lower_32_bits(mc_addr);
489 	entry->mc_addr_high = upper_32_bits(mc_addr);
490 	entry->firmware_ID = scratch_type;
/* Append a TASK_TYPE_UCODE_LOAD task to the TOC for @firmware_enum.
 * The task is chained to the next TOC slot unless @is_last, and its
 * address/size are looked up from the driver_buffer[] entries built by
 * cz_smu_populate_single_firmware_entry().
 */
495 static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
496 						enum cz_scratch_entry firmware_enum,
500 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
501 	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
502 	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
504 	task->type = TASK_TYPE_UCODE_LOAD;
505 	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
506 	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
	/* Find the firmware entry previously registered for this enum. */
508 	for (i = 0; i < priv->driver_buffer_length; i++)
509 		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
512 	if (i >= priv->driver_buffer_length) {
513 		dev_err(adev->dev, "Invalid Firmware Type\n");
517 	task->addr.low = priv->driver_buffer[i].mc_addr_low;
518 	task->addr.high = priv->driver_buffer[i].mc_addr_high;
519 	task->size_bytes = priv->driver_buffer[i].data_size;
/* Append a task of @type (save/load/initialize) to the TOC that targets a
 * scratch-buffer entry.  For the IH-registers data blob the scratch memory
 * is additionally stamped with a restore command so the SMU replays the
 * registers on load.
 */
524 static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
525 						enum cz_scratch_entry firmware_enum,
526 						uint8_t type, bool is_last)
529 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
530 	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
531 	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
534 	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
535 	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
	/* Find the scratch entry previously registered for this enum. */
537 	for (i = 0; i < priv->scratch_buffer_length; i++)
538 		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
541 	if (i >= priv->scratch_buffer_length) {
542 		dev_err(adev->dev, "Invalid Firmware Type\n");
546 	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
547 	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
548 	task->size_bytes = priv->scratch_buffer[i].data_size;
550 	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
551 		struct cz_ih_meta_data *pIHReg_restore =
552 			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
553 		pIHReg_restore->command =
554 			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
/* Record the TOC slot used for saving RLC SRM ARAM and append the single
 * save task; the slot index is later passed to PPSMC_MSG_ExecuteJob in
 * cz_smu_request_load_fw().
 */
560 static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
562 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
563 	priv->toc_entry_aram = priv->toc_entry_used_count;
564 	cz_smu_populate_single_scratch_task(adev,
565 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
566 				TASK_TYPE_UCODE_SAVE, true);
/* Build the job list run when entering the VDDGFX power-gated state:
 * save RLC scratch and SRM DRAM contents so they can be restored on exit.
 */
571 static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
573 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
574 	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
576 	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
577 	cz_smu_populate_single_scratch_task(adev,
578 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
579 				TASK_TYPE_UCODE_SAVE, false);
580 	cz_smu_populate_single_scratch_task(adev,
581 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
582 				TASK_TYPE_UCODE_SAVE, true);
/* Build the job list run when leaving the VDDGFX power-gated state:
 * re-load the CP/RLC firmwares (when SMU-managed loading is enabled) and
 * restore the previously saved RLC scratch/ARAM/DRAM contents.
 */
587 static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
589 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
590 	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
592 	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;
	/* Firmware re-load tasks, only when the SMU performs fw loading. */
595 	if (adev->firmware.smu_load) {
596 		cz_smu_populate_single_ucode_load_task(adev,
597 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
598 		cz_smu_populate_single_ucode_load_task(adev,
599 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
600 		cz_smu_populate_single_ucode_load_task(adev,
601 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
602 		cz_smu_populate_single_ucode_load_task(adev,
603 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
604 		cz_smu_populate_single_ucode_load_task(adev,
605 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
606 		cz_smu_populate_single_ucode_load_task(adev,
607 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
610 	/* populate scratch */
611 	cz_smu_populate_single_scratch_task(adev,
612 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
613 				TASK_TYPE_UCODE_LOAD, false);
614 	cz_smu_populate_single_scratch_task(adev,
615 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
616 				TASK_TYPE_UCODE_LOAD, false);
617 	cz_smu_populate_single_scratch_task(adev,
618 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
619 				TASK_TYPE_UCODE_LOAD, true);
/* Record the TOC slot for the multimedia power-profiling initialization
 * task and append it; executed by cz_smu_request_load_fw().
 */
624 static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
626 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
628 	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
630 	cz_smu_populate_single_scratch_task(adev,
631 				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
632 				TASK_TYPE_INITIALIZE, true);
/* Build the boot-time firmware-load job chain (SDMA, CP engines, MEC jump
 * tables, RLC) and record its first TOC slot as the "initialize" entry
 * executed during cz_smu_request_load_fw().
 */
636 static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
638 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
640 	priv->toc_entry_initialize_index = priv->toc_entry_used_count;
642 	if (adev->firmware.smu_load) {
643 		cz_smu_populate_single_ucode_load_task(adev,
644 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
645 		cz_smu_populate_single_ucode_load_task(adev,
646 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
647 		cz_smu_populate_single_ucode_load_task(adev,
648 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
649 		cz_smu_populate_single_ucode_load_task(adev,
650 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
651 		cz_smu_populate_single_ucode_load_task(adev,
652 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
653 		cz_smu_populate_single_ucode_load_task(adev,
654 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
655 		cz_smu_populate_single_ucode_load_task(adev,
656 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
657 		cz_smu_populate_single_ucode_load_task(adev,
658 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
/* Record the TOC slot used to transfer the SMU8 fusion clock table and
 * append the initialize task; the slot index is used by the pptable
 * download/upload helpers below.
 */
664 static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
666 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
668 	priv->toc_entry_clock_table = priv->toc_entry_used_count;
670 	cz_smu_populate_single_scratch_task(adev,
671 				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
672 				TASK_TYPE_INITIALIZE, true);
/* Initialize every TOC job-list slot to IGNORE_JOB so that only the jobs
 * explicitly constructed afterwards are visible to the SMU.
 */
677 static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
680 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
681 	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
683 	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
684 		toc->JobList[i] = (uint8_t)IGNORE_JOB;
/*
 * cz smu uninitialization: release the TOC and SMU buffer objects, free the
 * private state, and tear down the firmware BO when SMU-managed loading was
 * in use.  Mirrors the allocations done in cz_smu_init().
 */
692 int cz_smu_fini(struct amdgpu_device *adev)
694 	amdgpu_bo_unref(&adev->smu.toc_buf);
695 	amdgpu_bo_unref(&adev->smu.smu_buf);
696 	kfree(adev->smu.priv);
697 	adev->smu.priv = NULL;
698 	if (adev->firmware.smu_load)
699 		amdgpu_ucode_fini_bo(adev);
/* Download the clock/power table from the SMU into the fusion-clktable
 * scratch buffer and return its kernel address through @table.  Programs
 * the scratch buffer's GPU address, executes the clock-table TOC job, then
 * asks the SMC to transfer the table to DRAM.
 */
704 int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
707 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	/* Locate the fusion clock-table scratch entry. */
709 	for (i = 0; i < priv->scratch_buffer_length; i++)
710 		if (priv->scratch_buffer[i].firmware_ID ==
711 				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
714 	if (i >= priv->scratch_buffer_length) {
715 		dev_err(adev->dev, "Invalid Scratch Type\n");
719 	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;
721 	/* prepare buffer for pptable */
722 	cz_send_msg_to_smc_with_parameter(adev,
723 				PPSMC_MSG_SetClkTableAddrHi,
724 				priv->scratch_buffer[i].mc_addr_high);
725 	cz_send_msg_to_smc_with_parameter(adev,
726 				PPSMC_MSG_SetClkTableAddrLo,
727 				priv->scratch_buffer[i].mc_addr_low);
728 	cz_send_msg_to_smc_with_parameter(adev,
729 				PPSMC_MSG_ExecuteJob,
730 				priv->toc_entry_clock_table);
732 	/* actual downloading */
733 	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);
/* Upload the (driver-modified) clock/power table from the fusion-clktable
 * scratch buffer back to the SMU.  Same address/job setup as the download
 * path, but ends with a transfer-to-SMU message.
 */
738 int cz_smu_upload_pptable(struct amdgpu_device *adev)
741 	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	/* Locate the fusion clock-table scratch entry. */
743 	for (i = 0; i < priv->scratch_buffer_length; i++)
744 		if (priv->scratch_buffer[i].firmware_ID ==
745 				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
748 	if (i >= priv->scratch_buffer_length) {
749 		dev_err(adev->dev, "Invalid Scratch Type\n");
754 	cz_send_msg_to_smc_with_parameter(adev,
755 				PPSMC_MSG_SetClkTableAddrHi,
756 				priv->scratch_buffer[i].mc_addr_high);
757 	cz_send_msg_to_smc_with_parameter(adev,
758 				PPSMC_MSG_SetClkTableAddrLo,
759 				priv->scratch_buffer[i].mc_addr_low);
760 	cz_send_msg_to_smc_with_parameter(adev,
761 				PPSMC_MSG_ExecuteJob,
762 				priv->toc_entry_clock_table);
764 	/* actual uploading */
765 	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);
/*
 * cz smumgr functions initialization: only the load-finished check is
 * implemented; explicit load requests are unused on this ASIC family.
 */
773 static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
774 	.check_fw_load_finish = cz_smu_check_finished,
775 	.request_smu_load_fw = NULL,
776 	.request_smu_specific_fw = NULL,
/*
 * cz smu initialization: allocate private state and the TOC / SMU scratch
 * buffer objects in GTT, pin + kmap them, register every driver-loaded
 * firmware and scratch region, then construct the TOC job lists and hook
 * up the smumgr function table.  (Function continues beyond this excerpt;
 * the trailing unrefs below appear to be part of its error-cleanup path.)
 */
782 int cz_smu_init(struct amdgpu_device *adev)
785 	uint64_t mc_addr = 0;
786 	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
787 	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
788 	void *toc_buf_ptr = NULL;
789 	void *smu_buf_ptr = NULL;
791 	struct cz_smu_private_data *priv =
792 		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
796 	/* allocate firmware buffers */
797 	if (adev->firmware.smu_load)
798 		amdgpu_ucode_init_bo(adev);
800 	adev->smu.priv = priv;
801 	adev->smu.fw_flags = 0;
	/* TOC occupies a single page. */
802 	priv->toc_buffer.data_size = 4096;
	/* Scratch buffer: sum of all 32-byte-aligned scratch region sizes. */
804 	priv->smu_buffer.data_size =
805 		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
806 		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
807 		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
808 		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
809 		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
811 	/* prepare toc buffer and smu buffer:
812 	* 1. create amdgpu_bo for toc buffer and smu buffer
814 	* 3. map kernel virtual address
816 	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
817 				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
821 		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
825 	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
826 				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
830 		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
834 	/* toc buffer reserve/pin/map */
835 	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
837 		amdgpu_bo_unref(&adev->smu.toc_buf);
838 		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
842 	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
844 		amdgpu_bo_unreserve(adev->smu.toc_buf);
845 		amdgpu_bo_unref(&adev->smu.toc_buf);
846 		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
850 	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
852 		goto smu_init_failed;
854 	amdgpu_bo_unreserve(adev->smu.toc_buf);
	/* Remember the TOC's GPU (MC) address split and CPU mapping. */
856 	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
857 	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
858 	priv->toc_buffer.kaddr = toc_buf_ptr;
860 	/* smu buffer reserve/pin/map */
861 	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
863 		amdgpu_bo_unref(&adev->smu.smu_buf);
864 		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
868 	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
870 		amdgpu_bo_unreserve(adev->smu.smu_buf);
871 		amdgpu_bo_unref(&adev->smu.smu_buf);
872 		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
876 	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
878 		goto smu_init_failed;
880 	amdgpu_bo_unreserve(adev->smu.smu_buf);
882 	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
883 	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
884 	priv->smu_buffer.kaddr = smu_buf_ptr;
	/* Register every driver-loaded firmware with the TOC machinery. */
886 	if (adev->firmware.smu_load) {
887 		if (cz_smu_populate_single_firmware_entry(adev,
888 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
889 				&priv->driver_buffer[priv->driver_buffer_length++]))
890 			goto smu_init_failed;
891 		if (cz_smu_populate_single_firmware_entry(adev,
892 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
893 				&priv->driver_buffer[priv->driver_buffer_length++]))
894 			goto smu_init_failed;
895 		if (cz_smu_populate_single_firmware_entry(adev,
896 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
897 				&priv->driver_buffer[priv->driver_buffer_length++]))
898 			goto smu_init_failed;
899 		if (cz_smu_populate_single_firmware_entry(adev,
900 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
901 				&priv->driver_buffer[priv->driver_buffer_length++]))
902 			goto smu_init_failed;
903 		if (cz_smu_populate_single_firmware_entry(adev,
904 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
905 				&priv->driver_buffer[priv->driver_buffer_length++]))
906 			goto smu_init_failed;
907 		if (cz_smu_populate_single_firmware_entry(adev,
908 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
909 				&priv->driver_buffer[priv->driver_buffer_length++]))
910 			goto smu_init_failed;
911 		if (cz_smu_populate_single_firmware_entry(adev,
912 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
913 				&priv->driver_buffer[priv->driver_buffer_length++]))
914 			goto smu_init_failed;
915 		if (cz_smu_populate_single_firmware_entry(adev,
916 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
917 				&priv->driver_buffer[priv->driver_buffer_length++]))
918 			goto smu_init_failed;
	/* Carve the scratch regions out of the shared SMU buffer. */
921 	if (cz_smu_populate_single_scratch_entry(adev,
922 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
923 				UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
924 				&priv->scratch_buffer[priv->scratch_buffer_length++]))
925 		goto smu_init_failed;
926 	if (cz_smu_populate_single_scratch_entry(adev,
927 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
928 				UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
929 				&priv->scratch_buffer[priv->scratch_buffer_length++]))
930 		goto smu_init_failed;
931 	if (cz_smu_populate_single_scratch_entry(adev,
932 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
933 				UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
934 				&priv->scratch_buffer[priv->scratch_buffer_length++]))
935 		goto smu_init_failed;
936 	if (cz_smu_populate_single_scratch_entry(adev,
937 				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
938 				sizeof(struct SMU8_MultimediaPowerLogData),
939 				&priv->scratch_buffer[priv->scratch_buffer_length++]))
940 		goto smu_init_failed;
941 	if (cz_smu_populate_single_scratch_entry(adev,
942 				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
943 				sizeof(struct SMU8_Fusion_ClkTable),
944 				&priv->scratch_buffer[priv->scratch_buffer_length++]))
945 		goto smu_init_failed;
	/* Build every TOC job list; order fixes the toc_entry_* indices. */
947 	cz_smu_initialize_toc_empty_job_list(adev);
948 	cz_smu_construct_toc_for_rlc_aram_save(adev);
949 	cz_smu_construct_toc_for_vddgfx_enter(adev);
950 	cz_smu_construct_toc_for_vddgfx_exit(adev);
951 	cz_smu_construct_toc_for_power_profiling(adev);
952 	cz_smu_construct_toc_for_bootup(adev);
953 	cz_smu_construct_toc_for_clock_table(adev);
954 	/* init the smumgr functions */
955 	adev->smu.smumgr_funcs = &cz_smumgr_funcs;
	/* Error cleanup: drop both buffer objects. */
960 	amdgpu_bo_unref(toc_buf);
961 	amdgpu_bo_unref(smu_buf);