
drm/amd/pm: add inst to dpm_set_powergating_by_smu

Add an instance parameter to the amdgpu_dpm_set_powergating_by_smu() function,
and use that instance when calling set_powergating_by_smu().

v2: remove duplicated functions.

Remove the for-loop in amdgpu_dpm_set_powergating_by_smu() and temporarily
move it to amdgpu_dpm_enable_vcn(), in order to keep exactly the same logic
as before until further separation in the next patch.

v3: drop SI logic in amdgpu_dpm_enable_vcn().

Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Boyuan Zhang 2024-10-02 23:52:01 -04:00 committed by Alex Deucher
parent 697cb5cc25
commit ff69bba05f
16 changed files with 59 additions and 43 deletions
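
In short, every call site now passes an explicit instance index. A minimal sketch of the new interface and two typical callers, based on the hunks below (block types and helper names are taken from the diff; the calls are illustrative, not new code from the patch):

/* New prototype: the caller selects which IP instance to (un)gate. */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst);

/* Single-instance blocks (ACP, GFX, SDMA, JPEG, VPE, ...) pass instance 0: */
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0);

/* VCN callers now use the new helper, which loops over
 * adev->vcn.num_vcn_inst and gates each instance individually
 * (enable = false maps to gate = true, i.e. power the instances down): */
amdgpu_dpm_enable_vcn(adev, false);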


@@ -140,7 +140,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -157,7 +157,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -236,7 +236,7 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
} else if (r) {
return r;
@@ -508,7 +508,7 @@ static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -565,7 +565,7 @@ static int acp_suspend(struct amdgpu_ip_block *ip_block)
/* power up on suspend */
if (!adev->acp.acp_cell)
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -575,7 +575,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
/* power down again on resume */
if (!adev->acp.acp_cell)
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -596,7 +596,7 @@ static int acp_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
return 0;
}


@@ -3478,7 +3478,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
-if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
}


@@ -806,7 +806,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
-AMD_IP_BLOCK_TYPE_GFX, true))
+AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
@@ -818,7 +818,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.gfx_off_state &&
-!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {


@@ -5319,7 +5319,7 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable, 0);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}


@@ -356,7 +356,7 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GMC,
-enable);
+enable, 0);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)


@@ -1956,7 +1956,7 @@ static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU)
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false, 0);
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
@@ -1983,7 +1983,7 @@ static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v4_0_enable(adev, false);
if (adev->flags & AMD_IS_APU)
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true, 0);
return 0;
}


@@ -303,7 +303,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
}
r = vcn_v1_0_hw_fini(ip_block);
@@ -1856,7 +1856,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
@@ -1886,7 +1886,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);


@@ -978,7 +978,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
int i, j, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
@@ -1235,7 +1235,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
power_off:
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -1013,7 +1013,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
int i, j, k, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1486,7 +1486,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
}
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -1142,7 +1142,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
int i, j, k, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1633,7 +1633,7 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
}
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -1098,7 +1098,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
int i, j, k, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1624,7 +1624,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
}
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -1122,7 +1122,7 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev)
uint32_t tmp;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1396,7 +1396,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
}
Done:
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -1001,7 +1001,7 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
int i, j, k, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1278,7 +1278,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
}
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -772,7 +772,7 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
int i, j, k, r;
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, true);
+amdgpu_dpm_enable_vcn(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1019,7 +1019,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
}
if (adev->pm.dpm_enabled)
-amdgpu_dpm_enable_uvd(adev, false);
+amdgpu_dpm_enable_vcn(adev, false);
return 0;
}


@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return ret;
}
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+uint32_t block_type,
+bool gate,
+int inst)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
-if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
@@ -98,11 +103,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
(adev)->powerplay.pp_handle, block_type, gate, 0));
break;
case AMD_IP_BLOCK_TYPE_VCN:
-if (pp_funcs && pp_funcs->set_powergating_by_smu) {
-for (int i = 0; i < adev->vcn.num_vcn_inst; i++)
-ret = (pp_funcs->set_powergating_by_smu(
-(adev)->powerplay.pp_handle, block_type, gate, i));
-}
+if (pp_funcs && pp_funcs->set_powergating_by_smu)
+ret = (pp_funcs->set_powergating_by_smu(
+(adev)->powerplay.pp_handle, block_type, gate, inst));
break;
default:
break;
@@ -572,12 +575,24 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
return;
}
-ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
}
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable)
+{
+int i, ret = 0;
+for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, i);
+if (ret)
+DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+enable ? "enable" : "disable", ret);
+}
+}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
@@ -597,7 +612,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
return;
}
-ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -607,7 +622,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
-ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -617,7 +632,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
-ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
enable ? "enable" : "disable", ret);


@@ -397,7 +397,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
-uint32_t block_type, bool gate);
+uint32_t block_type, bool gate, int inst);
extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
@@ -446,6 +446,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);