drm/amdgpu: update the handle ptr in wait_for_idle
Update the void *handle argument to a struct amdgpu_ip_block pointer in all wait_for_idle function pointers.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ded57e4951
commit 82ae6619a4

79 changed files with 219 additions and 157 deletions
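Every hunk below follows the same mechanical pattern: each IP block's wait_for_idle() hook stops receiving the device as an opaque void *handle and instead receives its own struct amdgpu_ip_block, from which it derives the device. As a composite sketch of that pattern (modeled on the polling implementations below, not any one driver verbatim; the foo_ prefix and foo_is_idle() helper are hypothetical placeholders for the per-IP names):

/* Before: the hook got the device disguised as an opaque handle. */
static int foo_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (foo_is_idle(adev))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

/* After: the hook gets its own IP-block instance and derives adev from it. */
static int foo_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (foo_is_idle(adev))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}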
@@ -590,7 +590,7 @@ static bool acp_is_idle(void *handle)
         return true;
 }
 
-static int acp_wait_for_idle(void *handle)
+static int acp_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -2204,7 +2204,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                 if (!adev->ip_blocks[i].status.valid)
                         continue;
                 if (adev->ip_blocks[i].version->type == block_type) {
-                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
+                        r = adev->ip_blocks[i].version->funcs->wait_for_idle(&adev->ip_blocks[i]);
                         if (r)
                                 return r;
                         break;

@@ -155,7 +155,7 @@ static bool isp_is_idle(void *handle)
         return true;
 }
 
-static int isp_wait_for_idle(void *handle)
+static int isp_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -632,7 +632,7 @@ static bool amdgpu_vkms_is_idle(void *handle)
         return true;
 }
 
-static int amdgpu_vkms_wait_for_idle(void *handle)
+static int amdgpu_vkms_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -2172,7 +2172,7 @@ static bool cik_common_is_idle(void *handle)
         return true;
 }
 
-static int cik_common_wait_for_idle(void *handle)
+static int cik_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }
@@ -362,11 +362,11 @@ static bool cik_ih_is_idle(void *handle)
         return true;
 }
 
-static int cik_ih_wait_for_idle(void *handle)
+static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -1039,11 +1039,11 @@ static bool cik_sdma_is_idle(void *handle)
         return true;
 }
 
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |

@@ -358,11 +358,11 @@ static bool cz_ih_is_idle(void *handle)
         return true;
 }
 
-static int cz_ih_wait_for_idle(void *handle)
+static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -2948,7 +2948,7 @@ static bool dce_v10_0_is_idle(void *handle)
         return true;
 }
 
-static int dce_v10_0_wait_for_idle(void *handle)
+static int dce_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }
@@ -3086,7 +3086,7 @@ static bool dce_v11_0_is_idle(void *handle)
         return true;
 }
 
-static int dce_v11_0_wait_for_idle(void *handle)
+static int dce_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -2843,7 +2843,7 @@ static bool dce_v6_0_is_idle(void *handle)
         return true;
 }
 
-static int dce_v6_0_wait_for_idle(void *handle)
+static int dce_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -2866,7 +2866,7 @@ static bool dce_v8_0_is_idle(void *handle)
         return true;
 }
 
-static int dce_v8_0_wait_for_idle(void *handle)
+static int dce_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -7477,11 +7477,11 @@ static bool gfx_v10_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v10_0_wait_for_idle(void *handle)
+static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned int i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */
@@ -4726,11 +4726,11 @@ static bool gfx_v11_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v11_0_wait_for_idle(void *handle)
+static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -3664,11 +3664,11 @@ static bool gfx_v12_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v12_0_wait_for_idle(void *handle)
+static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -3177,13 +3177,13 @@ static bool gfx_v6_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v6_0_wait_for_idle(void *handle)
+static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (gfx_v6_0_is_idle(handle))
+                if (gfx_v6_0_is_idle(adev))
                         return 0;
                 udelay(1);
         }
@@ -4523,11 +4523,11 @@ static bool gfx_v7_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v7_0_wait_for_idle(void *handle)
+static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -4865,13 +4865,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle)
         return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_wait_for_idle(void *handle)
+static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned int i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (gfx_v8_0_is_idle(handle))
+                if (gfx_v8_0_is_idle(adev))
                         return 0;
 
                 udelay(1);

@@ -4882,6 +4882,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
 static int gfx_v8_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_ip_block *ip_block;
 
         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

@@ -4897,8 +4898,13 @@ static int gfx_v8_0_hw_fini(void *handle)
                 pr_debug("For SRIOV client, shouldn't do anything.\n");
                 return 0;
         }
+
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+        if (!ip_block)
+                return -EINVAL;
+
         amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-        if (!gfx_v8_0_wait_for_idle(adev))
+        if (!gfx_v8_0_wait_for_idle(ip_block))
                 gfx_v8_0_cp_enable(adev, false);
         else
                 pr_err("cp is busy, skip halt cp\n");
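Note the pattern introduced in the gfx_v8_0_hw_fini() hunk above: a caller that is not itself on the IP-dispatch path has no ip_block argument to forward, so it looks up its own block with amdgpu_device_ip_get_ip_block() and bails out if the lookup fails. The same lookup recurs in the gmc_v6_0, gmc_v8_0, uvd, and vce hunks below.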
@@ -4095,13 +4095,13 @@ static bool gfx_v9_0_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v9_0_wait_for_idle(void *handle)
+static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (gfx_v9_0_is_idle(handle))
+                if (gfx_v9_0_is_idle(adev))
                         return 0;
                 udelay(1);
         }

@@ -2410,13 +2410,13 @@ static bool gfx_v9_4_3_is_idle(void *handle)
         return true;
 }
 
-static int gfx_v9_4_3_wait_for_idle(void *handle)
+static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (gfx_v9_4_3_is_idle(handle))
+                if (gfx_v9_4_3_is_idle(adev))
                         return 0;
                 udelay(1);
         }

@@ -1082,7 +1082,7 @@ static bool gmc_v10_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v10_0_wait_for_idle(void *handle)
+static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* There is no need to wait for MC idle in GMC v10.*/
         return 0;

@@ -990,7 +990,7 @@ static bool gmc_v11_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v11_0_wait_for_idle(void *handle)
+static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* There is no need to wait for MC idle in GMC v11.*/
         return 0;

@@ -974,7 +974,7 @@ static bool gmc_v12_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v12_0_wait_for_idle(void *handle)
+static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* There is no need to wait for MC idle in GMC v11.*/
         return 0;
@@ -43,7 +43,7 @@
 
 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v6_0_wait_for_idle(void *handle);
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
 
 MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");

@@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
 {
         u32 blackout;
+        struct amdgpu_ip_block *ip_block;
 
-        gmc_v6_0_wait_for_idle((void *)adev);
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+        if (!ip_block)
+                return;
+
+        gmc_v6_0_wait_for_idle(ip_block);
 
         blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
         if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {

@@ -213,6 +218,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 {
         int i, j;
+        struct amdgpu_ip_block *ip_block;
+
 
         /* Initialize HDP */
         for (i = 0, j = 0; i < 32; i++, j += 0x6) {

@@ -224,7 +231,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
         }
         WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-        if (gmc_v6_0_wait_for_idle((void *)adev))
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+        if (!ip_block)
+                return;
+
+        if (gmc_v6_0_wait_for_idle(ip_block))
                 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 
         if (adev->mode_info.num_crtc) {

@@ -251,7 +262,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
         WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
         WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
 
-        if (gmc_v6_0_wait_for_idle((void *)adev))
+        if (gmc_v6_0_wait_for_idle(ip_block))
                 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 }
 

@@ -950,6 +961,7 @@ static int gmc_v6_0_resume(void *handle)
 static bool gmc_v6_0_is_idle(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
         u32 tmp = RREG32(mmSRBM_STATUS);
 
         if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |

@@ -959,13 +971,13 @@ static bool gmc_v6_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v6_0_wait_for_idle(void *handle)
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned int i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (gmc_v6_0_is_idle(handle))
+                if (gmc_v6_0_is_idle(adev))
                         return 0;
                 udelay(1);
         }

@@ -976,6 +988,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
 static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 {
         struct amdgpu_device *adev = ip_block->adev;
+
         u32 srbm_soft_reset = 0;
         u32 tmp = RREG32(mmSRBM_STATUS);
 

@@ -992,7 +1005,8 @@ static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 
         if (srbm_soft_reset) {
                 gmc_v6_0_mc_stop(adev);
-                if (gmc_v6_0_wait_for_idle(adev))
+
+                if (gmc_v6_0_wait_for_idle(ip_block))
                         dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
 
                 tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -52,7 +52,7 @@
 
 static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v7_0_wait_for_idle(void *handle);
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
 
 MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
 MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");

@@ -1146,11 +1146,11 @@ static bool gmc_v7_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v7_0_wait_for_idle(void *handle)
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned int i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -53,7 +53,7 @@
 
 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v8_0_wait_for_idle(void *handle);
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
 
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");

@@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
 {
         u32 blackout;
+        struct amdgpu_ip_block *ip_block;
 
-        gmc_v8_0_wait_for_idle(adev);
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+        if (!ip_block)
+                return;
+
+        gmc_v8_0_wait_for_idle(ip_block);
 
         blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
         if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {

@@ -426,6 +431,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 {
+        struct amdgpu_ip_block *ip_block;
         u32 tmp;
         int i, j;
 

@@ -439,7 +445,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
         }
         WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-        if (gmc_v8_0_wait_for_idle((void *)adev))
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+        if (!ip_block)
+                return;
+
+        if (gmc_v8_0_wait_for_idle(ip_block))
                 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 
         if (adev->mode_info.num_crtc) {

@@ -474,7 +484,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
         WREG32(mmMC_VM_AGP_BASE, 0);
         WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
         WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
-        if (gmc_v8_0_wait_for_idle((void *)adev))
+        if (gmc_v8_0_wait_for_idle(ip_block))
                 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 
         WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

@@ -1267,11 +1277,11 @@ static bool gmc_v8_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v8_0_wait_for_idle(void *handle)
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned int i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -1324,7 +1334,7 @@ static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
                 return 0;
 
         gmc_v8_0_mc_stop(adev);
-        if (gmc_v8_0_wait_for_idle(adev))
+        if (gmc_v8_0_wait_for_idle(ip_block))
                 dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
 
         return 0;
@@ -2455,7 +2455,7 @@ static bool gmc_v9_0_is_idle(void *handle)
         return true;
 }
 
-static int gmc_v9_0_wait_for_idle(void *handle)
+static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* There is no need to wait for MC idle in GMC v9.*/
         return 0;

@@ -352,11 +352,11 @@ static bool iceland_ih_is_idle(void *handle)
         return true;
 }
 
-static int iceland_ih_wait_for_idle(void *handle)
+static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */

@@ -664,7 +664,7 @@ static bool ih_v6_0_is_idle(void *handle)
         return true;
 }
 
-static int ih_v6_0_wait_for_idle(void *handle)
+static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;

@@ -643,7 +643,7 @@ static bool ih_v6_1_is_idle(void *handle)
         return true;
 }
 
-static int ih_v6_1_wait_for_idle(void *handle)
+static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;

@@ -633,7 +633,7 @@ static bool ih_v7_0_is_idle(void *handle)
         return true;
 }
 
-static int ih_v7_0_wait_for_idle(void *handle)
+static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;
@@ -666,9 +666,9 @@ static bool jpeg_v2_0_is_idle(void *handle)
                 UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
 }
 
-static int jpeg_v2_0_wait_for_idle(void *handle)
+static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int ret;
 
         ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,

@@ -501,9 +501,9 @@ static bool jpeg_v2_5_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v2_5_wait_for_idle(void *handle)
+static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret;
 
         for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {

@@ -459,9 +459,9 @@ static bool jpeg_v3_0_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v3_0_wait_for_idle(void *handle)
+static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
                 UVD_JRBC_STATUS__RB_JOB_DONE_MASK,

@@ -621,9 +621,9 @@ static bool jpeg_v4_0_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v4_0_wait_for_idle(void *handle)
+static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
                 UVD_JRBC_STATUS__RB_JOB_DONE_MASK,

@@ -923,9 +923,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v4_0_3_wait_for_idle(void *handle)
+static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int ret = 0;
         int i, j;
 

@@ -637,9 +637,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v4_0_5_wait_for_idle(void *handle)
+static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i;
 
         for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {

@@ -546,9 +546,9 @@ static bool jpeg_v5_0_0_is_idle(void *handle)
         return ret;
 }
 
-static int jpeg_v5_0_0_wait_for_idle(void *handle)
+static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
                 UVD_JRBC_STATUS__RB_JOB_DONE_MASK,

@@ -638,7 +638,7 @@ static bool navi10_ih_is_idle(void *handle)
         return true;
 }
 
-static int navi10_ih_wait_for_idle(void *handle)
+static int navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;
@@ -1048,7 +1048,7 @@ static bool nv_common_is_idle(void *handle)
         return true;
 }
 
-static int nv_common_wait_for_idle(void *handle)
+static int nv_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -927,11 +927,11 @@ static bool sdma_v2_4_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v2_4_wait_for_idle(void *handle)
+static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |

@@ -1214,11 +1214,11 @@ static bool sdma_v3_0_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v3_0_wait_for_idle(void *handle)
+static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |

@@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v4_0_wait_for_idle(void *handle)
+static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i, j;
         u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 for (j = 0; j < adev->sdma.num_instances; j++) {

@@ -1540,11 +1540,11 @@ static bool sdma_v4_4_2_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v4_4_2_wait_for_idle(void *handle)
+static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i, j;
         u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 for (j = 0; j < adev->sdma.num_instances; j++) {

@@ -1531,11 +1531,11 @@ static bool sdma_v5_0_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v5_0_wait_for_idle(void *handle)
+static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 sdma0, sdma1;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));

@@ -1431,11 +1431,11 @@ static bool sdma_v5_2_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v5_2_wait_for_idle(void *handle)
+static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 sdma0, sdma1, sdma2, sdma3;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));

@@ -1429,11 +1429,11 @@ static bool sdma_v6_0_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v6_0_wait_for_idle(void *handle)
+static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 sdma0, sdma1;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));

@@ -1387,11 +1387,11 @@ static bool sdma_v7_0_is_idle(void *handle)
         return true;
 }
 
-static int sdma_v7_0_wait_for_idle(void *handle)
+static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 sdma0, sdma1;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
@@ -2669,7 +2669,7 @@ static bool si_common_is_idle(void *handle)
         return true;
 }
 
-static int si_common_wait_for_idle(void *handle)
+static int si_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -550,6 +550,7 @@ static int si_dma_resume(void *handle)
 static bool si_dma_is_idle(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
         u32 tmp = RREG32(SRBM_STATUS2);
 
         if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))

@@ -558,13 +559,13 @@ static bool si_dma_is_idle(void *handle)
         return true;
 }
 
-static int si_dma_wait_for_idle(void *handle)
+static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (si_dma_is_idle(handle))
+                if (si_dma_is_idle(adev))
                         return 0;
                 udelay(1);
         }

@@ -227,13 +227,13 @@ static bool si_ih_is_idle(void *handle)
         return true;
 }
 
-static int si_ih_wait_for_idle(void *handle)
+static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (si_ih_is_idle(handle))
+                if (si_ih_is_idle(adev))
                         return 0;
                 udelay(1);
         }

@@ -1341,7 +1341,7 @@ static bool soc15_common_is_idle(void *handle)
         return true;
 }
 
-static int soc15_common_wait_for_idle(void *handle)
+static int soc15_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -934,7 +934,7 @@ static bool soc21_common_is_idle(void *handle)
         return true;
 }
 
-static int soc21_common_wait_for_idle(void *handle)
+static int soc21_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -531,7 +531,7 @@ static bool soc24_common_is_idle(void *handle)
         return true;
 }
 
-static int soc24_common_wait_for_idle(void *handle)
+static int soc24_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -370,11 +370,11 @@ static bool tonga_ih_is_idle(void *handle)
         return true;
 }
 
-static int tonga_ih_wait_for_idle(void *handle)
+static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
         u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 /* read MC_STATUS */
@@ -766,10 +766,10 @@ static bool uvd_v3_1_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
 
-static int uvd_v3_1_wait_for_idle(void *handle)
+static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))

@@ -666,10 +666,10 @@ static bool uvd_v4_2_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
 
-static int uvd_v4_2_wait_for_idle(void *handle)
+static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))

@@ -588,10 +588,10 @@ static bool uvd_v5_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
 
-static int uvd_v5_0_wait_for_idle(void *handle)
+static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))

@@ -796,10 +796,15 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         bool enable = (state == AMD_CG_STATE_GATE);
+        struct amdgpu_ip_block *ip_block;
+
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+        if (!ip_block)
+                return -EINVAL;
 
         if (enable) {
                 /* wait for STATUS to clear */
-                if (uvd_v5_0_wait_for_idle(handle))
+                if (uvd_v5_0_wait_for_idle(ip_block))
                         return -EBUSY;
                 uvd_v5_0_enable_clock_gating(adev, true);
 

@@ -1151,13 +1151,13 @@ static bool uvd_v6_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
 
-static int uvd_v6_0_wait_for_idle(void *handle)
+static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (uvd_v6_0_is_idle(handle))
+                if (uvd_v6_0_is_idle(adev))
                         return 0;
         }
         return -ETIMEDOUT;

@@ -1455,11 +1455,16 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_ip_block *ip_block;
         bool enable = (state == AMD_CG_STATE_GATE);
 
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+        if (!ip_block)
+                return -EINVAL;
+
         if (enable) {
                 /* wait for STATUS to clear */
-                if (uvd_v6_0_wait_for_idle(handle))
+                if (uvd_v6_0_wait_for_idle(ip_block))
                         return -EBUSY;
                 uvd_v6_0_enable_clock_gating(adev, true);
                 /* enable HW gates because UVD is idle */

@@ -1471,10 +1471,10 @@ static bool uvd_v7_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
 
-static int uvd_v7_0_wait_for_idle(void *handle)
+static int uvd_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
                 if (uvd_v7_0_is_idle(handle))
@@ -1728,6 +1728,11 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         bool enable = (state == AMD_CG_STATE_GATE);
+        struct amdgpu_ip_block *ip_block;
+
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+        if (!ip_block)
+                return -EINVAL;
 
         uvd_v7_0_set_bypass_mode(adev, enable);
 

@@ -1739,7 +1744,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
                 uvd_v7_0_set_sw_clock_gating(adev);
         } else {
                 /* wait for STATUS to clear */
-                if (uvd_v7_0_wait_for_idle(handle))
+                if (uvd_v7_0_wait_for_idle(ip_block))
                         return -EBUSY;
 
                 /* enable HW gates because UVD is idle */

@@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
 }
 
-static int vce_v2_0_wait_for_idle(void *handle)
+static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         unsigned i;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                if (vce_v2_0_is_idle(handle))
+                if (vce_v2_0_is_idle(adev))
                         return 0;
         }
         return -ETIMEDOUT;

@@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
 
 static int vce_v2_0_stop(struct amdgpu_device *adev)
 {
+        struct amdgpu_ip_block *ip_block;
         int i;
         int status;
 
+
         if (vce_v2_0_lmi_clean(adev)) {
                 DRM_INFO("vce is not idle \n");
                 return 0;
         }
 
-        if (vce_v2_0_wait_for_idle(adev)) {
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+        if (!ip_block)
+                return -EINVAL;
+
+        if (vce_v2_0_wait_for_idle(ip_block)) {
                 DRM_INFO("VCE is busy, Can't set clock gating");
                 return 0;
         }

@@ -64,7 +64,7 @@
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v3_0_wait_for_idle(void *handle);
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
 static int vce_v3_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state);
 /**

@@ -489,10 +489,15 @@ static int vce_v3_0_hw_fini(void *handle)
 {
         int r;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_ip_block *ip_block;
+
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+        if (!ip_block)
+                return -EINVAL;
 
         cancel_delayed_work_sync(&adev->vce.idle_work);
 
-        r = vce_v3_0_wait_for_idle(handle);
+        r = vce_v3_0_wait_for_idle(ip_block);
         if (r)
                 return r;
 

@@ -609,13 +614,13 @@ static bool vce_v3_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
-static int vce_v3_0_wait_for_idle(void *handle)
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++)
-                if (vce_v3_0_is_idle(handle))
+                if (vce_v3_0_is_idle(adev))
                         return 0;
 
         return -ETIMEDOUT;

@@ -539,11 +539,16 @@ static int vce_v4_0_hw_init(void *handle)
 static int vce_v4_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_ip_block *ip_block;
+
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+        if (!ip_block)
+                return -EINVAL;
 
         cancel_delayed_work_sync(&adev->vce.idle_work);
 
         if (!amdgpu_sriov_vf(adev)) {
-                /* vce_v4_0_wait_for_idle(handle); */
+                /* vce_v4_0_wait_for_idle(ip_block); */
                 vce_v4_0_stop(adev);
         } else {
                 /* full access mode, so don't touch any VCE register */

@@ -703,10 +708,10 @@ static bool vce_v4_0_is_idle(void *handle)
         return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
-static int vce_v4_0_wait_for_idle(void *handle)
+static int vce_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++)
                 if (vce_v4_0_is_idle(handle))
@@ -1384,9 +1384,9 @@ static bool vcn_v1_0_is_idle(void *handle)
         return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
 }
 
-static int vcn_v1_0_wait_for_idle(void *handle)
+static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int ret;
 
         ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,

@@ -1326,9 +1326,9 @@ static bool vcn_v2_0_is_idle(void *handle)
         return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
 }
 
-static int vcn_v2_0_wait_for_idle(void *handle)
+static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int ret;
 
         ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,

@@ -1786,9 +1786,9 @@ static bool vcn_v2_5_is_idle(void *handle)
         return ret;
 }
 
-static int vcn_v2_5_wait_for_idle(void *handle)
+static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -2116,9 +2116,9 @@ static bool vcn_v3_0_is_idle(void *handle)
         return ret;
 }
 
-static int vcn_v3_0_wait_for_idle(void *handle)
+static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -1979,9 +1979,9 @@ static bool vcn_v4_0_is_idle(void *handle)
  *
  * Wait for VCN block idle
  */
-static int vcn_v4_0_wait_for_idle(void *handle)
+static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -1571,9 +1571,9 @@ static bool vcn_v4_0_3_is_idle(void *handle)
  *
  * Wait for VCN block idle
  */
-static int vcn_v4_0_3_wait_for_idle(void *handle)
+static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -1473,9 +1473,9 @@ static bool vcn_v4_0_5_is_idle(void *handle)
  *
  * Wait for VCN block idle
 */
-static int vcn_v4_0_5_wait_for_idle(void *handle)
+static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -1200,9 +1200,9 @@ static bool vcn_v5_0_0_is_idle(void *handle)
  *
  * Wait for VCN block idle
 */
-static int vcn_v5_0_0_wait_for_idle(void *handle)
+static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i, ret = 0;
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

@@ -570,7 +570,7 @@ static bool vega10_ih_is_idle(void *handle)
         return true;
 }
 
-static int vega10_ih_wait_for_idle(void *handle)
+static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;

@@ -636,7 +636,7 @@ static bool vega20_ih_is_idle(void *handle)
         return true;
 }
 
-static int vega20_ih_wait_for_idle(void *handle)
+static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* todo */
         return -ETIMEDOUT;
@@ -1750,7 +1750,7 @@ static bool vi_common_is_idle(void *handle)
         return true;
 }
 
-static int vi_common_wait_for_idle(void *handle)
+static int vi_common_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -320,7 +320,7 @@ static bool dm_is_idle(void *handle)
         return true;
 }
 
-static int dm_wait_for_idle(void *handle)
+static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* XXX todo */
         return 0;
@@ -391,7 +391,7 @@ struct amd_ip_funcs {
         int (*suspend)(void *handle);
         int (*resume)(void *handle);
         bool (*is_idle)(void *handle);
-        int (*wait_for_idle)(void *handle);
+        int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
         bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
         int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
         int (*soft_reset)(struct amdgpu_ip_block *ip_block);
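The struct amd_ip_funcs hunk above is the interface change that drives everything else: wait_for_idle now matches the signature already used by check_soft_reset, pre_soft_reset, and soft_reset. On the calling side, core code hands each block its own descriptor instead of (void *)adev, as in the amdgpu_device_ip_wait_for_idle() hunk near the top. A minimal sketch of that dispatch (wait_for_idle_all is a hypothetical helper for illustration; num_ip_blocks is assumed from the amdgpu_device layout, and the other fields are the ones visible in the diff):

/* Walk the IP blocks and let each valid one wait on itself. */
static int wait_for_idle_all(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                /* Pass the block itself; the hook derives adev via ip_block->adev. */
                r = adev->ip_blocks[i].version->funcs->wait_for_idle(&adev->ip_blocks[i]);
                if (r)
                        return r;
        }
        return 0;
}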
@@ -3099,7 +3099,7 @@ static bool kv_dpm_is_idle(void *handle)
         return true;
 }
 
-static int kv_dpm_wait_for_idle(void *handle)
+static int kv_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }

@@ -7843,7 +7843,7 @@ static bool si_dpm_is_idle(void *handle)
         return true;
 }
 
-static int si_dpm_wait_for_idle(void *handle)
+static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         /* XXX */
         return 0;

@@ -245,7 +245,7 @@ static bool pp_is_idle(void *handle)
         return false;
 }
 
-static int pp_wait_for_idle(void *handle)
+static int pp_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         return 0;
 }