
scsi: ufs: core: Prepare to introduce a new clock_gating lock

Remove the hba->clk_gating.active_reqs check from the ufshcd_is_ufs_dev_busy()
function so that the clock-gating logic is separated from the general
device-busy check; call sites that still depend on it now test
clk_gating.active_reqs explicitly.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
Link: https://lore.kernel.org/r/20241124070808.194860-3-avri.altman@wdc.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Avri Altman 2024-11-24 09:08:06 +02:00 committed by Martin K. Petersen
parent e738ba458e
commit 7869c6521f

@@ -266,8 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
 
 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
 {
-        return hba->clk_gating.active_reqs || hba->outstanding_reqs ||
-               ufshcd_has_pending_tasks(hba);
+        return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
 }
 
 static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -1949,7 +1948,9 @@ static void ufshcd_gate_work(struct work_struct *work)
                 goto rel_lock;
         }
 
-        if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+        if (ufshcd_is_ufs_dev_busy(hba) ||
+            hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+            hba->clk_gating.active_reqs)
                 goto rel_lock;
 
         spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -8226,7 +8227,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
         hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
 
         /* Update RTC only when there are no requests in progress and UFSHCI is operational */
-        if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+        if (!ufshcd_is_ufs_dev_busy(hba) &&
+            hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+            !hba->clk_gating.active_reqs)
                 ufshcd_update_rtc(hba);
 
         if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
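To illustrate the pattern the commit describes in isolation, here is a minimal, self-contained sketch: the "device busy" helper no longer looks at clock-gating state, so a caller that still cares about gating combines the helper with an explicit check. toy_hba, dev_busy() and can_gate_clocks() are hypothetical stand-ins (the UFSHCD_STATE check is omitted for brevity); this is not kernel code.

/* Toy model of the refactor above, not the UFS driver itself. */
#include <stdbool.h>
#include <stdio.h>

struct toy_hba {                        /* stand-in for struct ufs_hba */
        unsigned long outstanding_reqs;
        bool has_pending_tasks;
        int clk_gating_active_reqs;     /* stand-in for hba->clk_gating.active_reqs */
};

/* Mirrors the new ufshcd_is_ufs_dev_busy(): purely device-side state,
 * no clock-gating bookkeeping. */
static bool dev_busy(const struct toy_hba *hba)
{
        return hba->outstanding_reqs || hba->has_pending_tasks;
}

/* Mirrors the updated callers: the gating check is made explicitly at
 * the call site, which is what prepares it to move under a dedicated
 * clock_gating lock later. */
static bool can_gate_clocks(const struct toy_hba *hba)
{
        return !dev_busy(hba) && hba->clk_gating_active_reqs == 0;
}

int main(void)
{
        struct toy_hba hba = { .outstanding_reqs = 0,
                               .has_pending_tasks = false,
                               .clk_gating_active_reqs = 1 };

        /* Device is idle, but an active gating reference still blocks gating. */
        printf("dev_busy=%d can_gate=%d\n", dev_busy(&hba), can_gate_clocks(&hba));
        return 0;
}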