drm/amd/display: Implement multiple secure display
[Why]
Current secure display only works with a single display. Make it work
with multiple displays.

[How]
Create a secure_display_context for each CRTC instance to store its own
Region of Interest (ROI) information.

v2: squash in warning fix (Alex)

Reviewed-by: Wayne Lin <Wayne.Lin@amd.com>
Acked-by: Jasdeep Dhillon <jdhillon@amd.com>
Signed-off-by: Alan Liu <HaoPing.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7a18e089ef
commit 1b11ff764a
7 changed files with 119 additions and 141 deletions
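Before the diff, a minimal, self-contained sketch of the pattern the patch moves to. This is a user-space C analogue, not driver code: the work_struct, rect, and container_of stand-ins below are simplified placeholders, and only the names (secure_display_context, forward_roi_work, notify_ta_work, rect) are taken from the patch. The point is that each CRTC gets its own context with embedded work items, so a handler recovers its own ROI via container_of() instead of reading a single shared crc_rd_work.

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel's work_struct / rect types (illustrative only). */
struct work_struct { void (*func)(struct work_struct *work); };
struct rect { int x, y, width, height; };

/* One context per CRTC, mirroring the patch's secure_display_context:
 * the work items are embedded in the context, so the handler can find
 * its own ROI with container_of() instead of shared global state.
 */
struct secure_display_context {
	struct work_struct forward_roi_work;
	struct work_struct notify_ta_work;
	int crtc_id;            /* placeholder for struct drm_crtc * */
	struct rect rect;       /* Region of Interest for this CRTC */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void forward_roi_handler(struct work_struct *work)
{
	struct secure_display_context *ctx =
		container_of(work, struct secure_display_context, forward_roi_work);

	printf("crtc %d: forward ROI %dx%d+%d+%d\n", ctx->crtc_id,
	       ctx->rect.width, ctx->rect.height, ctx->rect.x, ctx->rect.y);
}

int main(void)
{
	/* One context per CRTC instead of a single shared work item. */
	struct secure_display_context ctxs[4];

	for (int i = 0; i < 4; i++) {
		ctxs[i] = (struct secure_display_context){
			.forward_roi_work = { .func = forward_roi_handler },
			.crtc_id = i,
			.rect = { .x = 0, .y = 0, .width = 128 + i, .height = 64 },
		};
	}

	/* "Schedule" each CRTC's work; each handler sees only its own ROI. */
	for (int i = 0; i < 4; i++)
		ctxs[i].forward_roi_work.func(&ctxs[i].forward_roi_work);

	return 0;
}
```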
@@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	}
 #endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev->dm.dc->caps.max_links);
 #endif
 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 		init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1737,10 +1737,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-	if (adev->dm.crc_rd_wrk) {
-		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
-		kfree(adev->dm.crc_rd_wrk);
-		adev->dm.crc_rd_wrk = NULL;
+	if (adev->dm.secure_display_ctxs) {
+		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+			if (adev->dm.secure_display_ctxs[i].crtc) {
+				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+			}
+		}
+		kfree(adev->dm.secure_display_ctxs);
+		adev->dm.secure_display_ctxs = NULL;
 	}
 #endif
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -8403,9 +8408,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 #ifdef CONFIG_DEBUG_FS
 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-		struct crc_rd_work *crc_rd_wrk;
-#endif
 #endif
 		/* Count number of newly disabled CRTCs for dropping PM refs later. */
 		if (old_crtc_state->active && !new_crtc_state->active)
@@ -8418,9 +8420,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		update_stream_irq_parameters(dm, dm_new_crtc_state);
 
 #ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-		crc_rd_wrk = dm->crc_rd_wrk;
-#endif
 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 		cur_crc_src = acrtc->dm_irq_params.crc_src;
 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8449,10 +8448,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		if (amdgpu_dm_crc_window_is_activated(crtc)) {
 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 			acrtc->dm_irq_params.window_param.update_win = true;
+
+			/**
+			 * It takes 2 frames for HW to stably generate CRC when
+			 * resuming from suspend, so we set skip_frame_cnt 2.
+			 */
 			acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
-			spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-			crc_rd_wrk->crtc = crtc;
-			spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		}
 #endif
@@ -494,11 +494,12 @@ struct amdgpu_display_manager {
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 	/**
-	 * @crc_rd_wrk:
+	 * @secure_display_ctxs:
 	 *
-	 * Work to be executed in a separate thread to communicate with PSP.
+	 * Store the ROI information and the work_struct to command dmub and psp for
+	 * all crtcs.
 	 */
-	struct crc_rd_work *crc_rd_wrk;
+	struct secure_display_context *secure_display_ctxs;
 #endif
 	/**
 	 * @hpd_rx_offload_wq:
@@ -101,35 +101,38 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
 
 static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
 {
-	struct crc_rd_work *crc_rd_wrk;
-	struct amdgpu_device *adev;
+	struct secure_display_context *secure_display_ctx;
 	struct psp_context *psp;
 	struct securedisplay_cmd *securedisplay_cmd;
 	struct drm_crtc *crtc;
-	uint8_t phy_id;
+	struct dc_stream_state *stream;
+	uint8_t phy_inst;
 	int ret;
 
-	crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work);
-	spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-	crtc = crc_rd_wrk->crtc;
+	secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
+	crtc = secure_display_ctx->crtc;
 
 	if (!crtc) {
-		spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
 		return;
 	}
 
-	adev = drm_to_adev(crtc->dev);
-	psp = &adev->psp;
-	phy_id = crc_rd_wrk->phy_inst;
-	spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+	psp = &drm_to_adev(crtc->dev)->psp;
+	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+	phy_inst = stream->link->link_enc_hw_inst;
 
+	/* need lock for multiple crtcs to use the command buffer */
 	mutex_lock(&psp->securedisplay_context.mutex);
 
 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
 						TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
-	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id =
-						phy_id;
+
+	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+	/* PSP TA is expected to finish data transmission over I2C within current frame,
+	 * even there are up to 4 crtcs request to send in this frame.
+	 */
 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
 
 	if (!ret) {
 		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
 			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
@@ -142,17 +145,23 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
 static void
 amdgpu_dm_forward_crc_window(struct work_struct *work)
 {
-	struct crc_fw_work *crc_fw_wrk;
+	struct secure_display_context *secure_display_ctx;
 	struct amdgpu_display_manager *dm;
+	struct drm_crtc *crtc;
+	struct dc_stream_state *stream;
 
-	crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
-	dm = crc_fw_wrk->dm;
+	secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
+	crtc = secure_display_ctx->crtc;
+
+	if (!crtc)
+		return;
+
+	dm = &drm_to_adev(crtc->dev)->dm;
+	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
 
 	mutex_lock(&dm->dc_lock);
-	dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
+	dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
 	mutex_unlock(&dm->dc_lock);
-
-	kfree(crc_fw_wrk);
 }
 
 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
@@ -189,6 +198,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 					struct dm_crtc_state *dm_crtc_state,
 					enum amdgpu_dm_pipe_crc_source source)
 {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	int i;
+#endif
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dc_stream_state *stream_state = dm_crtc_state->stream;
 	bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -200,21 +212,20 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 
 	mutex_lock(&adev->dm.dc_lock);
 
-	/* Enable CRTC CRC generation if necessary. */
+	/* Enable or disable CRTC CRC generation */
 	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 		/* Disable secure_display if it was enabled */
 		if (!enable) {
-			if (adev->dm.crc_rd_wrk) {
-				flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
-				spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
-
-				if (adev->dm.crc_rd_wrk->crtc == crtc) {
+			if (adev->dm.secure_display_ctxs) {
+				for (i = 0; i < adev->mode_info.num_crtc; i++) {
+					if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
 						/* stop ROI update on this crtc */
-					dc_stream_forward_crc_window(stream_state->ctx->dc,
-							NULL, stream_state, true);
-					adev->dm.crc_rd_wrk->crtc = NULL;
+						flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+						dc_stream_forward_crc_window(stream_state, NULL, true);
+						adev->dm.secure_display_ctxs[i].crtc = NULL;
+					}
 				}
-				spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
 			}
 		}
 #endif
@@ -347,6 +358,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 	}
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	/* Reset secure_display when we change crc source from debugfs */
 	amdgpu_dm_set_crc_window_default(crtc);
 #endif
 
@@ -456,14 +468,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
 {
-	struct dc_stream_state *stream_state;
 	struct drm_device *drm_dev = NULL;
 	enum amdgpu_dm_pipe_crc_source cur_crc_src;
 	struct amdgpu_crtc *acrtc = NULL;
 	struct amdgpu_device *adev = NULL;
-	struct crc_rd_work *crc_rd_wrk;
-	struct crc_fw_work *crc_fw_wrk;
-	unsigned long flags1, flags2;
+	struct secure_display_context *secure_display_ctx = NULL;
+	unsigned long flags1;
 
 	if (crtc == NULL)
 		return;
@@ -473,75 +483,68 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
 	drm_dev = crtc->dev;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags1);
-	stream_state = acrtc->dm_irq_params.stream;
 	cur_crc_src = acrtc->dm_irq_params.crc_src;
 
 	/* Early return if CRC capture is not enabled. */
-	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
-		goto cleanup;
-
-	if (!dm_is_crc_source_crtc(cur_crc_src))
+	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
+	    !dm_is_crc_source_crtc(cur_crc_src))
 		goto cleanup;
 
 	if (!acrtc->dm_irq_params.window_param.activated)
 		goto cleanup;
 
-	if (acrtc->dm_irq_params.window_param.update_win) {
 	if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
 		acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
 		goto cleanup;
 	}
 
-		/* prepare work for dmub to update ROI */
-		crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
-		if (!crc_fw_wrk)
-			goto cleanup;
+	secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
+	secure_display_ctx->crtc = crtc;
 
-		INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
-		crc_fw_wrk->dm = &adev->dm;
-		crc_fw_wrk->stream = stream_state;
-		crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start;
-		crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start;
-		crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end -
+	if (acrtc->dm_irq_params.window_param.update_win) {
+		/* prepare work for dmub to update ROI */
+		secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
+		secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
+		secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
 				acrtc->dm_irq_params.window_param.x_start;
-		crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end -
+		secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
 				acrtc->dm_irq_params.window_param.y_start;
-		schedule_work(&crc_fw_wrk->forward_roi_work);
+		schedule_work(&secure_display_ctx->forward_roi_work);
 
 		acrtc->dm_irq_params.window_param.update_win = false;
+
+		/* Statically skip 1 frame, because we may need to wait below things
+		 * before sending ROI to dmub:
+		 * 1. We defer the work by using system workqueue.
+		 * 2. We may need to wait for dc_lock before accessing dmub.
+		 */
 		acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
 
 	} else {
-		if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
-			acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
-			goto cleanup;
-		}
-
-		if (adev->dm.crc_rd_wrk) {
-			crc_rd_wrk = adev->dm.crc_rd_wrk;
-			spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
-			crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
-			spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
-			schedule_work(&crc_rd_wrk->notify_ta_work);
-		}
+		/* prepare work for psp to read ROI/CRC and send to I2C */
+		schedule_work(&secure_display_ctx->notify_ta_work);
 	}
 
 cleanup:
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
 }
 
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
+struct secure_display_context *
+amdgpu_dm_crtc_secure_display_create_contexts(int num_crtc)
 {
-	struct crc_rd_work *crc_rd_wrk = NULL;
+	struct secure_display_context *secure_display_ctxs = NULL;
+	int i;
 
-	crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL);
+	secure_display_ctxs = kcalloc(num_crtc, sizeof(struct secure_display_context), GFP_KERNEL);
 
-	if (!crc_rd_wrk)
+	if (!secure_display_ctxs)
 		return NULL;
 
-	spin_lock_init(&crc_rd_wrk->crc_rd_work_lock);
-	INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+	for (i = 0; i < num_crtc; i++) {
+		INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+		INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+	}
 
-	return crc_rd_wrk;
+	return secure_display_ctxs;
 }
 #endif
@@ -45,7 +45,7 @@ struct crc_window_param {
 	uint16_t y_start;
 	uint16_t x_end;
 	uint16_t y_end;
-	/* CRC windwo is activated or not*/
+	/* CRC window is activated or not*/
 	bool activated;
 	/* Update crc window during vertical blank or not */
 	bool update_win;
@@ -53,22 +53,17 @@ struct crc_window_param {
 	int skip_frame_cnt;
 };
 
-/* read_work for driver to call PSP to read */
-struct crc_rd_work {
+struct secure_display_context {
 	/* work to notify PSP TA to transmit CRC over I2C */
 	struct work_struct notify_ta_work;
-	/* To protect crc_rd_work carried fields*/
-	spinlock_t crc_rd_work_lock;
-	struct drm_crtc *crtc;
-	uint8_t phy_inst;
-};
 
-/* forward_work for driver to forward ROI to dmu */
-struct crc_fw_work {
 	/* work to forward ROI to dmcu/dmub */
 	struct work_struct forward_roi_work;
-	struct amdgpu_display_manager *dm;
-	struct dc_stream_state *stream;
+
+	struct drm_crtc *crtc;
+
+	/* Region of Interest (ROI) */
 	struct rect rect;
-	bool is_stop_cmd;
 };
 #endif
@@ -100,11 +95,11 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
 void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void);
+struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(int num_crtc);
 #else
 #define amdgpu_dm_crc_window_is_activated(x)
 #define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_work()
+#define amdgpu_dm_crtc_secure_display_create_contexts()
 #endif
 
 #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
@@ -3245,46 +3245,24 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
 */
static int crc_win_update_set(void *data, u64 val)
{
-	struct drm_crtc *new_crtc = data;
-	struct drm_crtc *old_crtc = NULL;
-	struct amdgpu_crtc *new_acrtc, *old_acrtc;
-	struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
-	struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
-
-	if (!crc_rd_wrk)
-		return 0;
+	struct drm_crtc *crtc = data;
+	struct amdgpu_crtc *acrtc;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
 	if (val) {
-		new_acrtc = to_amdgpu_crtc(new_crtc);
+		acrtc = to_amdgpu_crtc(crtc);
 		mutex_lock(&adev->dm.dc_lock);
 		/* PSR may write to OTG CRC window control register,
 		 * so close it before starting secure_display.
 		 */
-		amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);
+		amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
 
 		spin_lock_irq(&adev_to_drm(adev)->event_lock);
-		spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-		if (crc_rd_wrk->crtc) {
-			old_crtc = crc_rd_wrk->crtc;
-			old_acrtc = to_amdgpu_crtc(old_crtc);
-		}
 
-		if (old_crtc && old_crtc != new_crtc) {
-			old_acrtc->dm_irq_params.window_param.activated = false;
-			old_acrtc->dm_irq_params.window_param.update_win = false;
-			old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+		acrtc->dm_irq_params.window_param.activated = true;
+		acrtc->dm_irq_params.window_param.update_win = true;
+		acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
 
-			new_acrtc->dm_irq_params.window_param.activated = true;
-			new_acrtc->dm_irq_params.window_param.update_win = true;
-			new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
-			crc_rd_wrk->crtc = new_crtc;
-		} else {
-			new_acrtc->dm_irq_params.window_param.activated = true;
-			new_acrtc->dm_irq_params.window_param.update_win = true;
-			new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
-			crc_rd_wrk->crtc = new_crtc;
-		}
-		spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
 		spin_unlock_irq(&adev_to_drm(adev)->event_lock);
 		mutex_unlock(&adev->dm.dc_lock);
 	}
@@ -518,14 +518,15 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
 }
 
 bool
-dc_stream_forward_crc_window(struct dc *dc,
-		struct rect *rect, struct dc_stream_state *stream, bool is_stop)
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+		struct rect *rect, bool is_stop)
 {
 	struct dmcu *dmcu;
 	struct dc_dmub_srv *dmub_srv;
 	struct otg_phy_mux mux_mapping;
 	struct pipe_ctx *pipe;
 	int i;
+	struct dc *dc = stream->ctx->dc;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -543,9 +543,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
 				unsigned int *nom_v_pos);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_crc_window(struct dc *dc,
+bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
 		struct rect *rect,
-		struct dc_stream_state *stream,
 		bool is_stop);
 #endif
 
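Closing out, a small self-contained sketch (plain C, illustrative only, not driver code) of the ROI bookkeeping the hunks above perform: the debugfs knobs record window start/end coordinates per CRTC in crc_window_param, and the CRC window IRQ handler converts them into the x/y/width/height rectangle stored in secure_display_context before scheduling forward_roi_work. The struct layouts and values below are simplified stand-ins; only the field names mirror the diff.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified mirrors of crc_window_param and the ROI rect from the patch. */
struct crc_window_param {
	uint16_t x_start, y_start;
	uint16_t x_end, y_end;
};

struct rect {
	int x, y, width, height;
};

/* Convert a start/end window into the ROI rectangle, as the CRC window
 * IRQ handler does before scheduling forward_roi_work.
 */
static struct rect window_to_roi(const struct crc_window_param *win)
{
	struct rect roi = {
		.x = win->x_start,
		.y = win->y_start,
		.width = win->x_end - win->x_start,
		.height = win->y_end - win->y_start,
	};
	return roi;
}

int main(void)
{
	struct crc_window_param win = { .x_start = 100, .y_start = 50,
					.x_end = 740, .y_end = 530 };
	struct rect roi = window_to_roi(&win);

	printf("ROI: %dx%d at (%d,%d)\n", roi.width, roi.height, roi.x, roi.y);
	return 0;
}
```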