1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

drm/amd/display: Add new DCN401 sources

Add initial support for DCN 4.0.1.

Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Aurabindo Pillai 2024-04-19 12:02:53 -04:00 committed by Alex Deucher
parent 59a0c03a50
commit 70839da636
103 changed files with 61642 additions and 0 deletions

View file

@ -0,0 +1,46 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

/* Message interface between the display driver (DAL) and the SMU firmware
 * for DCN 4.0.1: response codes, message IDs, and message argument values. */
#ifndef DALSMC_H
#define DALSMC_H

/* Version of this DAL<->SMU message interface definition. */
#define DALSMC_VERSION 0x1

// SMU Response Codes:
#define DALSMC_Result_OK 0x1
#define DALSMC_Result_Failed 0xFF
#define DALSMC_Result_UnknownCmd 0xFE
#define DALSMC_Result_CmdRejectedPrereq 0xFD
#define DALSMC_Result_CmdRejectedBusy 0xFC

// Message Definitions:
#define DALSMC_MSG_TestMessage 0x1
#define DALSMC_MSG_GetSmuVersion 0x2
#define DALSMC_MSG_GetDriverIfVersion 0x3
#define DALSMC_MSG_GetMsgHeaderVersion 0x4
#define DALSMC_MSG_SetDalDramAddrHigh 0x5
#define DALSMC_MSG_SetDalDramAddrLow 0x6
#define DALSMC_MSG_TransferTableSmu2Dram 0x7
#define DALSMC_MSG_TransferTableDram2Smu 0x8
#define DALSMC_MSG_SetHardMinByFreq 0x9
#define DALSMC_MSG_SetHardMaxByFreq 0xA
#define DALSMC_MSG_GetDpmFreqByIndex 0xB
#define DALSMC_MSG_GetDcModeMaxDpmFreq 0xC
#define DALSMC_MSG_SetMinDeepSleepDcfclk 0xD
#define DALSMC_MSG_NumOfDisplays 0xE
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0xF
#define DALSMC_MSG_BacoAudioD3PME 0x10
#define DALSMC_MSG_SetFclkSwitchAllow 0x11
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_MSG_SetWorstCaseUclkLatency 0x13
/* Total number of messages defined above (must track the list). */
#define DALSMC_Message_Count 0x14

/* Argument values for DALSMC_MSG_SetFclkSwitchAllow. */
typedef enum {
	FCLK_SWITCH_DISALLOW,
	FCLK_SWITCH_ALLOW,
} FclkSwitchAllow_e;

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,22 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

/* Public interface of the DCN 4.0.1 clock manager. */
#ifndef __DCN401_CLK_MGR_H_
#define __DCN401_CLK_MGR_H_

/* Initialize the clock state tracked by @clk_mgr_base. */
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);

/* Construct a DCN4.01 clock manager instance on top of @clk_mgr. */
void dcn401_clk_mgr_construct(struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg);

/* Reprogram per-pipe DPP DTOs for @context; @dppclk_khz is the new global
 * DPP clock, @safe_to_lower permits clock reductions. */
void dcn401_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
		struct dc_state *context, bool safe_to_lower, int dppclk_khz);

/* Tear down a clock manager created by dcn401_clk_mgr_construct(). */
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);

#endif /* __DCN401_CLK_MGR_H_ */

View file

@ -0,0 +1,120 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dcn401_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dalsmc.h"
#include "dcn401_smu14_driver_if.h"
/* MMIO offsets of the SMU mailbox registers used for DAL<->SMU messaging:
 * message ID, argument, and response. */
#define mmDAL_MSG_REG 0x1628A
#define mmDAL_ARG_REG 0x16273
#define mmDAL_RESP_REG 0x16274

/* Resolve a bare register name to its mm-prefixed address constant above. */
#define REG(reg_name) \
	mm ## reg_name

#include "logger_types.h"

/* Log helper routed to the DC SMU log stream. */
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
/*
 * Poll the SMU response register until it reads non-zero or the retry
 * budget runs out.  The REG_WAIT macro cannot be used here because the
 * exit condition is "register NOT EQUAL to zero" and the translation in
 * msg_if.h does not support that form.
 *
 * Returns the last value read from the response register (0 on timeout).
 */
static uint32_t dcn401_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
	uint32_t resp = 0;

	for (;;) {
		resp = REG_READ(DAL_RESP_REG);
		if (resp != 0)
			break;

		/* Sleep for millisecond-scale delays, busy-wait otherwise. */
		if (delay_us >= 1000)
			msleep(delay_us / 1000);
		else if (delay_us > 0)
			udelay(delay_us);

		if (max_retries-- == 0)
			break;
	}

	return resp;
}
/*
 * Send one message to the SMU through the mailbox registers.
 *
 * Waits for any in-flight transaction to finish, clears the response
 * register, writes the argument and message ID, then waits for the reply.
 * On success, optionally reads the SMU's result argument into @param_out.
 *
 * Returns true if the SMU acknowledged with DALSMC_Result_OK.
 */
static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
{
	/* Wait for response register to be ready */
	dcn401_smu_wait_for_response(clk_mgr, 10, 200000);

	/* Clear response register */
	REG_WRITE(DAL_RESP_REG, 0);

	/* Set the parameter register for the SMU message */
	REG_WRITE(DAL_ARG_REG, param_in);

	/* Trigger the message transaction by writing the message ID */
	REG_WRITE(DAL_MSG_REG, msg_id);

	/* Wait for response */
	if (dcn401_smu_wait_for_response(clk_mgr, 10, 200000) != DALSMC_Result_OK)
		return false;

	if (param_out)
		*param_out = REG_READ(DAL_ARG_REG);

	return true;
}
/* Inform the SMU whether FCLK p-state switching is currently allowed. */
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable)
{
	uint32_t support = enable ? FCLK_PSTATE_SUPPORTED : FCLK_PSTATE_NOTSUPPORTED;

	smu_print("FCLK P-state support value is : %d\n", enable);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetFclkSwitchAllow, support, NULL);
}
/* Report the number of cache ways reserved for SubVP/CAB to the SMU.
 * Encoding: bit 0 = enable flag (any ways reserved), upper bits = count. */
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways)
{
	uint32_t param = num_ways ? ((num_ways << 1) | 1) : 0;

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, param, NULL);
	smu_print("Numways for SubVP : %d\n", num_ways);
}
/* Ask the SMU to pull the watermark table from DRAM into SMU memory. */
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
	smu_print("SMU Transfer WM table DRAM 2 SMU\n");

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL);
}
/* Apply the BACO audio D3 PME workaround via the SMU. */
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
{
	smu_print("SMU Set PME workaround\n");

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_BacoAudioD3PME, 0, NULL);
}
/* Request a hard minimum frequency for the given clock.
 * Returns the actual frequency that was set in MHz, 0 on failure. */
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
	/* Message encoding: bits 23:16 select the clock, bits 15:0 carry
	 * the requested frequency in MHz. */
	uint32_t param = (clk << 16) | freq_mhz;
	uint32_t actual_freq = 0;

	smu_print("SMU Set hard min by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetHardMinByFreq, param, &actual_freq);

	smu_print("SMU Frequency set = %d KHz\n", actual_freq);

	return actual_freq;
}

View file

@ -0,0 +1,21 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

/* Helpers for sending clock-manager messages to the DCN 4.0.1 SMU. */
#ifndef __DCN401_CLK_MGR_SMU_MSG_H_
#define __DCN401_CLK_MGR_SMU_MSG_H_

#include "os_types.h"
#include "core_types.h"
#include "dcn32/dcn32_clk_mgr_smu_msg.h"

/* Argument values sent with DALSMC_MSG_SetFclkSwitchAllow. */
#define FCLK_PSTATE_NOTSUPPORTED 0x00
#define FCLK_PSTATE_SUPPORTED 0x01

/* Allow or disallow FCLK p-state switching. */
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
/* Report the number of cache ways reserved for SubVP/CAB. */
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
/* Transfer the watermark table from DRAM to SMU memory. */
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
/* Apply the BACO audio D3 PME workaround. */
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
/* Set a hard minimum clock frequency; returns the frequency set in MHz,
 * 0 on failure. */
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);

#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */

View file

@ -0,0 +1,66 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
//
// This is a stripped-down version of the smu13_driver_if.h file for the relevant DAL interfaces.

#define SMU14_DRIVER_IF_VERSION 0x1

//Only Clks that have DPM descriptors are listed here
typedef enum {
	PPCLK_GFXCLK = 0,
	PPCLK_SOCCLK,
	PPCLK_UCLK,
	PPCLK_FCLK,
	PPCLK_DCLK_0,
	PPCLK_VCLK_0,
	PPCLK_DISPCLK,
	PPCLK_DPPCLK,
	PPCLK_DPREFCLK,
	PPCLK_DCFCLK,
	PPCLK_DTBCLK,
	PPCLK_COUNT,
} PPCLK_e;

/* One watermark table row as consumed by the SMU firmware. */
typedef struct {
	uint8_t WmSetting;
	uint8_t Flags;
	uint8_t Padding[2];
} WatermarkRowGeneric_t;

#define NUM_WM_RANGES 4

/* Values carried in WatermarkRowGeneric_t.Flags. */
typedef enum {
	WATERMARKS_CLOCK_RANGE = 0,
	WATERMARKS_DUMMY_PSTATE,
	WATERMARKS_MALL,
	WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;

typedef struct {
	// Watermarks
	WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
} Watermarks_t;

/* Watermark table as exchanged with the SMU over DRAM; layout (including
 * padding) must match the firmware side exactly. */
typedef struct {
	Watermarks_t Watermarks;
	uint32_t Spare[16];

	uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;

// Table types (IDs used with the TransferTable* messages)
#define TABLE_PMFW_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
#define TABLE_WATERMARKS 2
#define TABLE_AVFS_PSM_DEBUG 3
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
#define TABLE_COUNT 12

View file

@ -0,0 +1,170 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_translate.h"
#include "spl/dc_spl_types.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn32/dcn32_dpp.h"
#include "dcn401/dcn401_dpp.h"
/* Per-DCN-generation SPL hooks: each generation supplies its own routine
 * for computing line buffer partitioning (used for vtap support checks). */
static struct spl_funcs dcn2_spl_funcs = {
	.spl_calc_lb_num_partitions = dscl2_spl_calc_lb_num_partitions,
};
static struct spl_funcs dcn32_spl_funcs = {
	.spl_calc_lb_num_partitions = dscl32_spl_calc_lb_num_partitions,
};
static struct spl_funcs dcn401_spl_funcs = {
	.spl_calc_lb_num_partitions = dscl401_spl_calc_lb_num_partitions,
};
/* Copy an x/y/width/height rectangle from DC's struct rect into the SPL
 * struct spl_rect representation. */
static void populate_splrect_from_rect(struct spl_rect *spl_rect, const struct rect *rect)
{
	spl_rect->width = rect->width;
	spl_rect->height = rect->height;
	spl_rect->x = rect->x;
	spl_rect->y = rect->y;
}
/* Copy an SPL rectangle back into DC's struct rect representation. */
static void populate_rect_from_splrect(struct rect *rect, const struct spl_rect *spl_rect)
{
	rect->width = spl_rect->width;
	rect->height = spl_rect->height;
	rect->x = spl_rect->x;
	rect->y = spl_rect->y;
}
/* Copy scaler tap counts (luma and chroma, both axes) from DC's
 * scaling_taps into the SPL spl_taps representation. */
static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
		const struct scaling_taps *scaling_quality)
{
	spl_scaling_quality->v_taps = scaling_quality->v_taps;
	spl_scaling_quality->h_taps = scaling_quality->h_taps;
	spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c;
	spl_scaling_quality->h_taps_c = scaling_quality->h_taps_c;
}
/* Copy SPL tap counts back into DC's scaling_taps representation. */
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
		const struct spl_taps *spl_scaling_quality)
{
	scaling_quality->v_taps = spl_scaling_quality->v_taps;
	scaling_quality->h_taps = spl_scaling_quality->h_taps;
	scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c;
	scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c;
}
/* Copy SPL scaling ratios (luma and chroma, both axes) back into DC's
 * scaling_ratios representation. */
static void populate_ratios_from_splratios(struct scaling_ratios *ratios,
		const struct spl_ratios *spl_ratios)
{
	ratios->vert = spl_ratios->vert;
	ratios->horz = spl_ratios->horz;
	ratios->vert_c = spl_ratios->vert_c;
	ratios->horz_c = spl_ratios->horz_c;
}
/* Copy SPL scaler initial-position values back into DC's scl_inits. */
static void populate_inits_from_splinits(struct scl_inits *inits,
		const struct spl_inits *spl_inits)
{
	inits->v = spl_inits->v;
	inits->h = spl_inits->h;
	inits->v_c = spl_inits->v_c;
	inits->h_c = spl_inits->h_c;
}
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx source pipe context (plane state and stream must be set)
/// @param spl_in SPL input structure to fill
void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_in *spl_in)
{
	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	const struct dc_stream_state *stream = pipe_ctx->stream;
	// Assign the function to calculate the number of partitions in the line buffer
	// This is used to determine the vtap support
	switch (plane_state->ctx->dce_version) {
	case DCN_VERSION_2_0:
		spl_in->funcs = &dcn2_spl_funcs;
		break;
	case DCN_VERSION_3_2:
		spl_in->funcs = &dcn32_spl_funcs;
		break;
	case DCN_VERSION_4_01:
		spl_in->funcs = &dcn401_spl_funcs;
		break;
	default:
		// Unrecognized versions fall back to the DCN2 behavior
		spl_in->funcs = &dcn2_spl_funcs;
	}
	// Make format field from spl_in point to plane_res scl_data format
	spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format;
	// Make view_format from basic_out point to view_format from stream
	spl_in->basic_out.view_format = (enum spl_view_3d)stream->view_format;
	// Populate spl input basic input clip rect from plane state clip rect
	populate_splrect_from_rect(&spl_in->basic_in.clip_rect, &plane_state->clip_rect);
	// Populate spl input basic out src rect from stream src rect
	populate_splrect_from_rect(&spl_in->basic_out.src_rect, &stream->src);
	// Populate spl input basic out dst rect from stream dst rect
	populate_splrect_from_rect(&spl_in->basic_out.dst_rect, &stream->dst);
	// Make spl input basic input info rotation field point to plane state rotation
	spl_in->basic_in.rotation = (enum spl_rotation_angle)plane_state->rotation;
	// Populate spl input basic input src rect from plane state src rect
	populate_splrect_from_rect(&spl_in->basic_in.src_rect, &plane_state->src_rect);
	// Populate spl input basic input dst rect from plane state dst rect
	populate_splrect_from_rect(&spl_in->basic_in.dst_rect, &plane_state->dst_rect);
	// Make spl input basic input info horiz mirror field point to plane state horz mirror
	spl_in->basic_in.horizontal_mirror = plane_state->horizontal_mirror;
	// Calculate horizontal splits and split index
	spl_in->basic_in.mpc_combine_h = resource_get_mpc_slice_count(pipe_ctx);
	// Side-by-side 3D uses a fixed vertical combine of 0
	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
		spl_in->basic_in.mpc_combine_v = 0;
	else
		spl_in->basic_in.mpc_combine_v = resource_get_mpc_slice_index(pipe_ctx);
	spl_in->basic_out.odm_combine_factor = resource_get_odm_slice_count(pipe_ctx);
	spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
	// Make spl input basic out info output_size width point to stream h active
	spl_in->basic_out.output_size.width =
		stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
	// Make spl input basic out info output_size height point to v active
	spl_in->basic_out.output_size.height =
		stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
	spl_in->basic_out.max_downscale_src_width =
		pipe_ctx->stream->ctx->dc->debug.max_downscale_src_width;
	spl_in->basic_out.always_scale = pipe_ctx->stream->ctx->dc->debug.always_scale;
	// Make spl input basic output info alpha_en field point to plane res scl_data lb_params alpha_en
	spl_in->basic_out.alpha_en = pipe_ctx->plane_res.scl_data.lb_params.alpha_en;
	// Make spl input basic input info scaling quality field point to plane state scaling_quality
	populate_spltaps_from_taps(&spl_in->scaling_quality, &plane_state->scaling_quality);
	// Translate edge adaptive scaler preference
	spl_in->prefer_easf = pipe_ctx->stream->ctx->dc->config.prefer_easf;
	// Translate adaptive sharpening preference
	spl_in->adaptive_sharpness.enable = plane_state->adaptive_sharpness_en;
	// sharpnessX1000 buckets: 0 disables, <999 LOW, <1999 MID, else HIGH
	if (plane_state->sharpnessX1000 == 0) {
		spl_in->adaptive_sharpness.enable = false;
	} else if (plane_state->sharpnessX1000 < 999) {
		spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
	} else if (plane_state->sharpnessX1000 < 1999) {
		spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
	} else { // Any other value is high sharpness
		spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
	}
	// Translate linear light scaling preference
	spl_in->lls_pref = plane_state->linear_light_scaling;
	/* Translate chroma subsampling offset ( cositing ) */
	// force_cositing is offset by one so 0 can mean "no override"
	if (pipe_ctx->stream->ctx->dc->debug.force_cositing)
		spl_in->basic_in.cositing = pipe_ctx->stream->ctx->dc->debug.force_cositing - 1;
	else
		spl_in->basic_in.cositing = plane_state->cositing;
}
/// @brief Translate SPL output parameters to pipe context
/// @param pipe_ctx destination pipe context (scaler data is updated)
/// @param spl_out SPL output structure to copy from
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out)
{
	/* Copy each SPL scaler result field back into the pipe's scl_data. */
	populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps);
	populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits);
	populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios);
	populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout);
	populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport);
	populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c);
}

View file

@ -0,0 +1,22 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

/* Translation layer between DC pipe context and the SPL (scaler) library's
 * input/output structures. */
#ifndef __DC_SPL_TRANSLATE_H__
#define __DC_SPL_TRANSLATE_H__
#include "dc.h"
#include "resource.h"

/* Map SPL input parameters to pipe context
 * @pipe_ctx: pipe context
 * @spl_in: spl input structure
 */
void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_in *spl_in);

/* Map SPL output parameters to pipe context
 * @pipe_ctx: pipe context
 * @spl_out: spl output structure
 */
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out);

#endif /* __DC_SPL_TRANSLATE_H__ */

View file

@ -0,0 +1,14 @@
#
# Copyright © 2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Kbuild fragment listing the objects of the DCN 4.0.1 hardware block.

DCN401 += dcn401_dio_link_encoder.o
DCN401 += dcn401_dio_stream_encoder.o
DCN401 += dcn401_hubp.o
DCN401 += dcn401_mpc.o
DCN401 += dcn401_dccg.o
DCN401 += dcn401_hubbub.o

# Prefix each object with its directory and add to the display build.
AMD_DAL_DCN401 = $(addprefix $(AMDDALPATH)/dc/dcn401/,$(DCN401))

AMD_DISPLAY_FILES += $(AMD_DAL_DCN401)

View file

@ -0,0 +1,846 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "dcn401_dccg.h"
#include "dcn31/dcn31_dccg.h"
/*
#include "dmub_common.h"
#include "dmcub_reg_access_helper.h"
#include "dmub401_common.h"
#include "dmub401_regs.h"
#include "dmub401_dccg.h"
*/
/* Recover the dcn_dccg wrapper from the generic dccg base pointer. */
#define TO_DCN_DCCG(dccg)\
	container_of(dccg, struct dcn_dccg, base)

/* Register/field lookup helpers consumed by the generic REG_* macros;
 * they expect a local `dccg_dcn` to be in scope. */
#define REG(reg) \
	(dccg_dcn->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name

#define CTX \
	dccg_dcn->base.ctx
#define DC_LOGGER \
	dccg->ctx->logger
/* Gate or ungate the DPPCLK for one DPP pipe instance (0-3); out-of-range
 * instances are silently ignored. */
static void dcn401_set_dppclk_enable(struct dccg *dccg,
	uint32_t dpp_inst, uint32_t enable)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* Each DPP instance has a dedicated enable bit in DPPCLK_CTRL. */
	switch (dpp_inst) {
	case 0:
		REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
		break;
	case 1:
		REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
		break;
	case 2:
		REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
		break;
	case 3:
		REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
		break;
	default:
		break;
	}
}
/*
 * Program the DPPCLK DTO for one DPP pipe so that
 *   dpp pipe clk / dpp global clk = phase / modulo
 * yields approximately req_dppclk.  A zero request (or missing reference
 * clock) disables the pipe's DPPCLK instead of programming a DTO.
 */
void dccg401_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	if (dccg->ref_dppclk && req_dppclk) {
		int ref_dppclk = dccg->ref_dppclk;
		int modulo, phase;

		// phase / modulo = dpp pipe clk / dpp global clk
		modulo = 0xff; // use FF at the end
		/* Round up so the DTO never runs slower than requested. */
		phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;

		if (phase > 0xff) {
			/* Request exceeds the reference clock: clamp to 1:1. */
			ASSERT(false);
			phase = 0xff;
		}

		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
				DPPCLK0_DTO_PHASE, phase,
				DPPCLK0_DTO_MODULO, modulo);

		dcn401_set_dppclk_enable(dccg, dpp_inst, true);
	} else {
		dcn401_set_dppclk_enable(dccg, dpp_inst, false);
	}
	/* Record the requested clock for later queries. */
	dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
 * without the probability of causing a DIG FIFO error.
 */
static void dccg401_wait_for_dentist_change_done(
	struct dccg *dccg)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* NOTE(review): writing DENTIST_DISPCLK_CNTL back with its current
	 * value appears to latch a divider-change sequence whose completion
	 * is then polled below — confirm against register documentation. */
	uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);

	REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
	/* Poll CHG_DONE: 50 us interval, up to 2000 retries. */
	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
}
/* Read back the current TMDS pixel rate divider and DP DTO integer part
 * for one OTG instance (0-3).  On an invalid instance, *tmds_div and
 * *dp_dto_int are left untouched. */
static void dccg401_get_pixel_rate_div(
		struct dccg *dccg,
		uint32_t otg_inst,
		enum pixel_rate_div *tmds_div,
		uint32_t *dp_dto_int)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	uint32_t val_tmds_div = PIXEL_RATE_DIV_NA;

	switch (otg_inst) {
	case 0:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG0_TMDS_PIXEL_RATE_DIV, &val_tmds_div,
			DPDTO0_INT, dp_dto_int);
		break;
	case 1:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG1_TMDS_PIXEL_RATE_DIV, &val_tmds_div,
			DPDTO1_INT, dp_dto_int);
		break;
	case 2:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG2_TMDS_PIXEL_RATE_DIV, &val_tmds_div,
			DPDTO2_INT, dp_dto_int);
		break;
	case 3:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG3_TMDS_PIXEL_RATE_DIV, &val_tmds_div,
			DPDTO3_INT, dp_dto_int);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}

	/* Hardware encoding: 0 -> divide by 2, anything else -> divide by 4. */
	*tmds_div = val_tmds_div == 0 ? PIXEL_RATE_DIV_BY_2 : PIXEL_RATE_DIV_BY_4;
}
/* Program the TMDS pixel rate divider for one OTG instance.  Only /2 and
 * /4 are valid on DCN4.01; other values and no-op changes are ignored.
 * The @unused parameter keeps the signature compatible with earlier DCN
 * generations that take two dividers. */
static void dccg401_set_pixel_rate_div(
		struct dccg *dccg,
		uint32_t otg_inst,
		enum pixel_rate_div tmds_div,
		enum pixel_rate_div unused)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	enum pixel_rate_div cur_tmds_div = PIXEL_RATE_DIV_NA;
	uint32_t dp_dto_int;
	uint32_t reg_val;

	// only 2 and 4 are valid on dcn401
	if (tmds_div != PIXEL_RATE_DIV_BY_2 && tmds_div != PIXEL_RATE_DIV_BY_4) {
		return;
	}

	/* Skip the write (and the dentist workaround) if already set. */
	dccg401_get_pixel_rate_div(dccg, otg_inst, &cur_tmds_div, &dp_dto_int);
	if (tmds_div == cur_tmds_div)
		return;

	// encode enum to register value
	reg_val = tmds_div == PIXEL_RATE_DIV_BY_4 ? 1 : 0;

	/* Each write must be followed by the dentist-change-done workaround
	 * to avoid DIG FIFO errors (see dccg401_wait_for_dentist_change_done). */
	switch (otg_inst) {
	case 0:
		REG_UPDATE(OTG_PIXEL_RATE_DIV,
				OTG0_TMDS_PIXEL_RATE_DIV, reg_val);

		dccg401_wait_for_dentist_change_done(dccg);
		break;
	case 1:
		REG_UPDATE(OTG_PIXEL_RATE_DIV,
				OTG1_TMDS_PIXEL_RATE_DIV, reg_val);

		dccg401_wait_for_dentist_change_done(dccg);
		break;
	case 2:
		REG_UPDATE(OTG_PIXEL_RATE_DIV,
				OTG2_TMDS_PIXEL_RATE_DIV, reg_val);

		dccg401_wait_for_dentist_change_done(dccg);
		break;
	case 3:
		REG_UPDATE(OTG_PIXEL_RATE_DIV,
				OTG3_TMDS_PIXEL_RATE_DIV, reg_val);

		dccg401_wait_for_dentist_change_done(dccg);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/* Select the source of the DTBCLK_P clock for one OTG instance.
 * REFCLK disables the output; DTBCLK0 selects dtbclk0 (src sel 2);
 * any other source defaults to dprefclk (src sel 0). */
static void dccg401_set_dtbclk_p_src(
		struct dccg *dccg,
		enum streamclk_source src,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	uint32_t p_src_sel = 0; /* selects dprefclk */
	if (src == DTBCLK0)
		p_src_sel = 2;  /* selects dtbclk0 */

	switch (otg_inst) {
	case 0:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P0_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P0_SRC_SEL, p_src_sel,
					DTBCLK_P0_EN, 1);
		break;
	case 1:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P1_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P1_SRC_SEL, p_src_sel,
					DTBCLK_P1_EN, 1);
		break;
	case 2:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P2_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P2_SRC_SEL, p_src_sel,
					DTBCLK_P2_EN, 1);
		break;
	case 3:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P3_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P3_SRC_SEL, p_src_sel,
					DTBCLK_P3_EN, 1);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/* Force-enable (or release) the PHY symbol clock for one PHY instance
 * (0=A .. 3=D) and select its source; when force_enable is false the
 * clock is disabled and the source reset to 0.  Gate-disable bits are
 * only touched when the physymclk root clock optimization is enabled. */
void dccg401_set_physymclk(
		struct dccg *dccg,
		int phy_inst,
		enum physymclk_clock_source clk_src,
		bool force_enable)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* Force PHYSYMCLK on and Select phyd32clk as the source of clock which is output to PHY through DCIO */
	switch (phy_inst) {
	case 0:
		if (force_enable) {
			REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
					PHYASYMCLK_EN, 1,
					PHYASYMCLK_SRC_SEL, clk_src);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYASYMCLK_ROOT_GATE_DISABLE, 1);
		} else {
			REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
					PHYASYMCLK_EN, 0,
					PHYASYMCLK_SRC_SEL, 0);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYASYMCLK_ROOT_GATE_DISABLE, 0);
		}
		break;
	case 1:
		if (force_enable) {
			REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
					PHYBSYMCLK_EN, 1,
					PHYBSYMCLK_SRC_SEL, clk_src);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYBSYMCLK_ROOT_GATE_DISABLE, 1);
		} else {
			REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
					PHYBSYMCLK_EN, 0,
					PHYBSYMCLK_SRC_SEL, 0);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYBSYMCLK_ROOT_GATE_DISABLE, 0);
		}
		break;
	case 2:
		if (force_enable) {
			REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
					PHYCSYMCLK_EN, 1,
					PHYCSYMCLK_SRC_SEL, clk_src);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYCSYMCLK_ROOT_GATE_DISABLE, 1);
		} else {
			REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
					PHYCSYMCLK_EN, 0,
					PHYCSYMCLK_SRC_SEL, 0);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYCSYMCLK_ROOT_GATE_DISABLE, 0);
		}
		break;
	case 3:
		if (force_enable) {
			REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
					PHYDSYMCLK_EN, 1,
					PHYDSYMCLK_SRC_SEL, clk_src);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYDSYMCLK_ROOT_GATE_DISABLE, 1);
		} else {
			REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
					PHYDSYMCLK_EN, 0,
					PHYDSYMCLK_SRC_SEL, 0);
			if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
				REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
						PHYDSYMCLK_ROOT_GATE_DISABLE, 0);
		}
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/*
 * Report the DCCG reference clock frequency in kHz.
 *
 * On DCN4.01 the display reference clock is assumed to be sourced directly
 * from XTALIN (expected to be 100 MHz), so the XTALIN frequency is simply
 * passed through unchanged.
 */
static void dccg401_get_dccg_ref_freq(struct dccg *dccg,
		unsigned int xtalin_freq_inKhz,
		unsigned int *dccg_ref_freq_inKhz)
{
	*dccg_ref_freq_inKhz = xtalin_freq_inKhz;
}
/* Request the OTG to insert one extra pixel into its output stream
 * (per-instance ADD_PIXEL field; register and field are index-selected). */
static void dccg401_otg_add_pixel(struct dccg *dccg,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
			OTG_ADD_PIXEL[otg_inst], 1);
}
/* Request the OTG to drop one pixel from its output stream
 * (per-instance DROP_PIXEL field; register and field are index-selected). */
static void dccg401_otg_drop_pixel(struct dccg *dccg,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
			OTG_DROP_PIXEL[otg_inst], 1);
}
/* Enable symclk32_le for one HPO link encoder instance and select the
 * given PHYD32CLK as its source.  Gate-disable bits are only written when
 * the symclk32_le root clock optimization is enabled; the source select
 * and enable are written unconditionally. */
static void dccg401_enable_symclk32_le(
		struct dccg *dccg,
		int hpo_le_inst,
		enum phyd32clk_clock_source phyd32clk)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* select one of the PHYD32CLKs as the source for symclk32_le */
	switch (hpo_le_inst) {
	case 0:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE0_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE0_SRC_SEL, phyd32clk,
				SYMCLK32_LE0_EN, 1);
		break;
	case 1:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE1_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE1_SRC_SEL, phyd32clk,
				SYMCLK32_LE1_EN, 1);
		break;
	case 2:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE2_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE2_GATE_DISABLE, 1);
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE2_SRC_SEL, phyd32clk,
				SYMCLK32_LE2_EN, 1);
		break;
	case 3:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE3_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE3_GATE_DISABLE, 1);
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE3_SRC_SEL, phyd32clk,
				SYMCLK32_LE3_EN, 1);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/* Disable symclk32_le for one HPO link encoder instance and reset its
 * source select to refclk (0).  Mirrors dccg401_enable_symclk32_le but in
 * reverse order: clock off first, then re-enable gating if the symclk32_le
 * root clock optimization is active. */
static void dccg401_disable_symclk32_le(
		struct dccg *dccg,
		int hpo_le_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* set refclk as the source for symclk32_le */
	switch (hpo_le_inst) {
	case 0:
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE0_SRC_SEL, 0,
				SYMCLK32_LE0_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE0_GATE_DISABLE, 0,
					SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
		break;
	case 1:
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE1_SRC_SEL, 0,
				SYMCLK32_LE1_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE1_GATE_DISABLE, 0,
					SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
		break;
	case 2:
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE2_SRC_SEL, 0,
				SYMCLK32_LE2_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE2_GATE_DISABLE, 0,
					SYMCLK32_ROOT_LE2_GATE_DISABLE, 0);
		break;
	case 3:
		REG_UPDATE_2(SYMCLK32_LE_CNTL,
				SYMCLK32_LE3_SRC_SEL, 0,
				SYMCLK32_LE3_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_LE3_GATE_DISABLE, 0,
					SYMCLK32_ROOT_LE3_GATE_DISABLE, 0);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/* Enable the DP stream clock for one HPO instance, sourcing it from the
 * DTBCLK of the given OTG.  Gate-disable bits are only written when the
 * dpstream root clock optimization is enabled. */
static void dccg401_enable_dpstreamclk(struct dccg *dccg, int otg_inst, int dp_hpo_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* enabled to select one of the DTBCLKs for pipe */
	switch (dp_hpo_inst) {
	case 0:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK0_ROOT_GATE_DISABLE, 1,
					DPSTREAMCLK0_GATE_DISABLE, 1);
		REG_UPDATE_2(DPSTREAMCLK_CNTL,
				DPSTREAMCLK0_SRC_SEL, otg_inst,
				DPSTREAMCLK0_EN, 1);
		break;
	case 1:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK1_ROOT_GATE_DISABLE, 1,
					DPSTREAMCLK1_GATE_DISABLE, 1);
		REG_UPDATE_2(DPSTREAMCLK_CNTL,
				DPSTREAMCLK1_SRC_SEL, otg_inst,
				DPSTREAMCLK1_EN, 1);
		break;
	case 2:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK2_ROOT_GATE_DISABLE, 1,
					DPSTREAMCLK2_GATE_DISABLE, 1);
		REG_UPDATE_2(DPSTREAMCLK_CNTL,
				DPSTREAMCLK2_SRC_SEL, otg_inst,
				DPSTREAMCLK2_EN, 1);
		break;
	case 3:
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK3_ROOT_GATE_DISABLE, 1,
					DPSTREAMCLK3_GATE_DISABLE, 1);
		REG_UPDATE_2(DPSTREAMCLK_CNTL,
				DPSTREAMCLK3_SRC_SEL, otg_inst,
				DPSTREAMCLK3_EN, 1);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
	/* Also ungate the common dpstream clock root. */
	if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
		REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
			DPSTREAMCLK_GATE_DISABLE, 1,
			DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
}
/* Disable the DP stream clock for one HPO instance; mirrors
 * dccg401_enable_dpstreamclk in reverse order (clock off first, then
 * re-enable gating when the dpstream root clock optimization is active). */
static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	switch (dp_hpo_inst) {
	case 0:
		REG_UPDATE(DPSTREAMCLK_CNTL,
				DPSTREAMCLK0_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK0_ROOT_GATE_DISABLE, 0,
					DPSTREAMCLK0_GATE_DISABLE, 0);
		break;
	case 1:
		REG_UPDATE(DPSTREAMCLK_CNTL,
				DPSTREAMCLK1_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK1_ROOT_GATE_DISABLE, 0,
					DPSTREAMCLK1_GATE_DISABLE, 0);
		break;
	case 2:
		REG_UPDATE(DPSTREAMCLK_CNTL,
				DPSTREAMCLK2_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK2_ROOT_GATE_DISABLE, 0,
					DPSTREAMCLK2_GATE_DISABLE, 0);
		break;
	case 3:
		REG_UPDATE(DPSTREAMCLK_CNTL,
				DPSTREAMCLK3_EN, 0);
		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
			REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
					DPSTREAMCLK3_ROOT_GATE_DISABLE, 0,
					DPSTREAMCLK3_GATE_DISABLE, 0);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/* Route a stream clock source to one HPO DP stream clock: programs the
 * dtbclk_p source for the OTG, then enables or disables the stream clock
 * for the HPO instance (REFCLK means "not needed" and disables it). */
static void dccg401_set_dpstreamclk(
		struct dccg *dccg,
		enum streamclk_source src,
		int otg_inst,
		int dp_hpo_inst)
{
	/* set the dtbclk_p source */
	dccg401_set_dtbclk_p_src(dccg, src, otg_inst);

	/* enabled to select one of the DTBCLKs for pipe */
	if (src == REFCLK) {
		dccg401_disable_dpstreamclk(dccg, dp_hpo_inst);
		return;
	}

	dccg401_enable_dpstreamclk(dccg, otg_inst, dp_hpo_inst);
}
/*
 * Program the per-OTG DP DTO (pixel-rate divider) registers.
 *
 * For non-TMDS signals the DTO is computed as:
 *   int    = pixclk_hz / refclk_hz
 *   phase  = pixclk_hz - int * refclk_hz   (remainder)
 *   modulo = refclk_hz
 * The relevant DTBCLK/SYMCLK32 gates for the OTG instance are
 * force-ungated, the clock source is selected, and the DTO registers are
 * written before the DTO is toggled on.  For TMDS signals the DTO is
 * simply toggled off.
 */
static void dccg401_set_dp_dto(
		struct dccg *dccg,
		const struct dp_dto_params *params)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	bool enable = false;

	if (params->otg_inst > 3) {
		/* dcn401 only has 4 instances */
		BREAK_TO_DEBUGGER();
		return;
	}
	if (!dc_is_tmds_signal(params->signal)) {
		uint64_t dto_integer;
		uint64_t dto_phase_hz;
		uint64_t dto_modulo_hz = params->refclk_hz;

		enable = true;
		/* Set DTO values:
		 * int = target_pix_rate / reference_clock
		 * phase = target_pix_rate - int * reference_clock,
		 * modulo = reference_clock */
		dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
		dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
		/* dto_phase_hz is unsigned, so the only "bad" remainder is 0.
		 * The original `<= 0` check could never see a negative value.
		 * NOTE(review): a pixel clock that is an exact multiple of the
		 * reference clock also yields phase 0 and is rejected here —
		 * confirm that case is truly impossible for these signals.
		 */
		if (dto_phase_hz == 0) {
			BREAK_TO_DEBUGGER();
			return;
		}
		/* force-ungate this OTG's DTBCLK_P and SYMCLK32 SE/LE clocks
		 * before programming the DTO
		 */
		switch (params->otg_inst) {
		case 0:
			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1);
			REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_SE0_GATE_DISABLE, 1,
					SYMCLK32_ROOT_SE0_GATE_DISABLE, 1,
					SYMCLK32_LE0_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
			break;
		case 1:
			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1);
			REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_SE1_GATE_DISABLE, 1,
					SYMCLK32_ROOT_SE1_GATE_DISABLE, 1,
					SYMCLK32_LE1_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
			break;
		case 2:
			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1);
			REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_SE2_GATE_DISABLE, 1,
					SYMCLK32_ROOT_SE2_GATE_DISABLE, 1,
					SYMCLK32_LE2_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE2_GATE_DISABLE, 1);
			break;
		case 3:
			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1);
			REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL3,
					SYMCLK32_SE3_GATE_DISABLE, 1,
					SYMCLK32_ROOT_SE3_GATE_DISABLE, 1,
					SYMCLK32_LE3_GATE_DISABLE, 1,
					SYMCLK32_ROOT_LE3_GATE_DISABLE, 1);
			break;
		}
		dccg401_set_dtbclk_p_src(dccg, params->clk_src, params->otg_inst);
		REG_WRITE(DP_DTO_PHASE[params->otg_inst], dto_phase_hz);
		REG_WRITE(DP_DTO_MODULO[params->otg_inst], dto_modulo_hz);
		switch (params->otg_inst) {
		case 0:
			REG_UPDATE(OTG_PIXEL_RATE_DIV,
					DPDTO0_INT, dto_integer);
			break;
		case 1:
			REG_UPDATE(OTG_PIXEL_RATE_DIV,
					DPDTO1_INT, dto_integer);
			break;
		case 2:
			REG_UPDATE(OTG_PIXEL_RATE_DIV,
					DPDTO2_INT, dto_integer);
			break;
		case 3:
			REG_UPDATE(OTG_PIXEL_RATE_DIV,
					DPDTO3_INT, dto_integer);
			break;
		default:
			BREAK_TO_DEBUGGER();
			return;
		}
	}
	/* Toggle DTO */
	REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
			DP_DTO_ENABLE[params->otg_inst], enable,
			PIPE_DTO_SRC_SEL[params->otg_inst], enable);
}
/*
 * One-time DCCG init for DCN4.01.
 *
 * Parks all four HPO stream-encoder SYMCLK32 clocks on refclk so a
 * disabled PHY cannot hang DCN, then (per the debug root-clock
 * optimization flags) gates the SYMCLK32 LE, DP stream, and PHY symbol
 * clocks for every instance.
 */
void dccg401_init(struct dccg *dccg)
{
	int inst;

	/* Set HPO stream encoder to use refclk to avoid case where PHY is
	 * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which
	 * will cause DCN to hang.
	 */
	for (inst = 0; inst < 4; inst++)
		dccg31_disable_symclk32_se(dccg, inst);

	if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
		for (inst = 0; inst < 4; inst++)
			dccg401_disable_symclk32_le(dccg, inst);

	if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
		for (inst = 0; inst < 4; inst++)
			dccg401_disable_dpstreamclk(dccg, inst);

	if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
		for (inst = 0; inst < 4; inst++)
			dccg401_set_physymclk(dccg, inst,
					PHYSYMCLK_FORCE_SRC_SYMCLK, false);
}
/*
 * Enable the DSC clock DTO for one DSC instance.
 *
 * Programs phase = 1, modulo = 1 (presumably a 1:1 ratio, i.e. the DTO
 * passes the reference through at full rate — TODO confirm against the
 * register spec), then enables the DTO with double-buffering enabled.
 * Unknown instances trap to the debugger.
 */
static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	switch (inst) {
	case 0:
		REG_UPDATE_2(DSCCLK0_DTO_PARAM,
				DSCCLK0_DTO_PHASE, 1,
				DSCCLK0_DTO_MODULO, 1);
		REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1, DSCCLK0_DTO_DB_EN, 1);
		break;
	case 1:
		REG_UPDATE_2(DSCCLK1_DTO_PARAM,
				DSCCLK1_DTO_PHASE, 1,
				DSCCLK1_DTO_MODULO, 1);
		REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1, DSCCLK1_DTO_DB_EN, 1);
		break;
	case 2:
		REG_UPDATE_2(DSCCLK2_DTO_PARAM,
				DSCCLK2_DTO_PHASE, 1,
				DSCCLK2_DTO_MODULO, 1);
		REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1, DSCCLK2_DTO_DB_EN, 1);
		break;
	case 3:
		REG_UPDATE_2(DSCCLK3_DTO_PARAM,
				DSCCLK3_DTO_PHASE, 1,
				DSCCLK3_DTO_MODULO, 1);
		REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1, DSCCLK3_DTO_DB_EN, 1);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}
/*
 * Revert one DSC instance's clock from the DTO back to the reference.
 *
 * Disables the per-instance DTO enable bit and resets the DTO parameters
 * to phase = 0, modulo = 1.
 *
 * NOTE(review): unlike the sibling switch statements in this file, the
 * default case here returns without BREAK_TO_DEBUGGER() — confirm the
 * silent ignore is intentional.
 */
static void dccg401_set_ref_dscclk(struct dccg *dccg,
		uint32_t dsc_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	switch (dsc_inst) {
	case 0:
		REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
		REG_UPDATE_2(DSCCLK0_DTO_PARAM,
				DSCCLK0_DTO_PHASE, 0,
				DSCCLK0_DTO_MODULO, 1);
		break;
	case 1:
		REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
		REG_UPDATE_2(DSCCLK1_DTO_PARAM,
				DSCCLK1_DTO_PHASE, 0,
				DSCCLK1_DTO_MODULO, 1);
		break;
	case 2:
		REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
		REG_UPDATE_2(DSCCLK2_DTO_PARAM,
				DSCCLK2_DTO_PHASE, 0,
				DSCCLK2_DTO_MODULO, 1);
		break;
	case 3:
		REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
		REG_UPDATE_2(DSCCLK3_DTO_PARAM,
				DSCCLK3_DTO_PHASE, 0,
				DSCCLK3_DTO_MODULO, 1);
		break;
	default:
		return;
	}
}
/*
 * DCN4.01 DCCG vtable. Entries set to NULL are features this DCCG does
 * not implement; several hooks are inherited from the DCN2.x/3.1 DCCGs.
 */
static const struct dccg_funcs dccg401_funcs = {
	.update_dpp_dto = dccg401_update_dpp_dto,
	.get_dccg_ref_freq = dccg401_get_dccg_ref_freq,
	.dccg_init = dccg401_init,
	.set_dpstreamclk = dccg401_set_dpstreamclk,
	.enable_symclk32_se = dccg31_enable_symclk32_se,
	.disable_symclk32_se = dccg31_disable_symclk32_se,
	.enable_symclk32_le = dccg401_enable_symclk32_le,
	.disable_symclk32_le = dccg401_disable_symclk32_le,
	.set_physymclk = dccg401_set_physymclk,
	.set_dtbclk_dto = NULL,
	.set_dto_dscclk = dccg401_set_dto_dscclk,
	.set_ref_dscclk = dccg401_set_ref_dscclk,
	.set_valid_pixel_rate = NULL,
	.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
	.set_audio_dtbclk_dto = NULL,
	.otg_add_pixel = dccg401_otg_add_pixel,
	.otg_drop_pixel = dccg401_otg_drop_pixel,
	.set_pixel_rate_div = dccg401_set_pixel_rate_div,
	.set_dp_dto = dccg401_set_dp_dto,
	.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
};
/*
 * Allocate and initialize a DCN4.01 DCCG instance.
 *
 * Returns a pointer to the embedded base struct dccg on success, or
 * NULL (after trapping to the debugger) when allocation fails.  The
 * register/shift/mask tables are borrowed, not copied.
 */
struct dccg *dccg401_create(
	struct dc_context *ctx,
	const struct dccg_registers *regs,
	const struct dccg_shift *dccg_shift,
	const struct dccg_mask *dccg_mask)
{
	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);

	if (!dccg_dcn) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dccg_dcn->base.ctx = ctx;
	dccg_dcn->base.funcs = &dccg401_funcs;
	dccg_dcn->regs = regs;
	dccg_dcn->dccg_shift = dccg_shift;
	dccg_dcn->dccg_mask = dccg_mask;

	return &dccg_dcn->base;
}

View file

@ -0,0 +1,205 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN401_DCCG_H__
#define __DCN401_DCCG_H__
#include "dcn32/dcn32_dccg.h"
/* Per-instance designated initializer: expands to
 * .<field_prefix>_<field_name>[inst] =
 *     <block><inst>_<reg_name>__<field_prefix><inst>_<field_name><post_fix>
 */
#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
	.field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
/*
 * Shift/mask field list for the DCN4.01 DCCG, expanded with __SHIFT or
 * _MASK to build the dccg_shift/dccg_mask tables.
 *
 * Fixes vs. previous revision:
 *  - the final entry ended with ",\", so the macro continuation swallowed
 *    the declaration that followed it; the list now terminates cleanly.
 *  - the four DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, ...)
 *    entries were listed twice; the redundant copies are removed
 *    (duplicate designated initializers merely re-assign the same value).
 */
#define DCCG_MASK_SH_LIST_DCN401(mask_sh) \
	DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
	DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
	DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
	DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
	DCCG_SF(DPPCLK_CTRL, DPPCLK0_EN, mask_sh),\
	DCCG_SF(DPPCLK_CTRL, DPPCLK1_EN, mask_sh),\
	DCCG_SF(DPPCLK_CTRL, DPPCLK2_EN, mask_sh),\
	DCCG_SF(DPPCLK_CTRL, DPPCLK3_EN, mask_sh),\
	DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
	DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
	DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
	DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
	DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, mask_sh),\
	DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_SRC_SEL, mask_sh),\
	DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, mask_sh),\
	DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_SRC_SEL, mask_sh),\
	DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, mask_sh),\
	DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_SRC_SEL, mask_sh),\
	DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, mask_sh),\
	DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_SRC_SEL, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_SRC_SEL, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_SRC_SEL, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_SRC_SEL, mask_sh),\
	DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_SRC_SEL, mask_sh),\
	DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, mask_sh),\
	DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_EN, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_EN, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_EN, mask_sh),\
	DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE3_EN, mask_sh),\
	DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, mask_sh),\
	DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE0_EN, mask_sh),\
	DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE1_EN, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 0, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 1, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 2, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 3, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, OTG0_TMDS_PIXEL_RATE_DIV, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, DPDTO0_INT, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, OTG1_TMDS_PIXEL_RATE_DIV, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, DPDTO1_INT, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, OTG2_TMDS_PIXEL_RATE_DIV, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, DPDTO2_INT, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, OTG3_TMDS_PIXEL_RATE_DIV, mask_sh),\
	DCCG_SF(OTG_PIXEL_RATE_DIV, DPDTO3_INT, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P0_SRC_SEL, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P0_EN, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P1_SRC_SEL, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P1_EN, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P2_SRC_SEL, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P2_EN, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
	DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
	DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
	DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 0, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 1, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 2, mask_sh),\
	DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 3, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_DB_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_DB_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_DB_EN, mask_sh),\
	DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_DTO_DB_EN, mask_sh),\
	DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
	DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
	DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
	DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
	DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
	DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
	DCCG_SF(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, mask_sh),\
	DCCG_SF(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_MODULO, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\
	DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh)
/* One-time DCCG init: parks SYMCLK32 on refclk, applies root-clock gating. */
void dccg401_init(struct dccg *dccg);
/* Program the DPP clock DTO for one DPP instance. */
void dccg401_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
/* Select the DTBCLK_P source per the given DTO parameters. */
void dccg401_set_src_sel(
		struct dccg *dccg,
		const struct dtbclk_dto_params *params);
/* Allocate a DCN4.01 DCCG; returns NULL on allocation failure. */
struct dccg *dccg401_create(
		struct dc_context *ctx,
		const struct dccg_registers *regs,
		const struct dccg_shift *dccg_shift,
		const struct dccg_mask *dccg_mask);
/* Select/force the PHY symbol clock source for one PHY instance. */
void dccg401_set_physymclk(
		struct dccg *dccg,
		int phy_inst,
		enum physymclk_clock_source clk_src,
		bool force_enable);
#endif //__DCN401_DCCG_H__

View file

@ -0,0 +1,322 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn401_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"

/* Guarded once here; a second identical #ifndef MIN block was removed. */
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

/* Register-access helper context: all REG_*/AUX_REG_* macros below
 * resolve through the local dcn10_link_encoder pointer `enc10`.
 */
#define CTX \
	enc10->base.ctx
#define DC_LOGGER \
	enc10->base.ctx->logger

#define REG(reg)\
	(enc10->link_regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc10->link_shift->field_name, enc10->link_mask->field_name

#define AUX_REG(reg)\
	(enc10->aux_regs->reg)

#define AUX_REG_READ(reg_name) \
		dm_read_reg(CTX, AUX_REG(reg_name))

#define AUX_REG_WRITE(reg_name, val) \
			dm_write_reg(CTX, AUX_REG(reg_name), val)
/*
 * One-time hardware init for a DCN4.01 DIO link encoder.
 *
 * Writes fixed AUX DPHY RX/TX control values (magic constants documented
 * field-by-field in the comments below), sets the legacy TMDS_CTL0 bit,
 * and runs the common DCN10 AUX engine init.
 */
void enc401_hw_init(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

/*
	00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
	01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
	02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
	03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
	04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
	05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
	06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
	07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
*/

/*
	AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
	AUX_RX_START_WINDOW = 1 [6:4]
	AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
	AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
	AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
	AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
	AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
	AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
	AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
	AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
	AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);

	AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);

	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
	// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
	// 27MHz -> 0xd
	// 100MHz -> 0x32
	// 48MHz -> 0x18

	// Set TMDS_CTL0 to 1. This is a legacy setting.
	REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);

	dcn10_aux_initialize(enc10);
}
/*
 * Enable DP output on this link encoder.
 *
 * When the avoid_vbios_exec_table debug flag is set this is a no-op;
 * otherwise the DCN10 implementation performs the actual enable.
 */
void dcn401_link_encoder_enable_dp_output(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	enum clock_source_id clock_source)
{
	if (enc->ctx->dc->debug.avoid_vbios_exec_table)
		return;

	dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
}
/*
 * Configure the DIG backend mode for the given signal type, then enable
 * the backend clock and the backend itself.
 *
 * DIG_BE_MODE encodings match the decode in dcn401_get_dig_mode():
 * 0 = DP SST, 2 = TMDS-DVI, 3 = TMDS-HDMI, 5 = DP MST.
 */
void dcn401_link_encoder_setup(
	struct link_encoder *enc,
	enum signal_type signal)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

	switch (signal) {
	case SIGNAL_TYPE_EDP:
	case SIGNAL_TYPE_DISPLAY_PORT:
		/* DP SST */
		REG_UPDATE(DIG_BE_CLK_CNTL, DIG_BE_MODE, 0);
		break;
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		/* TMDS-DVI */
		REG_UPDATE(DIG_BE_CLK_CNTL, DIG_BE_MODE, 2);
		break;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		/* TMDS-HDMI */
		REG_UPDATE(DIG_BE_CLK_CNTL, DIG_BE_MODE, 3);
		break;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		/* DP MST */
		REG_UPDATE(DIG_BE_CLK_CNTL, DIG_BE_MODE, 5);
		break;
	default:
		ASSERT_CRITICAL(false);
		/* invalid mode ! */
		break;
	}
	/* NOTE(review): the backend is clocked and enabled even when the
	 * signal type was invalid (mode register left untouched) — confirm
	 * this fall-through is intended.
	 */
	REG_UPDATE(DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, 1);
	REG_UPDATE(DIG_BE_EN_CNTL, DIG_BE_ENABLE, 1);
}
/*
 * Report whether this DIG backend is fully enabled: both the backend
 * clock (DIG_BE_CLK_EN) and the backend itself (DIG_BE_ENABLE) must be
 * on.
 */
bool dcn401_is_dig_enabled(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	uint32_t be_clk_on = 0;
	uint32_t be_on = 0;

	REG_GET(DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, &be_clk_on);
	REG_GET(DIG_BE_EN_CNTL, DIG_BE_ENABLE, &be_on);

	return be_clk_on == 1 && be_on == 1;
}
/*
 * Decode the programmed DIG_BE_MODE back into a signal type.
 *
 * Inverse of the encoding written by dcn401_link_encoder_setup():
 * 0 = DP SST, 2 = TMDS-DVI, 3 = TMDS-HDMI, 5 = DP MST; anything else
 * maps to SIGNAL_TYPE_NONE.
 */
enum signal_type dcn401_get_dig_mode(
	struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	uint32_t dig_mode = 0;

	REG_GET(DIG_BE_CLK_CNTL, DIG_BE_MODE, &dig_mode);

	if (dig_mode == 0)
		return SIGNAL_TYPE_DISPLAY_PORT;
	if (dig_mode == 2)
		return SIGNAL_TYPE_DVI_SINGLE_LINK;
	if (dig_mode == 3)
		return SIGNAL_TYPE_HDMI_TYPE_A;
	if (dig_mode == 5)
		return SIGNAL_TYPE_DISPLAY_PORT_MST;

	return SIGNAL_TYPE_NONE;
}
/*
 * DCN4.01 link encoder vtable. Most hooks are inherited from DCN10/20/
 * 30/31/32; only hw_init, setup, enable_dp_output, is_dig_enabled and
 * get_dig_mode are DCN4.01-specific.
 */
static const struct link_encoder_funcs dcn401_link_enc_funcs = {
	.read_state = link_enc2_read_state,
	.validate_output_with_stream =
			dcn30_link_encoder_validate_output_with_stream,
	.hw_init = enc401_hw_init,
	.setup = dcn401_link_encoder_setup,
	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
	.enable_dp_output = dcn401_link_encoder_enable_dp_output,
	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
	.disable_output = dcn10_link_encoder_disable_output,
	.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
	.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
	.update_mst_stream_allocation_table =
		dcn10_link_encoder_update_mst_stream_allocation_table,
	.psr_program_dp_dphy_fast_training =
			dcn10_psr_program_dp_dphy_fast_training,
	.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
	.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
	.enable_hpd = dcn10_link_encoder_enable_hpd,
	.disable_hpd = dcn10_link_encoder_disable_hpd,
	.is_dig_enabled = dcn401_is_dig_enabled,
	.destroy = dcn10_link_encoder_destroy,
	.fec_set_enable = enc2_fec_set_enable,
	.fec_set_ready = enc2_fec_set_ready,
	.fec_is_active = enc2_fec_is_active,
	.get_dig_frontend = dcn10_get_dig_frontend,
	.get_dig_mode = dcn401_get_dig_mode,
	.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
	.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
	.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
};
/*
 * Construct a DCN4.01 link encoder inside caller-provided storage.
 *
 * Wires up the vtable and register/shift/mask tables, derives the
 * preferred DIG engine from the transmitter, and overrides link-rate
 * capability flags (HBR2/HBR3/UHBR10/13.5/20, HDMI 6GB) from the VBIOS
 * connector speed caps when available.
 */
void dcn401_link_encoder_construct(
	struct dcn20_link_encoder *enc20,
	const struct encoder_init_data *init_data,
	const struct encoder_feature_support *enc_features,
	const struct dcn10_link_enc_registers *link_regs,
	const struct dcn10_link_enc_aux_registers *aux_regs,
	const struct dcn10_link_enc_hpd_registers *hpd_regs,
	const struct dcn10_link_enc_shift *link_shift,
	const struct dcn10_link_enc_mask *link_mask)
{
	struct bp_connector_speed_cap_info bp_cap_info = {0};
	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
	enum bp_result result = BP_RESULT_OK;
	struct dcn10_link_encoder *enc10 = &enc20->enc10;

	enc10->base.funcs = &dcn401_link_enc_funcs;
	enc10->base.ctx = init_data->ctx;
	enc10->base.id = init_data->encoder;

	enc10->base.hpd_source = init_data->hpd_source;
	enc10->base.connector = init_data->connector;

	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

	enc10->base.features = *enc_features;
	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
		enc10->base.features.flags.bits.DP_IS_USB_C = 1;

	enc10->base.transmitter = init_data->transmitter;

	/* set the flag to indicate whether driver poll the I2C data pin
	 * while doing the DP sink detect
	 */

/*	if (dal_adapter_service_is_feature_supported(as,
		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
		enc10->base.features.flags.bits.
			DP_SINK_DETECT_POLL_DATA_PIN = true;*/

	enc10->base.output_signals =
		SIGNAL_TYPE_DVI_SINGLE_LINK |
		SIGNAL_TYPE_DVI_DUAL_LINK |
		SIGNAL_TYPE_LVDS |
		SIGNAL_TYPE_DISPLAY_PORT |
		SIGNAL_TYPE_DISPLAY_PORT_MST |
		SIGNAL_TYPE_EDP |
		SIGNAL_TYPE_HDMI_TYPE_A;

	enc10->link_regs = link_regs;
	enc10->aux_regs = aux_regs;
	enc10->hpd_regs = hpd_regs;
	enc10->link_shift = link_shift;
	enc10->link_mask = link_mask;

	/* transmitter -> preferred DIG engine (UNIPHY A..E -> DIG A..E) */
	switch (enc10->base.transmitter) {
	case TRANSMITTER_UNIPHY_A:
		enc10->base.preferred_engine = ENGINE_ID_DIGA;
	break;
	case TRANSMITTER_UNIPHY_B:
		enc10->base.preferred_engine = ENGINE_ID_DIGB;
	break;
	case TRANSMITTER_UNIPHY_C:
		enc10->base.preferred_engine = ENGINE_ID_DIGC;
	break;
	case TRANSMITTER_UNIPHY_D:
		enc10->base.preferred_engine = ENGINE_ID_DIGD;
	break;
	case TRANSMITTER_UNIPHY_E:
		enc10->base.preferred_engine = ENGINE_ID_DIGE;
	break;
	default:
		ASSERT_CRITICAL(false);
		enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
	}

	/* default to one to mirror Windows behavior */
	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;

	if (bp_funcs->get_connector_speed_cap_info)
		result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
						enc10->base.connector, &bp_cap_info);

	/* Override features with DCE-specific values */
	/* NOTE(review): if get_connector_speed_cap_info is NULL, result
	 * remains BP_RESULT_OK and the zero-initialized bp_cap_info clears
	 * the HDMI_6GB_EN default set above — confirm that is intended.
	 */
	if (result == BP_RESULT_OK) {
		enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
				bp_cap_info.DP_HBR2_EN;
		enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
				bp_cap_info.DP_HBR3_EN;
		enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
		enc10->base.features.flags.bits.IS_DP2_CAPABLE = 1;
		enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
		enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
		enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
	} else {
		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
				__func__,
				result);
	}
	if (enc10->base.ctx->dc->debug.hdmi20_disable) {
		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
	}
}

View file

@ -0,0 +1,134 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_LINK_ENCODER__DCN401_H__
#define __DC_LINK_ENCODER__DCN401_H__
#include "dcn30/dcn30_dio_link_encoder.h"
/*
 * Shift/mask field list for the DCN4.01 DIO link encoder registers.
 * The LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, ...) entry appeared twice in
 * the previous revision; the redundant duplicate has been removed
 * (duplicate designated initializers simply re-assign the same value).
 */
#define LINK_ENCODER_MASK_SH_LIST_DCN401(mask_sh) \
	LE_SF(DIG0_DIG_BE_EN_CNTL, DIG_BE_ENABLE, mask_sh),\
	LE_SF(DIG0_DIG_BE_CNTL, DIG_RB_SWITCH_EN, mask_sh),\
	LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
	LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\
	LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\
	LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
	LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE2, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE3, mask_sh),\
	LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, mask_sh),\
	LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM1, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM2, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM3, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM4, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM5, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM6, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM7, mask_sh),\
	LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM8, mask_sh),\
	LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, mask_sh),\
	LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, mask_sh),\
	LE_SF(DP0_DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, mask_sh),\
	LE_SF(DP0_DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, mask_sh),\
	LE_SF(DP0_DP_DPHY_TRAINING_PATTERN_SEL, DPHY_TRAINING_PATTERN_SEL, mask_sh),\
	LE_SF(DP0_DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, mask_sh),\
	LE_SF(DP0_DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, mask_sh),\
	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, mask_sh),\
	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE, mask_sh),\
	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE, mask_sh),\
	LE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
	LE_SF(DP0_DP_CONFIG, DP_UDI_LANES, mask_sh),\
	LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, mask_sh),\
	LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_PRIORITY, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC0, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC1, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT0, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT1, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC2, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC3, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT2, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT3, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, mask_sh),\
	LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, mask_sh),\
	LE_SF(DP_AUX0_AUX_CONTROL, AUX_HPD_SEL, mask_sh),\
	LE_SF(DP_AUX0_AUX_CONTROL, AUX_LS_READ_EN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW, mask_sh),\
	LE_SF(HPD0_DC_HPD_CONTROL, DC_HPD_EN, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_EN, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, mask_sh),\
	LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_START_WINDOW, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_HALF_SYM_DETECT_LEN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_TRANSITION_FILTER_EN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_START, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_STOP, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_PHASE_DETECT_LEN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
	LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
/* Construct a DCN401 link encoder from register/shift/mask tables. */
void dcn401_link_encoder_construct(
	struct dcn20_link_encoder *enc20,
	const struct encoder_init_data *init_data,
	const struct encoder_feature_support *enc_features,
	const struct dcn10_link_enc_registers *link_regs,
	const struct dcn10_link_enc_aux_registers *aux_regs,
	const struct dcn10_link_enc_hpd_registers *hpd_regs,
	const struct dcn10_link_enc_shift *link_shift,
	const struct dcn10_link_enc_mask *link_mask);

void enc401_hw_init(struct link_encoder *enc);

void dcn401_link_encoder_enable_dp_output(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	enum clock_source_id clock_source);

void dcn401_link_encoder_setup(
	struct link_encoder *enc,
	enum signal_type signal);

/* Report the signal type the DIG is currently configured for.
 * (Previously declared twice; the redundant duplicate was removed.)
 */
enum signal_type dcn401_get_dig_mode(
	struct link_encoder *enc);

bool dcn401_is_dig_enabled(struct link_encoder *enc);
#endif /* __DC_LINK_ENCODER__DCN401_H__ */

View file

@ -0,0 +1,895 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn32/dcn32_dio_stream_encoder.h"
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
#include "link.h"
#include "dpcd_defs.h"

#define DC_LOGGER \
		enc1->base.ctx->logger

/* Register access helpers for reg_helper.h macros; every function using
 * REG_UPDATE/REG_GET/etc. must have a local "enc1" in scope.
 */
#define REG(reg)\
	(enc1->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc1->se_shift->field_name, enc1->se_mask->field_name

#define VBI_LINE_0 0
/* TMDS character rate (kHz) above which scrambling and 1/4-rate clock
 * channel are programmed in the HDMI setup path below.
 */
#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000

#define CTX \
	enc1->base.ctx
/* Intentionally a no-op on DCN401; presumably ODM combine is programmed
 * elsewhere for this generation — the empty hook only satisfies the
 * stream_encoder_funcs table. (NOTE(review): confirm against hwseq.)
 */
static void enc401_dp_set_odm_combine(
	struct stream_encoder *enc,
	bool odm_combine)
{
}
/* setup stream encoder in dvi mode
 *
 * Programs the encoder either through the VBIOS encoder-control table
 * or, when avoid_vbios_exec_table is set, by direct register writes.
 * DVI is restricted to RGB 8 bpc (asserted below).
 */
static void enc401_stream_encoder_dvi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	bool is_dual_link)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
		struct bp_encoder_control cntl = {0};

		cntl.action = ENCODER_CONTROL_SETUP;
		cntl.engine_id = enc1->base.id;
		cntl.signal = is_dual_link ?
			SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
		cntl.enable_dp_audio = false;
		/* pix_clk_100hz is in 100 Hz units; encoder control takes kHz */
		cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
		cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;

		/* give up silently if the VBIOS call fails */
		if (enc1->base.bp->funcs->encoder_control(
				enc1->base.bp, &cntl) != BP_RESULT_OK)
			return;
	} else {
		//Set pattern for clock channel, default value 0x63 does not work
		REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);

		//DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup

		//DIG_SOURCE_SELECT is already set in dig_connect_to_otg

		/* DIG_START is removed from the register spec */
	}

	ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
	ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
	enc401_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
}
/* setup stream encoder in hdmi mode
 *
 * Programs the encoder via VBIOS or registers, then configures the HDMI
 * engine: deep color depth, data scrambling (mandatory above 340 MHz
 * TMDS character rate), general-control and audio infoframe packets.
 */
static void enc401_stream_encoder_hdmi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	int actual_pix_clk_khz,
	bool enable_audio)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
		struct bp_encoder_control cntl = {0};

		cntl.action = ENCODER_CONTROL_SETUP;
		cntl.engine_id = enc1->base.id;
		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		cntl.enable_dp_audio = enable_audio;
		cntl.pixel_clock = actual_pix_clk_khz;
		cntl.lanes_number = LANE_COUNT_FOUR;

		/* give up silently if the VBIOS call fails */
		if (enc1->base.bp->funcs->encoder_control(
				enc1->base.bp, &cntl) != BP_RESULT_OK)
			return;
	} else {
		//Set pattern for clock channel, default value 0x63 does not work
		REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);

		//DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup

		//DIG_SOURCE_SELECT is already set in dig_connect_to_otg

		/* DIG_START is removed from the register spec */
	}

	/* Configure pixel encoding */
	enc401_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);

	/* setup HDMI engine: start with deep color and scrambling off;
	 * the switches below turn them on where the timing requires it
	 */
	REG_UPDATE_6(HDMI_CONTROL,
		HDMI_PACKET_GEN_VERSION, 1,
		HDMI_KEEPOUT_MODE, 1,
		HDMI_DEEP_COLOR_ENABLE, 0,
		HDMI_DATA_SCRAMBLE_EN, 0,
		HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
		HDMI_CLOCK_CHANNEL_RATE, 0);

	/* Configure color depth; for YCbCr 4:2:2 the deep-color enable
	 * stays 0 even at 10/12 bpc (only the depth field is set)
	 */
	switch (crtc_timing->display_color_depth) {
	case COLOR_DEPTH_888:
		REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		break;
	case COLOR_DEPTH_101010:
		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 1,
					HDMI_DEEP_COLOR_ENABLE, 0);
		} else {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 1,
					HDMI_DEEP_COLOR_ENABLE, 1);
		}
		break;
	case COLOR_DEPTH_121212:
		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 2,
					HDMI_DEEP_COLOR_ENABLE, 0);
		} else {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 2,
					HDMI_DEEP_COLOR_ENABLE, 1);
		}
		break;
	case COLOR_DEPTH_161616:
		REG_UPDATE_2(HDMI_CONTROL,
				HDMI_DEEP_COLOR_DEPTH, 3,
				HDMI_DEEP_COLOR_ENABLE, 1);
		break;
	default:
		break;
	}

	if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
		/* enable HDMI data scrambler
		 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
		 * Clock channel frequency is 1/4 of character rate.
		 */
		REG_UPDATE_2(HDMI_CONTROL,
			HDMI_DATA_SCRAMBLE_EN, 1,
			HDMI_CLOCK_CHANNEL_RATE, 1);
	} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {

		/* TODO: New feature for DCE11, still need to implement */

		/* enable HDMI data scrambler
		 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
		 * Clock channel frequency is the same
		 * as character rate
		 */
		REG_UPDATE_2(HDMI_CONTROL,
			HDMI_DATA_SCRAMBLE_EN, 1,
			HDMI_CLOCK_CHANNEL_RATE, 0);
	}

	/* Enable transmission of General Control packet on every frame */
	REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
		HDMI_GC_CONT, 1,
		HDMI_GC_SEND, 1,
		HDMI_NULL_SEND, 1);

	/* Disable Audio Content Protection packet transmission */
	REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);

	/* following belongs to audio */
	/* Enable Audio InfoFrame packet transmission. */
	REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);

	/* update double-buffered AUDIO_INFO registers immediately */
	ASSERT(enc->afmt);
	enc->afmt->funcs->audio_info_immediate_update(enc->afmt);

	/* Select line number on which to send Audio InfoFrame packets */
	REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
				VBI_LINE_0 + 2);

	/* set HDMI GC AVMUTE */
	REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}
/* Report whether the timing packs two pixels per container:
 * YCbCr 4:2:0 always does; DSC-compressed YCbCr 4:2:2 does unless the
 * "simple" 4:2:2 scheme is in use.
 */
static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
		return true;

	return timing->flags.DSC &&
			timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 &&
			!timing->dsc_cfg.ycbcr422_simple;
}
/* math borrowed from function of same name in inc/resource
 * checks if h_timing is divisible by 2
 *
 * HTOTAL, Hblank start/end, and Hsync start/end all must be divisible
 * by 2 for the horizontal params to count as divisible. Hsync start is
 * always 0, so only the sync width needs checking.
 */
static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
{
	uint16_t blank_start;
	uint16_t blank_end;

	if (!timing)
		return false;

	blank_start = timing->h_total - timing->h_front_porch;
	blank_end = blank_start - timing->h_addressable;

	return (timing->h_total % 2 == 0) &&
			(blank_start % 2 == 0) &&
			(blank_end % 2 == 0) &&
			(timing->h_sync_width % 2 == 0);
}
/* should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy
 * for DP encoders: the policy applies only when the debug flag is set
 * and the horizontal timing is even.
 */
static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
{
	if (!dc->debug.enable_dp_dig_pixel_rate_div_policy)
		return false;

	return is_h_timing_divisible_by_2(timing);
}
/* Unblank the DP video stream: seed the M/N timing generator, reset and
 * re-enable the steer FIFO and DIG resync FIFO, then enable the video
 * stream. The REG_WAIT/udelay ordering follows hardware requirements —
 * do not reorder.
 */
static void enc401_stream_encoder_dp_unblank(
		struct dc_link *link,
		struct stream_encoder *enc,
		const struct encoder_unblank_param *param)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	struct dc *dc = enc->ctx->dc;

	if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
		uint32_t n_vid = 0x8000;
		uint32_t m_vid;
		uint32_t n_multiply = 0;
		// TODO: Fix defined but not used
		//uint32_t pix_per_cycle = 0;
		uint64_t m_vid_l = n_vid;

		/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
		if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
			|| is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
			/*this logic should be the same in get_pixel_clock_parameters() */
			n_multiply = 1;
			// TODO: Fix defined but not used
			//pix_per_cycle = 1;
		}
		/* M / N = Fstream / Flink
		 * m_vid / n_vid = pixel rate / link rate
		 */
		m_vid_l *= param->timing.pix_clk_100hz / 10;
		m_vid_l = div_u64(m_vid_l,
			param->link_settings.link_rate
				* LINK_RATE_REF_FREQ_IN_KHZ);

		m_vid = (uint32_t) m_vid_l;

		/* enable auto measurement */

		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);

		/* auto measurement need 1 full 0x8000 symbol cycle to kick in,
		 * therefore program initial value for Mvid and Nvid
		 */

		REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);

		REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);

		REG_UPDATE_2(DP_VID_TIMING,
				DP_VID_M_N_GEN_EN, 1,
				DP_VID_N_INTERVAL, n_multiply);
	}

	/* make sure stream is disabled before resetting steer fifo */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);

	/* DIG_START is removed from the register spec */

	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
	 * that it overflows during mode transition, and sometimes doesn't recover.
	 */
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
	udelay(10);

	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_ENABLE, 1);

	REG_UPDATE_2(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 1, DP_VID_STREAM_DIS_DEFER, 2);
	udelay(200);

	/* DIG Resync FIFO now needs to be explicitly enabled
	 */
	/* read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1
	 * so set it to 1/2 full = 7 before reset as suggested by hardware team.
	 */
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);

	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);

	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);

	/* wait 100us for DIG/DP logic to prime
	 * (i.e. a few video lines)
	 */
	udelay(100);

	/* the hardware would start sending video at the start of the next DP
	 * frame (i.e. rising edge of the vblank).
	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
	 * register has no effect on enable transition! HW always guarantees
	 * VID_STREAM enable at start of next frame, and this is not
	 * programmable
	 */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);

	link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
 * dsc_mode: 0 disables DSC, other values enable DSC in specified format
 * dsc_bytes_per_pixel: DP_DSC_BYTES_PER_PIXEL removed in DCN3x
 * dsc_slice_width: DP_DSC_SLICE_WIDTH removed in DCN3x
 */
static void enc401_dp_set_dsc_config(struct stream_encoder *enc,
					enum optc_dsc_mode dsc_mode,
					uint32_t dsc_bytes_per_pixel,
					uint32_t dsc_slice_width)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* Only the enable bit is programmed; per the note above, the
	 * bytes-per-pixel and slice-width registers no longer exist, so
	 * those parameters are intentionally unused here.
	 */
	REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
 * into a dcn_dsc_state struct.
 */
static void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	//if dsc is enabled, continue to read
	REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
	if (s->dsc_mode) {
		REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num);

		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);

		REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable);
		REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
	}
}
/* Program the DIG FIFO pixels-per-cycle select.
 * The field name is confusing: what it means is the output mode of otg,
 * which is the input mode of the dig.
 * Mapping: 2 pixels -> 0x1, 4 -> 0x2, 8 -> 0x3, anything else -> 0x0.
 */
static void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	uint32_t pix_sel;

	if (pix_per_container == 2)
		pix_sel = 0x1;
	else if (pix_per_container == 4)
		pix_sel = 0x2;
	else if (pix_per_container == 8)
		pix_sel = 0x3;
	else
		pix_sel = 0x0;

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE, pix_sel);
}
/* Enable or disable the DIG front end for the given signal type.
 * On enable: select the FE mode from the signal, then turn on the FE
 * clock and the FE itself. On disable: turn both off in reverse order.
 */
static void enc401_stream_encoder_enable(
	struct stream_encoder *enc,
	enum signal_type signal,
	bool enable)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (!enable) {
		REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
		REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
		return;
	}

	switch (signal) {
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		/* TMDS-DVI */
		REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_MODE, 2);
		break;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		/* TMDS-HDMI */
		REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_MODE, 3);
		break;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		/* DP MST */
		REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_MODE, 5);
		break;
	case SIGNAL_TYPE_EDP:
	case SIGNAL_TYPE_DISPLAY_PORT:
		/* DP SST */
		REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_MODE, 0);
		break;
	default:
		/* invalid mode ! */
		ASSERT_CRITICAL(false);
	}

	REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
	REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
}
/* Program the DP stream attributes: pixel encoding, component depth,
 * MSA MISC0/MISC1 colorimetry bits, and the MSA timing parameters
 * derived from the (possibly halved, for interlace) CRTC timing.
 */
void enc401_stream_encoder_dp_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space,
	bool use_vsc_sdp_for_colorimetry,
	uint32_t enable_sdp_splitting)
{
	uint32_t h_active_start;
	uint32_t v_active_start;
	uint32_t misc0 = 0;
	uint32_t misc1 = 0;
	uint32_t h_blank;
	uint32_t h_back_porch;
	uint8_t synchronous_clock = 0; /* asynchronous mode */
	uint8_t colorimetry_bpc;
	uint8_t dp_pixel_encoding = 0;
	uint8_t dp_component_depth = 0;
	uint8_t dp_translate_pixel_enc = 0;
	// Fix set but not used warnings
	//uint8_t dp_pixel_encoding_type = 0;
	uint8_t dp_compressed_pixel_format = 0;

	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	/* local copy so interlace adjustments don't mutate the caller's timing */
	struct dc_crtc_timing hw_crtc_timing = *crtc_timing;

	if (hw_crtc_timing.flags.INTERLACE) {
		/*the input timing is in VESA spec format with Interlace flag =1*/
		hw_crtc_timing.v_total /= 2;
		hw_crtc_timing.v_border_top /= 2;
		hw_crtc_timing.v_addressable /= 2;
		hw_crtc_timing.v_border_bottom /= 2;
		hw_crtc_timing.v_front_porch /= 2;
		hw_crtc_timing.v_sync_width /= 2;
	}

	/* set pixel encoding */
	switch (hw_crtc_timing.pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
		break;
	case PIXEL_ENCODING_YCBCR444:
		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;

		if (hw_crtc_timing.flags.Y_ONLY)
			if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666)
				/* HW testing only, no use case yet.
				 * Color depth of Y-only could be
				 * 8, 10, 12, 16 bits
				 */
				dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;

		/* Note: DP_MSA_MISC1 bit 7 is the indicator
		 * of Y-only mode.
		 * This bit is set in HW if register
		 * DP_PIXEL_ENCODING is programmed to 0x4
		 */
		break;
	case PIXEL_ENCODING_YCBCR420:
		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
		break;
	default:
		dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
		break;
	}

	/* read-modify-write: only bit 6 (VSC SDP) is changed below */
	misc1 = REG_READ(DP_MSA_MISC);
	/* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
	 * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
	 * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
	 * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
	 */
	if (use_vsc_sdp_for_colorimetry)
		misc1 = misc1 | 0x40;
	else
		misc1 = misc1 & ~0x40;

	/* set color depth */
	switch (hw_crtc_timing.display_color_depth) {
	case COLOR_DEPTH_666:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
		break;
	case COLOR_DEPTH_888:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
		break;
	case COLOR_DEPTH_101010:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
		break;
	case COLOR_DEPTH_121212:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
		break;
	case COLOR_DEPTH_161616:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
		break;
	default:
		dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
		break;
	}

	if (hw_crtc_timing.flags.DSC) {
		// Fix set but not used error
		//dp_pixel_encoding_type = 1;
		switch (hw_crtc_timing.pixel_encoding) {
		case PIXEL_ENCODING_YCBCR444:
			dp_compressed_pixel_format = 0;
			break;
		case PIXEL_ENCODING_YCBCR422:
			dp_compressed_pixel_format = 1;
			/* simple 4:2:2 is treated like 4:4:4 by the compressed format */
			if (hw_crtc_timing.dsc_cfg.ycbcr422_simple)
				dp_compressed_pixel_format = 0;
			break;
		case PIXEL_ENCODING_YCBCR420:
			dp_compressed_pixel_format = 1;
			break;
		default:
			dp_compressed_pixel_format = 0;
			break;
		}
	} else {
		// Fix set but not used error
		//dp_pixel_encoding_type = 0;
		switch (dp_pixel_encoding) {
		case DP_PIXEL_ENCODING_TYPE_RGB444:
			dp_translate_pixel_enc = 0;
			break;
		case DP_PIXEL_ENCODING_TYPE_YCBCR422:
			dp_translate_pixel_enc = 1;
			break;
		case DP_PIXEL_ENCODING_TYPE_YCBCR444:
			dp_translate_pixel_enc = 0;
			break;
		case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
			dp_translate_pixel_enc = 3;
			break;
		case DP_PIXEL_ENCODING_TYPE_YCBCR420:
			dp_translate_pixel_enc = 2;
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	/* Set DP pixel encoding and component depth */
	REG_UPDATE_4(DP_PIXEL_FORMAT,
			PIXEL_ENCODING_TYPE, hw_crtc_timing.flags.DSC ? 1 : 0,
			UNCOMPRESSED_PIXEL_FORMAT, dp_translate_pixel_enc,
			UNCOMPRESSED_COMPONENT_DEPTH, dp_component_depth,
			COMPRESSED_PIXEL_FORMAT, dp_compressed_pixel_format);

	/* set dynamic range and YCbCr range */
	switch (hw_crtc_timing.display_color_depth) {
	case COLOR_DEPTH_666:
		colorimetry_bpc = 0;
		break;
	case COLOR_DEPTH_888:
		colorimetry_bpc = 1;
		break;
	case COLOR_DEPTH_101010:
		colorimetry_bpc = 2;
		break;
	case COLOR_DEPTH_121212:
		colorimetry_bpc = 3;
		break;
	default:
		colorimetry_bpc = 0;
		break;
	}

	misc0 = misc0 | synchronous_clock;
	/* NOTE(review): this plain assignment discards the synchronous_clock
	 * bit OR'd in just above. Harmless today because synchronous_clock is
	 * always 0 (asynchronous mode), but "misc0 |=" was likely intended.
	 */
	misc0 = colorimetry_bpc << 5;

	switch (output_color_space) {
	case COLOR_SPACE_SRGB:
		misc1 = misc1 & ~0x80; /* bit7 = 0*/
		break;
	case COLOR_SPACE_SRGB_LIMITED:
		misc0 = misc0 | 0x8; /* bit3=1 */
		misc1 = misc1 & ~0x80; /* bit7 = 0*/
		break;
	case COLOR_SPACE_YCBCR601:
	case COLOR_SPACE_YCBCR601_LIMITED:
		misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
		misc1 = misc1 & ~0x80; /* bit7 = 0*/
		if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
		else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
		break;
	case COLOR_SPACE_YCBCR709:
	case COLOR_SPACE_YCBCR709_LIMITED:
		misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
		misc1 = misc1 & ~0x80; /* bit7 = 0*/
		if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
		else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
		break;
	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
	case COLOR_SPACE_2020_RGB_FULLRANGE:
	case COLOR_SPACE_2020_YCBCR:
	case COLOR_SPACE_XR_RGB:
	case COLOR_SPACE_MSREF_SCRGB:
	case COLOR_SPACE_ADOBERGB:
	case COLOR_SPACE_DCIP3:
	case COLOR_SPACE_XV_YCC_709:
	case COLOR_SPACE_XV_YCC_601:
	case COLOR_SPACE_DISPLAYNATIVE:
	case COLOR_SPACE_DOLBYVISION:
	case COLOR_SPACE_APPCTRL:
	case COLOR_SPACE_CUSTOMPOINTS:
	case COLOR_SPACE_UNKNOWN:
	case COLOR_SPACE_YCBCR709_BLACK:
		/* do nothing */
		break;
	}

	REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
	REG_WRITE(DP_MSA_MISC, misc1);  /* MSA_MISC1 */

	/* dcn new register
	 * dc_crtc_timing is vesa dmt struct. data from edid
	 */
	REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
			DP_MSA_HTOTAL, hw_crtc_timing.h_total,
			DP_MSA_VTOTAL, hw_crtc_timing.v_total);

	/* calculate from vesa timing parameters
	 * h_active_start related to leading edge of sync
	 */
	h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
			hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;

	h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
			hw_crtc_timing.h_sync_width;

	/* start at beginning of left border */
	h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;

	v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
			hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
			hw_crtc_timing.v_front_porch;

	/* start at beginning of left border */
	REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
		DP_MSA_HSTART, h_active_start,
		DP_MSA_VSTART, v_active_start);

	REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
			DP_MSA_HSYNCWIDTH,
			hw_crtc_timing.h_sync_width,
			DP_MSA_HSYNCPOLARITY,
			!hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY,
			DP_MSA_VSYNCWIDTH,
			hw_crtc_timing.v_sync_width,
			DP_MSA_VSYNCPOLARITY,
			!hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY);

	/* HWDITH include border or overscan */
	REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
		DP_MSA_HWIDTH, hw_crtc_timing.h_border_left +
		hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right,
		DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
		hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);

	REG_UPDATE(DP_SEC_FRAMING4,
		DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}
/* Point this stream encoder at a link encoder via the stream mapper.
 * stream_enc_inst is unused here; the register written belongs to the
 * encoder instance that enc itself refers to.
 */
static void enc401_stream_encoder_map_to_link(
		struct stream_encoder *enc,
		uint32_t stream_enc_inst,
		uint32_t link_enc_inst)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	REG_UPDATE(STREAM_MAPPER_CONTROL,
				DIG_STREAM_LINK_TARGET, link_enc_inst);
}
/* DCN401 stream encoder vtable: DCN401-specific hooks (enc401_*) mixed
 * with implementations inherited from earlier generations (enc1_*,
 * enc3_*, enc32_*).
 */
static const struct stream_encoder_funcs dcn401_str_enc_funcs = {
	.dp_set_odm_combine =
		enc401_dp_set_odm_combine,
	.dp_set_stream_attribute =
		enc401_stream_encoder_dp_set_stream_attribute,
	.hdmi_set_stream_attribute =
		enc401_stream_encoder_hdmi_set_stream_attribute,
	.dvi_set_stream_attribute =
		enc401_stream_encoder_dvi_set_stream_attribute,
	.set_throttled_vcp_size =
		enc1_stream_encoder_set_throttled_vcp_size,
	.update_hdmi_info_packets =
		enc3_stream_encoder_update_hdmi_info_packets,
	.stop_hdmi_info_packets =
		enc3_stream_encoder_stop_hdmi_info_packets,
	.update_dp_info_packets_sdp_line_num =
		enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
	.update_dp_info_packets =
		enc3_stream_encoder_update_dp_info_packets,
	.stop_dp_info_packets =
		enc1_stream_encoder_stop_dp_info_packets,
	.dp_blank =
		enc1_stream_encoder_dp_blank,
	.dp_unblank =
		enc401_stream_encoder_dp_unblank,
	.audio_mute_control = enc3_audio_mute_control,
	.dp_audio_setup = enc3_se_dp_audio_setup,
	.dp_audio_enable = enc3_se_dp_audio_enable,
	.dp_audio_disable = enc1_se_dp_audio_disable,
	.hdmi_audio_setup = enc3_se_hdmi_audio_setup,
	.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
	.setup_stereo_sync = enc1_setup_stereo_sync,
	.set_avmute = enc1_stream_encoder_set_avmute,
	.dig_connect_to_otg = enc1_dig_connect_to_otg,
	.dig_source_otg = enc1_dig_source_otg,

	.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,

	.enc_read_state = enc401_read_state,
	.dp_set_dsc_config = enc401_dp_set_dsc_config,
	.dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
	.set_dynamic_metadata = enc401_set_dynamic_metadata,
	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
	.dig_stream_enable = enc401_stream_encoder_enable,

	.set_input_mode = enc401_set_dig_input_mode,
	.enable_fifo = enc32_enable_fifo,
	.map_stream_to_link = enc401_stream_encoder_map_to_link,
};
/* Initialize a DCN401 stream encoder: wire up the base encoder state,
 * associate the VPG/AFMT blocks, and attach the register/shift/mask
 * tables for this instance.
 */
void dcn401_dio_stream_encoder_construct(
	struct dcn10_stream_encoder *enc1,
	struct dc_context *ctx,
	struct dc_bios *bp,
	enum engine_id eng_id,
	struct vpg *vpg,
	struct afmt *afmt,
	const struct dcn10_stream_enc_registers *regs,
	const struct dcn10_stream_encoder_shift *se_shift,
	const struct dcn10_stream_encoder_mask *se_mask)
{
	/* base stream encoder state */
	enc1->base.funcs = &dcn401_str_enc_funcs;
	enc1->base.ctx = ctx;
	enc1->base.bp = bp;
	enc1->base.id = eng_id;
	enc1->base.vpg = vpg;
	enc1->base.afmt = afmt;
	/* the stream encoder instance follows its VPG instance */
	enc1->base.stream_enc_inst = vpg->inst;

	/* per-instance register addresses and field layouts */
	enc1->regs = regs;
	enc1->se_shift = se_shift;
	enc1->se_mask = se_mask;
}
/* Enable or disable the dynamic metadata engine (DME).
 * enable_dme: turn the metadata engine on or off
 * hubp_requestor_id: HUBP instance that supplies the metadata
 * dmdata_mode: selects DP SDP vs HDMI packet transport; Dolby Vision
 *              additionally sets the stream type and HDMI DV enable
 */
void enc401_set_dynamic_metadata(struct stream_encoder *enc,
		bool enable_dme,
		uint32_t hubp_requestor_id,
		enum dynamic_metadata_mode dmdata_mode)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (enable_dme) {
		REG_UPDATE_2(DME_CONTROL,
				METADATA_HUBP_REQUESTOR_ID, hubp_requestor_id,
				METADATA_STREAM_TYPE, (dmdata_mode == dmdata_dolby_vision) ? 1 : 0);

		/* Use default line reference DP_SOF for bringup.
		 * Should use OTG_SOF for DRR cases
		 */
		if (dmdata_mode == dmdata_dp)
			REG_UPDATE_3(DP_SEC_METADATA_TRANSMISSION,
					DP_SEC_METADATA_PACKET_ENABLE, 1,
					DP_SEC_METADATA_PACKET_LINE_REFERENCE, 0,
					DP_SEC_METADATA_PACKET_LINE, 20);
		else {
			REG_UPDATE_3(HDMI_METADATA_PACKET_CONTROL,
					HDMI_METADATA_PACKET_ENABLE, 1,
					HDMI_METADATA_PACKET_LINE_REFERENCE, 0,
					HDMI_METADATA_PACKET_LINE, 2);

			if (dmdata_mode == dmdata_dolby_vision)
				REG_UPDATE(HDMI_CONTROL,
						DOLBY_VISION_EN, 1);
		}

		/* engine is enabled last, after the packet path is configured */
		REG_UPDATE(DME_CONTROL,
				METADATA_ENGINE_EN, 1);
	} else {
		/* engine is disabled first, then the packet paths */
		REG_UPDATE(DME_CONTROL,
				METADATA_ENGINE_EN, 0);

		if (dmdata_mode == dmdata_dp)
			REG_UPDATE(DP_SEC_METADATA_TRANSMISSION,
					DP_SEC_METADATA_PACKET_ENABLE, 0);
		else {
			REG_UPDATE(HDMI_METADATA_PACKET_CONTROL,
					HDMI_METADATA_PACKET_ENABLE, 0);
			REG_UPDATE(HDMI_CONTROL,
					DOLBY_VISION_EN, 0);
		}
	}
}
/* Program the TMDS pixel-encoding bits shared by the HDMI and DVI setup
 * paths: 4:2:2 packing selects encoding 1, everything else 0; the TMDS
 * color format is left at its default (0).
 */
void enc401_stream_encoder_set_stream_attribute_helper(
		struct dcn10_stream_encoder *enc1,
		struct dc_crtc_timing *crtc_timing)
{
	uint32_t pixel_encoding_sel =
		(crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) ? 1 : 0;

	REG_UPDATE(HDMI_CONTROL, TMDS_PIXEL_ENCODING, pixel_encoding_sel);
	REG_UPDATE(HDMI_CONTROL, TMDS_COLOR_FORMAT, 0);
}

View file

@ -0,0 +1,217 @@
/*
* Copyright 2021 - Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_DIO_STREAM_ENCODER_DCN401_H__
#define __DC_DIO_STREAM_ENCODER_DCN401_H__

#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "stream_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"

/*
 * Shift/mask field list for the DCN4.01 DIO stream encoder.
 *
 * Expands to one SE_SF() entry per register field the stream encoder
 * programs: DP pixel format / MSA / SEC / VID stream control, HDMI
 * infoframe and generic packet control, ACR audio regeneration, the
 * metadata engine (DME) and DIG front-end control.
 *
 * mask_sh selects which per-field constant suffix each SE_SF() entry
 * expands with (presumably the __SHIFT vs _MASK variants used to build
 * the shift and mask tables passed to
 * dcn401_dio_stream_encoder_construct() — see SE_SF's definition).
 */
#define SE_COMMON_MASK_SH_LIST_DCN401(mask_sh)\
	SE_SF(DP0_DP_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\
	SE_SF(DP0_DP_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_FORMAT, mask_sh),\
	SE_SF(DP0_DP_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\
	SE_SF(DP0_DP_PIXEL_FORMAT, COMPRESSED_PIXEL_FORMAT, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\
	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
	SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
	SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
	SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\
	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
	SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
	SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_ENABLE, mask_sh),\
	SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
	SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
	SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
	SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
	SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
	SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
	SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
	SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
	SE_SF(DIG1_HDMI_CONTROL, TMDS_PIXEL_ENCODING, mask_sh),\
	SE_SF(DIG1_HDMI_CONTROL, TMDS_COLOR_FORMAT, mask_sh),\
	SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
	SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\
	SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\
	SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\
	SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, mask_sh),\
	SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
	SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
	SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
	SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
	SE_SF(DP0_DP_VID_TIMING, DP_VID_N_INTERVAL, mask_sh),\
	SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh), \
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_SEND, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, mask_sh),\
	SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\
	SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\
	SE_SF(DME0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\
	SE_SF(DME0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\
	SE_SF(DME0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\
	SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\
	SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
	SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\
	SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
	SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
	SE_SF(DIG0_HDMI_CONTROL, DOLBY_VISION_EN, mask_sh),\
	SE_SF(DIG0_DIG_FE_EN_CNTL, DIG_FE_ENABLE, mask_sh),\
	SE_SF(DIG0_DIG_FE_CLK_CNTL, DIG_FE_MODE, mask_sh),\
	SE_SF(DIG0_DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, mask_sh),\
	SE_SF(DIG0_DIG_FE_CLK_CNTL, DIG_FE_SOFT_RESET, mask_sh),\
	SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
	SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
	SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE, mask_sh),\
	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\
	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\
	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\
	SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
	SE_SF(DIG0_STREAM_MAPPER_CONTROL, DIG_STREAM_LINK_TARGET, mask_sh),
/*
 * Construct a DCN4.01 DIO stream encoder: wires the register block,
 * shift/mask tables, VPG and AFMT sub-blocks into the dcn10 encoder base.
 */
void dcn401_dio_stream_encoder_construct(
	struct dcn10_stream_encoder *enc1,
	struct dc_context *ctx,
	struct dc_bios *bp,
	enum engine_id eng_id,
	struct vpg *vpg,
	struct afmt *afmt,
	const struct dcn10_stream_enc_registers *regs,
	const struct dcn10_stream_encoder_shift *se_shift,
	const struct dcn10_stream_encoder_mask *se_mask);

/*
 * Enable/disable the metadata engine (DME) and route its output to the
 * DP SDP or HDMI metadata packet path depending on dmdata_mode.
 */
void enc401_set_dynamic_metadata(struct stream_encoder *enc,
	bool enable_dme,
	uint32_t hubp_requestor_id,
	enum dynamic_metadata_mode dmdata_mode);

/* Program TMDS_PIXEL_ENCODING/TMDS_COLOR_FORMAT for the given timing. */
void enc401_stream_encoder_set_stream_attribute_helper(
	struct dcn10_stream_encoder *enc1,
	struct dc_crtc_timing *crtc_timing);

/* Program the DP stream attributes (pixel format, MSA timing, SDP split). */
void enc401_stream_encoder_dp_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space,
	bool use_vsc_sdp_for_colorimetry,
	uint32_t enable_sdp_splitting);

#endif /* __DC_DIO_STREAM_ENCODER_DCN401_H__ */

View file

@ -0,0 +1,933 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn30/dcn30_hubbub.h"
#include "dcn401_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
#define CTX \
hubbub2->base.ctx
#define DC_LOGGER \
hubbub2->base.ctx->logger
#define REG(reg)\
hubbub2->regs->reg
#undef FN
#define FN(reg_name, field_name) \
hubbub2->shifts->field_name, hubbub2->masks->field_name
/*
 * Read back the current DET0..DET3 and compressed-buffer (COMPBUF) segment
 * sizes from hardware into the hubbub state, then program the reserved
 * space in the compressed buffer.
 */
static void dcn401_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Cache what the HW currently reports so later reallocation can
	 * start from the real state rather than assumed defaults. */
	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);
	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);
	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);
	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);
	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space is expressed in 64-byte units, hence the /32
	 * conversion from pixel_chunk_size. */
	REG_SET(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32); // 256 64Bytes
}
/*
 * Program the urgent-latency watermark group (urgent, fractional urgent
 * bandwidth for flip/nominal/MALL, refcyc-per-trip) for watermark sets
 * A and B.
 *
 * A watermark is written immediately when it is being raised, or when the
 * caller says lowering is safe (safe_to_lower). A requested decrease that
 * cannot be applied yet is reported by returning true (wm_pending) so the
 * caller can re-program later with safe_to_lower = true.
 *
 * NOTE(review): the DC_LOG lines print the calculated value for both the
 * "calculated" and "HW register value" fields (decimal and hex of the same
 * variable) — there is no register read-back here.
 */
bool hubbub401_program_urgent_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* Repeat for water mark set A and B */
	/* clock state A */
	if (safe_to_lower || watermarks->dcn4.a.urgent > hubbub2->watermarks.dcn4.a.urgent) {
		hubbub2->watermarks.dcn4.a.urgent = watermarks->dcn4.a.urgent;
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4.a.urgent);
		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.a.urgent, watermarks->dcn4.a.urgent);
	} else if (watermarks->dcn4.a.urgent < hubbub2->watermarks.dcn4.a.urgent)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_flip
			> hubbub2->watermarks.dcn4.a.frac_urg_bw_flip) {
		hubbub2->watermarks.dcn4.a.frac_urg_bw_flip = watermarks->dcn4.a.frac_urg_bw_flip;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4.a.frac_urg_bw_flip);
	} else if (watermarks->dcn4.a.frac_urg_bw_flip
			< hubbub2->watermarks.dcn4.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_nom
			> hubbub2->watermarks.dcn4.a.frac_urg_bw_nom) {
		hubbub2->watermarks.dcn4.a.frac_urg_bw_nom = watermarks->dcn4.a.frac_urg_bw_nom;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4.a.frac_urg_bw_nom);
	} else if (watermarks->dcn4.a.frac_urg_bw_nom
			< hubbub2->watermarks.dcn4.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_mall
			> hubbub2->watermarks.dcn4.a.frac_urg_bw_mall) {
		hubbub2->watermarks.dcn4.a.frac_urg_bw_mall = watermarks->dcn4.a.frac_urg_bw_mall;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4.a.frac_urg_bw_mall);
	} else if (watermarks->dcn4.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4.a.frac_urg_bw_mall)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem) {
		hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem = watermarks->dcn4.a.refcyc_per_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4.a.refcyc_per_trip_to_mem);
	} else if (watermarks->dcn4.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem) {
		hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4.a.refcyc_per_meta_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4.a.refcyc_per_meta_trip_to_mem);
	} else if (watermarks->dcn4.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4.b.urgent > hubbub2->watermarks.dcn4.b.urgent) {
		hubbub2->watermarks.dcn4.b.urgent = watermarks->dcn4.b.urgent;
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4.b.urgent);
		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.b.urgent, watermarks->dcn4.b.urgent);
	} else if (watermarks->dcn4.b.urgent < hubbub2->watermarks.dcn4.b.urgent)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_flip
			> hubbub2->watermarks.dcn4.b.frac_urg_bw_flip) {
		hubbub2->watermarks.dcn4.b.frac_urg_bw_flip = watermarks->dcn4.b.frac_urg_bw_flip;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4.b.frac_urg_bw_flip);
	} else if (watermarks->dcn4.b.frac_urg_bw_flip
			< hubbub2->watermarks.dcn4.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_nom
			> hubbub2->watermarks.dcn4.b.frac_urg_bw_nom) {
		hubbub2->watermarks.dcn4.b.frac_urg_bw_nom = watermarks->dcn4.b.frac_urg_bw_nom;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4.b.frac_urg_bw_nom);
	} else if (watermarks->dcn4.b.frac_urg_bw_nom
			< hubbub2->watermarks.dcn4.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_mall
			> hubbub2->watermarks.dcn4.b.frac_urg_bw_mall) {
		hubbub2->watermarks.dcn4.b.frac_urg_bw_mall = watermarks->dcn4.b.frac_urg_bw_mall;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4.b.frac_urg_bw_mall);
	} else if (watermarks->dcn4.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4.b.frac_urg_bw_mall)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem) {
		hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem = watermarks->dcn4.b.refcyc_per_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4.b.refcyc_per_trip_to_mem);
	} else if (watermarks->dcn4.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem) {
		hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4.b.refcyc_per_meta_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4.b.refcyc_per_meta_trip_to_mem);
	} else if (watermarks->dcn4.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem)
		wm_pending = true;

	return wm_pending;
}
/*
 * Program the self-refresh (stutter) enter/exit watermarks for watermark
 * sets A and B. Each base watermark is mirrored into the WATERMARK1/2/3
 * variants with the same value (Z-state levels are N/A on dGPU per the
 * inline comments).
 *
 * Raising a watermark is applied immediately; lowering is deferred until
 * safe_to_lower and reported via the return value (true = still pending).
 */
bool hubbub401_program_stutter_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->dcn4.a.sr_enter
			> hubbub2->watermarks.dcn4.a.sr_enter) {
		hubbub2->watermarks.dcn4.a.sr_enter =
				watermarks->dcn4.a.sr_enter;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4.a.sr_enter);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.a.sr_enter, watermarks->dcn4.a.sr_enter);
		// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm A with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4.a.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4.a.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4.a.sr_enter);
	} else if (watermarks->dcn4.a.sr_enter
			< hubbub2->watermarks.dcn4.a.sr_enter)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.a.sr_exit
			> hubbub2->watermarks.dcn4.a.sr_exit) {
		hubbub2->watermarks.dcn4.a.sr_exit =
				watermarks->dcn4.a.sr_exit;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4.a.sr_exit);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.a.sr_exit, watermarks->dcn4.a.sr_exit);
		// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm A with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4.a.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4.a.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4.a.sr_exit);
	} else if (watermarks->dcn4.a.sr_exit
			< hubbub2->watermarks.dcn4.a.sr_exit)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4.b.sr_enter
			> hubbub2->watermarks.dcn4.b.sr_enter) {
		hubbub2->watermarks.dcn4.b.sr_enter =
				watermarks->dcn4.b.sr_enter;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4.b.sr_enter);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.b.sr_enter, watermarks->dcn4.b.sr_enter);
		// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm B with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4.b.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4.b.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4.b.sr_enter);
	} else if (watermarks->dcn4.b.sr_enter
			< hubbub2->watermarks.dcn4.b.sr_enter)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4.b.sr_exit
			> hubbub2->watermarks.dcn4.b.sr_exit) {
		hubbub2->watermarks.dcn4.b.sr_exit =
				watermarks->dcn4.b.sr_exit;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4.b.sr_exit);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4.b.sr_exit, watermarks->dcn4.b.sr_exit);
		// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm B with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4.b.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4.b.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4.b.sr_exit);
	} else if (watermarks->dcn4.b.sr_exit
			< hubbub2->watermarks.dcn4.b.sr_exit)
		wm_pending = true;

	return wm_pending;
}
bool hubbub401_program_pstate_watermarks(
struct hubbub *hubbub,
union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
if (safe_to_lower || watermarks->dcn4.a.uclk_pstate
> hubbub2->watermarks.dcn4.a.uclk_pstate) {
hubbub2->watermarks.dcn4.a.uclk_pstate =
watermarks->dcn4.a.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.a.uclk_pstate, watermarks->dcn4.a.uclk_pstate);
} else if (watermarks->dcn4.a.uclk_pstate
< hubbub2->watermarks.dcn4.a.uclk_pstate)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->dcn4.b.uclk_pstate
> hubbub2->watermarks.dcn4.b.uclk_pstate) {
hubbub2->watermarks.dcn4.b.uclk_pstate =
watermarks->dcn4.b.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.b.uclk_pstate, watermarks->dcn4.b.uclk_pstate);
} else if (watermarks->dcn4.b.uclk_pstate
< hubbub2->watermarks.dcn4.b.uclk_pstate)
wm_pending = true;
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
> hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
watermarks->dcn4.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
} else if (watermarks->dcn4.a.temp_read_or_ppt
< hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
> hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
watermarks->dcn4.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
} else if (watermarks->dcn4.b.temp_read_or_ppt
< hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
wm_pending = true;
/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
if (safe_to_lower || watermarks->dcn4.a.fclk_pstate
> hubbub2->watermarks.dcn4.a.fclk_pstate) {
hubbub2->watermarks.dcn4.a.fclk_pstate =
watermarks->dcn4.a.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.a.fclk_pstate, watermarks->dcn4.a.fclk_pstate);
} else if (watermarks->dcn4.a.fclk_pstate
< hubbub2->watermarks.dcn4.a.fclk_pstate)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->dcn4.b.fclk_pstate
> hubbub2->watermarks.dcn4.b.fclk_pstate) {
hubbub2->watermarks.dcn4.b.fclk_pstate =
watermarks->dcn4.b.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.b.fclk_pstate, watermarks->dcn4.b.fclk_pstate);
} else if (watermarks->dcn4.b.fclk_pstate
< hubbub2->watermarks.dcn4.b.fclk_pstate)
wm_pending = true;
/* Section for FCLK_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
> hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
watermarks->dcn4.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
} else if (watermarks->dcn4.a.temp_read_or_ppt
< hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
> hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
watermarks->dcn4.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
} else if (watermarks->dcn4.b.temp_read_or_ppt
< hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
wm_pending = true;
return wm_pending;
}
/*
 * Program the USR (retraining) watermarks for watermark sets A and B.
 * Same policy as the other watermark groups: raise immediately, defer
 * lowering until safe_to_lower and return true while a decrease is pending.
 */
bool hubbub401_program_usr_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->dcn4.a.usr
			> hubbub2->watermarks.dcn4.a.usr) {
		hubbub2->watermarks.dcn4.a.usr = watermarks->dcn4.a.usr;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4.a.usr);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4.a.usr, watermarks->dcn4.a.usr);
	} else if (watermarks->dcn4.a.usr
			< hubbub2->watermarks.dcn4.a.usr)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4.b.usr
			> hubbub2->watermarks.dcn4.b.usr) {
		hubbub2->watermarks.dcn4.b.usr = watermarks->dcn4.b.usr;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4.b.usr);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4.b.usr, watermarks->dcn4.b.usr);
	} else if (watermarks->dcn4.b.usr
			< hubbub2->watermarks.dcn4.b.usr)
		wm_pending = true;

	return wm_pending;
}
/*
 * Top-level watermark programming entry: run every watermark group
 * programmer and report whether any of them left a decrease pending.
 *
 * Every group must be invoked unconditionally (no short-circuit), which
 * is why the results are accumulated with |= rather than ||.
 */
static bool hubbub401_program_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	bool wm_pending = false;

	wm_pending |= hubbub401_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized
	 * schedule, the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet,
	 * set maximum value (0x1FF) to turn off it for now.
	 *
	 * Left unprogrammed for now:
	 *   REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
	 *           DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	 *   REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
	 *           DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
	 */

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}
/*
 * Copy values from WM set A to all other sets.
 *
 * Seeds every watermark register of the non-A sets from the currently
 * programmed set-A value so that all sets start out consistent.
 * Note: DCHUBBUB_ARB_ALLOW_SR_ENTER/EXIT_WATERMARK1..3 (both A and B)
 * are seeded from the *base* set-A register as well, not from their own
 * set-A counterparts.
 */
static void hubbub401_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	/* Urgency and fractional urgent bandwidth watermarks. */
	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, reg);

	/* Self-refresh enter: set-A value fans out to B and to sets 1..3 A/B. */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, reg);

	/* Self-refresh exit: same fan-out pattern as enter. */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, reg);

	/* USR retraining and UCLK/FCLK p-state change watermarks. */
	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, reg);
}
/*
 * Read back the currently programmed watermark values for sets A and B
 * into @wm (sets [0] and [1]); all other fields are zeroed first.
 */
static void hubbub401_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* Watermark set A -> wm->sets[0]. */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);
	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);
	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);
	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);
	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	/* Watermark set B -> wm->sets[1]. */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);
	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);
	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);
	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);
	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);
}
/*
 * Report whether DCC is supported for the given addr3 swizzle mode and,
 * when it is, the segment ordering (horizontal/vertical) for the given
 * bytes-per-element.
 *
 * Returns true only when both the swizzle mode qualifies and
 * bytes_per_element is one of 1, 2, 4 or 8; the segment order outputs
 * are written only on the true path.
 */
bool hubbub401_dcc_support_swizzle(
	enum swizzle_mode_addr3_values swizzle,
	unsigned int plane_pitch,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	bool supported;

	switch (swizzle) {
	case DC_ADDR3_SW_LINEAR:
		/* Linear only qualifies when each row is a multiple of 256 bytes. */
		supported = ((plane_pitch * bytes_per_element) % 256 == 0);
		break;
	case DC_ADDR3_SW_64KB_2D:
	case DC_ADDR3_SW_256KB_2D:
		supported = true;
		break;
	default:
		supported = false;
		break;
	}

	if (!supported)
		return false;

	switch (bytes_per_element) {
	case 1:
	case 4:
	case 8:
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	case 2:
		/* 16bpp is the one case with the opposite ordering. */
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	default:
		return false;
	}
}
/*
 * Report whether DCC is supported for the given surface pixel format
 * and, when it is, the bytes-per-element of plane 0 and plane 1
 * (plane 1 is 0 for single-plane formats).
 *
 * Returns false — leaving the outputs untouched — for unsupported
 * formats.
 */
bool hubbub401_dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *plane0_bpe,
	unsigned int *plane1_bpe)
{
	unsigned int bpe0;
	unsigned int bpe1;

	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		bpe0 = 2;
		bpe1 = 0;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		bpe0 = 1;
		bpe1 = 2;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
		bpe0 = 4;
		bpe1 = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
		/* RGBE plus a separate 8-bit alpha plane. */
		bpe0 = 4;
		bpe1 = 1;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		bpe0 = 2;
		bpe1 = 4;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
	case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
	case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
		bpe0 = 4;
		bpe1 = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		bpe0 = 8;
		bpe1 = 0;
		break;
	default:
		return false;
	}

	*plane0_bpe = bpe0;
	*plane1_bpe = bpe1;
	return true;
}
/*
 * Return the width/height (in elements) of a 256-byte block for the
 * given bytes-per-element; width * height * bytes_per_element == 256
 * for every supported case.  Outputs are left untouched for any other
 * bytes_per_element value.
 */
void hubbub401_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	}
}
/*
 * Decide, per plane and per scan direction, whether HUBP must fall back
 * to half-size (128B) requests instead of full 256B requests.  A plane
 * can use 256B requests only while two full swaths of its data fit in
 * the detile buffer; for dual-plane formats both planes' swaths must fit
 * together, with a secondary pass that lets one plane keep 256B requests
 * while the other drops to 128B.
 *
 * Output convention for all four *_req128_* flags:
 *   false = full 256B request, true = half 128B request.
 */
void hubbub401_det_request_size(
	unsigned int detile_buf_size,
	enum surface_pixel_format format,
	unsigned int p0_height,
	unsigned int p0_width,
	unsigned int p0_bpe,
	unsigned int p1_height,
	unsigned int p1_width,
	unsigned int p1_bpe,
	bool *p0_req128_horz_wc,
	bool *p0_req128_vert_wc,
	bool *p1_req128_horz_wc,
	bool *p1_req128_vert_wc)
{
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int p0_swath_bytes_horz_wc, p0_swath_bytes_vert_wc;
	unsigned int p1_swath_bytes_horz_wc, p1_swath_bytes_vert_wc;

	//For plane0
	hubbub401_get_blk256_size(&blk256_width, &blk256_height, p0_bpe);

	/* One swath spans the full plane extent in one direction by one
	 * 256B-block extent in the other. */
	p0_swath_bytes_horz_wc = p0_width * blk256_height * p0_bpe;
	p0_swath_bytes_vert_wc = p0_height * blk256_width * p0_bpe;

	*p0_req128_horz_wc = (2 * p0_swath_bytes_horz_wc <= detile_buf_size) ?
		false : /* full 256B request */
		true; /* half 128b request */

	*p0_req128_vert_wc = (2 * p0_swath_bytes_vert_wc <= detile_buf_size) ?
		false : /* full 256B request */
		true; /* half 128b request */

	/*For dual planes needs to be considered together */
	if (p1_bpe) {
		hubbub401_get_blk256_size(&blk256_width, &blk256_height, p1_bpe);

		p1_swath_bytes_horz_wc = p1_width * blk256_height * p1_bpe;
		p1_swath_bytes_vert_wc = p1_height * blk256_width * p1_bpe;

		switch (format) {
		default:
			/* No any adjustment needed*/
			break;
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
			/* Packing at the ratio of 3:2 is supported before the detile buffer
			 * for YUV420 video with 10bpc (P010). Need to adjust for that.
			 * (Scale by 2/3, rounded up to a 256-byte multiple.)
			 */
			p0_swath_bytes_horz_wc = (((p0_swath_bytes_horz_wc * 2) / 3 + 255) / 256) * 256;
			p0_swath_bytes_vert_wc = (((p0_swath_bytes_vert_wc * 2) / 3 + 255) / 256) * 256;
			p1_swath_bytes_horz_wc = (((p1_swath_bytes_horz_wc * 2) / 3 + 255) / 256) * 256;
			p1_swath_bytes_vert_wc = (((p1_swath_bytes_vert_wc * 2) / 3 + 255) / 256) * 256;
			break;
		}

		/* First pass: both planes get the same answer based on whether
		 * two swaths of EACH plane fit together. */
		*p0_req128_horz_wc = *p1_req128_horz_wc = (2 * p0_swath_bytes_horz_wc +
			2 * p1_swath_bytes_horz_wc <= detile_buf_size) ?
				false : /* full 256B request */
				true; /* half 128B request */

		*p0_req128_vert_wc = *p1_req128_vert_wc = (2 * p0_swath_bytes_vert_wc +
			2 * p1_swath_bytes_vert_wc <= detile_buf_size) ?
				false : /* full 256B request */
				true; /* half 128B request */

		/* If 128B requests are true, meaning 2 full swaths of data cannot fit
		 * in de-tile buffer, check if one plane can use 256B request while
		 * the other plane is using 128B requests
		 */
		if (*p0_req128_horz_wc) {
			// If ratio around 1:1 between p0 and p1 try to recalulate if p0 can use 256B
			if (p0_swath_bytes_horz_wc <= p1_swath_bytes_horz_wc + p1_swath_bytes_horz_wc / 2) {

				*p0_req128_horz_wc = (2 * p0_swath_bytes_horz_wc + p1_swath_bytes_horz_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */

			} else {
				/* ratio about 2:1 between p0 and p1, try to recalulate if p1 can use 256B */
				*p1_req128_horz_wc = (p0_swath_bytes_horz_wc + 2 * p1_swath_bytes_horz_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */
			}
		}

		if (*p0_req128_vert_wc) {
			// If ratio around 1:1 between p0 and p1 try to recalulate if p0 can use 256B
			if (p0_swath_bytes_vert_wc <= p1_swath_bytes_vert_wc + p1_swath_bytes_vert_wc / 2) {

				*p0_req128_vert_wc = (2 * p0_swath_bytes_vert_wc + p1_swath_bytes_vert_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */

			} else {
				/* ratio about 2:1 between p0 and p1, try to recalulate if p1 can use 256B */
				*p1_req128_vert_wc = (p0_swath_bytes_vert_wc + 2 * p1_swath_bytes_vert_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */
			}
		}
	}
}
/*
 * Program the DET allocation, in CRB segments, for one HUBP instance and
 * mirror the value into the software copy used for budget tracking.
 * Warns (but still programs) when the sum of all DET allocations plus
 * the compressed buffer exceeds the total CRB segment budget.
 */
static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_buffer_size_seg);
		hubbub2->det0_size = det_buffer_size_seg;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_buffer_size_seg);
		hubbub2->det1_size = det_buffer_size_seg;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_buffer_size_seg);
		hubbub2->det2_size = det_buffer_size_seg;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_buffer_size_seg);
		hubbub2->det3_size = det_buffer_size_seg;
		break;
	default:
		/* Invalid HUBP instance: silently ignored. */
		break;
	}

	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) >  CRB segments (%d)\n",
						hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
						hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}
/*
 * Program the compressed buffer (compbuf) size in CRB segments.
 *
 * A grow is applied only when @safe_to_increase; a shrink (or equal
 * size) is always applied.  Before growing, wait for each DET's
 * "current size" to reach its programmed value so the segments being
 * claimed are actually free.  After programming, CONFIG_ERROR is read
 * back and asserted clear.
 */
static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int cur_compbuf_size_seg = 0;

	if (safe_to_increase || compbuf_size_seg <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_seg > hubbub2->compbuf_size_segments) {
			/* Growing: wait (up to 100 iterations, 1us apart) for the
			 * DET reconfiguration to settle before taking the space. */
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_seg <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_seg);
		hubbub2->compbuf_size_segments = compbuf_size_seg;

#ifdef DIAGS_BUILD
		/* Diags: REG_GET's return value is not usable; read then assert. */
		REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &cur_compbuf_size_seg);
		ASSERT(!cur_compbuf_size_seg);
#else
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &cur_compbuf_size_seg) && !cur_compbuf_size_seg);
#endif
	}
}
/*
 * DCN4.01 hubbub vtable.  Most entries reuse the DCN1/2/3/32
 * implementations; only watermark programming, the addr3 DCC support
 * queries and the DET/compbuf segment programming are DCN4.01-specific.
 */
static const struct hubbub_funcs hubbub4_01_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle_addr3 = hubbub401_dcc_support_swizzle,
	.dcc_support_pixel_format_plane0_plane1 = hubbub401_dcc_support_pixel_format,
	.wm_read_state = hubbub401_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub401_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = NULL,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub401_init_watermarks,
	.init_crb = dcn401_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
	.set_request_limit = hubbub32_set_request_limit,
	.program_det_segments = dcn401_program_det_segments,
	.program_compbuf_segments = dcn401_program_compbuf_segments,
};
/*
 * Initialize a DCN4.01 hubbub instance: wire up the context, function
 * table and register/shift/mask tables, and derive the buffer geometry
 * from the sizes given in KB.
 */
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb)
{
	hubbub2->base.ctx = ctx;
	hubbub2->base.funcs = &hubbub4_01_funcs;
	hubbub2->regs = hubbub_regs;
	hubbub2->shifts = hubbub_shift;
	hubbub2->masks = hubbub_mask;

	/* KB -> bytes for the detile buffer and pixel chunk ... */
	hubbub2->detile_buf_size = det_size_kb * 1024;
	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
	/* ... and the CRB budget expressed in 64KB segments. */
	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN4_01_CRB_SEGMENT_SIZE_KB;
}

View file

@ -0,0 +1,192 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_HUBBUB_DCN401_H__
#define __DC_HUBBUB_DCN401_H__

#include "dcn32/dcn32_hubbub.h"

/* Total configurable return buffer (CRB) size, KB */
#define DCN4_01_CRB_SIZE_KB 1344
/* Default DET size, KB — presumably per pipe; confirm against resource config */
#define DCN4_01_DEFAULT_DET_SIZE 320
/* Granularity used to carve the CRB into DET/compbuf segments, KB
 * (hubbub401_construct divides the CRB size by this) */
#define DCN4_01_CRB_SEGMENT_SIZE_KB 64

/*
 * Shift/mask field list for the DCN4.01 HUBBUB register set: global
 * timer/reset, arbiter watermarks (sets A/B and 1..3 where present),
 * VM/FB apertures and fault status, DET/compbuf sizing, USR retraining
 * and SDPIF controls.
 */
#define HUBBUB_MASK_SH_LIST_DCN4_01(mask_sh)\
	HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
	HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
	HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
	HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
	HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
	HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
	HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\
	HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, mask_sh),\
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, mask_sh), \
	HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
	HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\
	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
	HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
	HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\
	HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
/*
 * Watermark programming helpers; each returns true when a watermark
 * change is still pending (a value that was not safe to lower yet).
 */
bool hubbub401_program_urgent_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower);

bool hubbub401_program_stutter_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower);

bool hubbub401_program_pstate_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower);

bool hubbub401_program_usr_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower);

/* DCC capability queries for addr3 swizzle modes and pixel formats. */
bool hubbub401_dcc_support_swizzle(
	enum swizzle_mode_addr3_values swizzle,
	unsigned int plane_pitch,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert);

bool hubbub401_dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *plane0_bpe,
	unsigned int *plane1_bpe);

/* 256-byte block geometry for a given bytes-per-element. */
void hubbub401_get_blk256_size(
	unsigned int *blk256_width,
	unsigned int *blk256_height,
	unsigned int bytes_per_element);

/* Per-plane 128B-vs-256B request sizing against the detile buffer. */
void hubbub401_det_request_size(
	unsigned int detile_buf_size,
	enum surface_pixel_format format,
	unsigned int p0_height,
	unsigned int p0_width,
	unsigned int p0_bpe,
	unsigned int p1_height,
	unsigned int p1_width,
	unsigned int p1_bpe,
	bool *p0_req128_horz_wc,
	bool *p0_req128_vert_wc,
	bool *p1_req128_horz_wc,
	bool *p1_req128_vert_wc);

void hubbub401_construct(struct dcn20_hubbub *hubbub2,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,331 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_HUBP_DCN401_H__
#define __DC_HUBP_DCN401_H__
#include "dcn20/dcn20_hubp.h"
#include "dcn21/dcn21_hubp.h"
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
#include "dml2/dml21/inc/dml_top_dchub_registers.h"
/* Per-HUBP register list for the 3D LUT "FL" path (presumably "fast
 * load" — confirm against register docs); combines HUBP- and
 * CURSOR0-scoped registers for the given instance. */
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
	SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
	SRI_ARR_US(_3DLUT_FL_BIAS_SCALE, HUBP, inst),\
	SRI_ARR(HUBP_3DLUT_ADDRESS_HIGH, CURSOR0_, inst),\
	SRI_ARR(HUBP_3DLUT_ADDRESS_LOW, CURSOR0_, inst),\
	SRI_ARR(HUBP_3DLUT_CONTROL, CURSOR0_, inst),\
	SRI_ARR(HUBP_3DLUT_DLG_PARAM, CURSOR0_, inst)
#define HUBP_MASK_SH_LIST_DCN401(mask_sh)\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SOFT_RESET, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_Y_G, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_ALPHA, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, PACK_3TO2_ELEMENT_DISABLE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
HUBP_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, ROW_TTU_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_REQ_MODE, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_5, REFCYC_PER_PTE_GROUP_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, USE_MALL_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_VMPG_CONFIG, VMPG_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_VMPG_CONFIG, PTE_BUFFER_MODE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_VMPG_CONFIG, BIGK_FRAGMENT_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_VMPG_CONFIG, FORCE_ONE_ROW_FOR_FRAME, mask_sh),\
HUBP_SF(HUBPREQ0_UCLK_PSTATE_FORCE, DATA_UCLK_PSTATE_FORCE_EN, mask_sh),\
HUBP_SF(HUBPREQ0_UCLK_PSTATE_FORCE, DATA_UCLK_PSTATE_FORCE_VALUE, mask_sh),\
HUBP_SF(HUBPREQ0_UCLK_PSTATE_FORCE, CURSOR_UCLK_PSTATE_FORCE_EN, mask_sh),\
HUBP_SF(HUBPREQ0_UCLK_PSTATE_FORCE, CURSOR_UCLK_PSTATE_FORCE_VALUE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, MALL_PREF_CMD_TYPE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, MALL_PREF_MODE, mask_sh),\
HUBP_SF(HUBP0_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mask_sh),\
HUBP_SF(HUBP0_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, mask_sh),\
HUBP_SF(HUBP0_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, mask_sh),\
HUBP_SF(HUBP0_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_SCALE, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_ENABLE, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_DONE, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_CROSSBAR_SELECT_Y_G, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_CROSSBAR_SELECT_CB_B, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_CONTROL, HUBP_3DLUT_CROSSBAR_SELECT_CR_R, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_LOW, HUBP_3DLUT_ADDRESS_LOW, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, mask_sh),\
/* Program MALL usage selection for the plane; c_cursor selects cursor MALL use. */
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);

/*
 * Configure VREADY relative to VSYNC from the pipe destination timing.
 * NOTE(review): the mixed-case "After" in the name is the original spelling.
 */
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);

/* Program the request-generator (RQ) register set. */
void hubp401_program_requestor(
		struct hubp *hubp,
		struct _vcs_dpi_display_rq_regs_st *rq_regs);

/* Program deadline (DLG) and time-to-underflow (TTU) register sets. */
void hubp401_program_deadline(
		struct hubp *hubp,
		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
		struct _vcs_dpi_display_ttu_regs_st *ttu_attr);

/* One-shot setup: programs DLG, TTU, RQ and destination timing together. */
void hubp401_setup(
		struct hubp *hubp,
		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
		struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
		struct _vcs_dpi_display_rq_regs_st *rq_regs,
		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);

/* Program only the DLG/TTU values that interact across pipes. */
void hubp401_setup_interdependent(
		struct hubp *hubp,
		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
		struct _vcs_dpi_display_ttu_regs_st *ttu_attr);

/*
 * Program surface addresses and kick a flip; returns a status flag
 * (presumably success/pending — confirm against the implementation).
 */
bool hubp401_program_surface_flip_and_addr(
		struct hubp *hubp,
		const struct dc_plane_address *address,
		bool flip_immediate);

/* Program DCC (compression) controls for the surface. */
void hubp401_dcc_control(struct hubp *hubp,
		struct dc_plane_dcc_param *dcc);

/* Program tiling/swizzle configuration; note this takes the dcn20 hubp struct. */
void hubp401_program_tiling(
		struct dcn20_hubp *hubp2,
		const union dc_tiling_info *info,
		const enum surface_pixel_format pixel_format);

/* Program surface pitch/size registers for the given format. */
void hubp401_program_size(
		struct hubp *hubp,
		enum surface_pixel_format format,
		const struct plane_size *plane_size,
		struct dc_plane_dcc_param *dcc);

/* Program full surface config: format, tiling, size, rotation, DCC, mirror. */
void hubp401_program_surface_config(
		struct hubp *hubp,
		enum surface_pixel_format format,
		union dc_tiling_info *tiling_info,
		struct plane_size *plane_size,
		enum dc_rotation_angle rotation,
		struct dc_plane_dcc_param *dcc,
		bool horizontal_mirror,
		unsigned int compat_level);

/* Program luma and chroma viewport rectangles. */
void hubp401_set_viewport(struct hubp *hubp,
		const struct rect *viewport,
		const struct rect *viewport_c);

/* Enable the surface-flip interrupt for this pipe. */
void hubp401_set_flip_int(struct hubp *hubp);

/* Query whether the HUBP is currently in blank. */
bool hubp401_in_blank(struct hubp *hubp);

/* Program hardware cursor position (and related hotspot/offset state). */
void hubp401_cursor_set_position(
		struct hubp *hubp,
		const struct dc_cursor_position *pos,
		const struct dc_cursor_mi_param *param);

/* Read back HUBP register state (for debug/logging). */
void hubp401_read_state(struct hubp *hubp);

/* Construct a DCN4.01 HUBP instance over the dcn20 hubp base struct. */
bool hubp401_construct(
		struct dcn20_hubp *hubp2,
		struct dc_context *ctx,
		uint32_t inst,
		const struct dcn_hubp2_registers *hubp_regs,
		const struct dcn_hubp2_shift *hubp_shift,
		const struct dcn_hubp2_mask *hubp_mask);

/* Hardware init for the HUBP block. */
void hubp401_init(struct hubp *hubp);

/* Poll the 3D LUT fast-load done status; returns an int status/flag. */
int hubp401_get_3dlut_fl_done(struct hubp *hubp);

/* Enable/disable unbounded requesting mode for this HUBP. */
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);

#endif /* __DC_HUBP_DCN401_H__ */

View file

@ -0,0 +1,645 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dc.h"
#include "dcn401_mpc.h"
#include "dcn10/dcn10_cm_common.h"
#include "basics/conversion.h"
#include "mpc.h"
/*
 * reg_helper shorthands.  All three expand in terms of a local variable
 * named "mpc401" (struct dcn401_mpc *), so every function below that uses
 * REG_SET/REG_GET/REG_UPDATE must declare that local, even when it looks
 * otherwise unused.
 */
#define REG(reg)\
	mpc401->mpc_regs->reg

/* dc_context used by the REG_* helpers */
#define CTX \
	mpc401->base.ctx

#undef FN
/* shift/mask pair lookup for a named register field */
#define FN(reg_name, field_name) \
	mpc401->mpc_shift->field_name, mpc401->mpc_mask->field_name
/*
 * Select which HUBP instance feeds the 3D LUT fast-load interface of the
 * given MPCC (single register-field write, no readback).
 */
static void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
/*
 * Read back 3D LUT fast-load status for an MPCC: completion flag plus
 * soft/hard underflow indicators, each returned through its out-pointer.
 */
static void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
			MPCC_MCM_3DLUT_FL_DONE, done,
			MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
			MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
}
/*
 * Place the movable color-management block either before (0) or after (1)
 * the position encoded by MPCC_MOVABLE_CM_LOCATION_CNTL for this MPCC.
 * Any other location value performs no register write.
 */
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	if (location == MPCC_MOVABLE_CM_LOCATION_BEFORE)
		REG_UPDATE(MPCC_MOVABLE_CM_LOCATION_CONTROL[mpcc_id],
				MPCC_MOVABLE_CM_LOCATION_CNTL, 0);
	else if (location == MPCC_MOVABLE_CM_LOCATION_AFTER)
		REG_UPDATE(MPCC_MOVABLE_CM_LOCATION_CONTROL[mpcc_id],
				MPCC_MOVABLE_CM_LOCATION_CNTL, 1);
}
/*
 * Read back the current 3D LUT hardware configuration for an MPCC.
 *
 * Returns the active LUT mode (RAM A, RAM B, or bypass for any other
 * encoding) and reports via out-pointers whether the table is 17x17x17
 * (size field == 0) and whether color channels are 12-bit (30-bit/10 bpc
 * enable bit clear).
 */
static enum dc_lut_mode get3dlut_config(
	struct mpc *mpc,
	bool *is_17x17x17,
	bool *is_12bits_color_channel,
	int mpcc_id)
{
	uint32_t mode_raw, enable_10bits, size_raw;
	enum dc_lut_mode mode = LUT_BYPASS;
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
			MPCC_MCM_3DLUT_MODE_CURRENT, &mode_raw);
	REG_GET(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
			MPCC_MCM_3DLUT_30BIT_EN, &enable_10bits);

	if (mode_raw == 1)
		mode = LUT_RAM_A;
	else if (mode_raw == 2)
		mode = LUT_RAM_B;
	/* 0 and any unknown encoding both mean bypass */

	/* 10-bit enable clear => channels are stored as 12-bit */
	*is_12bits_color_channel = (enable_10bits == 0);

	REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &size_raw);
	*is_17x17x17 = (size_raw == 0);

	return mode;
}
/*
 * Load LUT contents into one of the MPCC MCM color blocks (post-blend 1D
 * LUT, shaper LUT, or 3D LUT) using the dcn32 programming helpers.
 *
 * params is a union: .pwl is used for the 1D and shaper LUTs, .lut3d for
 * the 3D LUT.  lut_bank_a selects which double-buffered RAM bank to write
 * (bank A <=> LUT_RAM_A).  A NULL table pointer makes the call a no-op for
 * that LUT type.  The helper call order (power-on, configure, settings,
 * data) is preserved exactly; do not reorder.
 */
void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params, bool lut_bank_a, int mpcc_id)
{
	const enum dc_lut_mode next_mode = lut_bank_a ? LUT_RAM_A : LUT_RAM_B;
	const struct pwl_params *lut1d = params.pwl;
	const struct pwl_params *lut_shaper = params.pwl;
	bool is_17x17x17;
	bool is_12bits_color_channel;
	const struct dc_rgb *lut0;
	const struct dc_rgb *lut1;
	const struct dc_rgb *lut2;
	const struct dc_rgb *lut3;
	int lut_size0;
	int lut_size;
	const struct tetrahedral_params *lut3d = params.lut3d;

	switch (id) {
	case MCM_LUT_1DLUT:
		if (lut1d == NULL)
			return;
		mpc32_power_on_blnd_lut(mpc, mpcc_id, true);
		mpc32_configure_post1dlut(mpc, mpcc_id, next_mode == LUT_RAM_A);
		if (next_mode == LUT_RAM_A)
			mpc32_program_post1dluta_settings(mpc, mpcc_id, lut1d);
		else
			mpc32_program_post1dlutb_settings(mpc, mpcc_id, lut1d);
		mpc32_program_post1dlut_pwl(
				mpc, mpcc_id, lut1d->rgb_resulted, lut1d->hw_points_num);
		break;
	case MCM_LUT_SHAPER:
		if (lut_shaper == NULL)
			return;
		/* power gating is conditional on the low-power debug knob here */
		if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
			mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);
		mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
		if (next_mode == LUT_RAM_A)
			mpc32_program_shaper_luta_settings(mpc, lut_shaper, mpcc_id);
		else
			mpc32_program_shaper_lutb_settings(mpc, lut_shaper, mpcc_id);
		mpc32_program_shaper_lut(
				mpc, lut_shaper->rgb_resulted, lut_shaper->hw_points_num, mpcc_id);
		mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
		break;
	case MCM_LUT_3DLUT:
		if (lut3d == NULL)
			return;
		mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);
		/*
		 * NOTE(review): the values read back by get3dlut_config() are
		 * immediately overwritten from lut3d below, so only the call's
		 * register reads take effect — confirm whether it can be
		 * dropped or whether the readback is required by hardware.
		 */
		get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, mpcc_id);

		is_17x17x17 = !lut3d->use_tetrahedral_9;
		is_12bits_color_channel = lut3d->use_12bits;
		if (is_17x17x17) {
			lut0 = lut3d->tetrahedral_17.lut0;
			lut1 = lut3d->tetrahedral_17.lut1;
			lut2 = lut3d->tetrahedral_17.lut2;
			lut3 = lut3d->tetrahedral_17.lut3;
			/* element counts of the fixed-size table arrays */
			lut_size0 = sizeof(lut3d->tetrahedral_17.lut0)/
						sizeof(lut3d->tetrahedral_17.lut0[0]);
			lut_size  = sizeof(lut3d->tetrahedral_17.lut1)/
						sizeof(lut3d->tetrahedral_17.lut1[0]);
		} else {
			lut0 = lut3d->tetrahedral_9.lut0;
			lut1 = lut3d->tetrahedral_9.lut1;
			lut2 = lut3d->tetrahedral_9.lut2;
			lut3 = lut3d->tetrahedral_9.lut3;
			lut_size0 = sizeof(lut3d->tetrahedral_9.lut0)/
						sizeof(lut3d->tetrahedral_9.lut0[0]);
			lut_size  = sizeof(lut3d->tetrahedral_9.lut1)/
						sizeof(lut3d->tetrahedral_9.lut1[0]);
		}

		mpc32_select_3dlut_ram(mpc, next_mode,
					is_12bits_color_channel, mpcc_id);
		/* write each of the four sub-tables behind its RAM mask bit */
		mpc32_select_3dlut_ram_mask(mpc, 0x1, mpcc_id);
		if (is_12bits_color_channel)
			mpc32_set3dlut_ram12(mpc, lut0, lut_size0, mpcc_id);
		else
			mpc32_set3dlut_ram10(mpc, lut0, lut_size0, mpcc_id);

		mpc32_select_3dlut_ram_mask(mpc, 0x2, mpcc_id);
		if (is_12bits_color_channel)
			mpc32_set3dlut_ram12(mpc, lut1, lut_size, mpcc_id);
		else
			mpc32_set3dlut_ram10(mpc, lut1, lut_size, mpcc_id);

		mpc32_select_3dlut_ram_mask(mpc, 0x4, mpcc_id);
		if (is_12bits_color_channel)
			mpc32_set3dlut_ram12(mpc, lut2, lut_size, mpcc_id);
		else
			mpc32_set3dlut_ram10(mpc, lut2, lut_size, mpcc_id);

		mpc32_select_3dlut_ram_mask(mpc, 0x8, mpcc_id);
		if (is_12bits_color_channel)
			mpc32_set3dlut_ram12(mpc, lut3, lut_size, mpcc_id);
		else
			mpc32_set3dlut_ram10(mpc, lut3, lut_size, mpcc_id);

		if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
			mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
		break;
	}
}
/*
 * Enable or disable one of the MCM LUT stages on an MPCC and, where the
 * hardware encodes it in the mode field, select the active RAM bank.
 *
 * 3D LUT / shaper: mode 0 disables, mode 1/2 enables bank A/B.
 * 1D LUT: mode 0 disables, mode 2 enables; the bank is a separate
 * SELECT field (0 = bank A, 1 = bank B) programmed unconditionally.
 */
void mpc401_program_lut_mode(
	struct mpc *mpc,
	const enum MCM_LUT_ID id,
	const enum MCM_LUT_XABLE xable,
	bool lut_bank_a,
	int mpcc_id)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	switch (id) {
	case MCM_LUT_3DLUT:
		if (xable == MCM_LUT_DISABLE)
			REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE, 0);
		else if (xable == MCM_LUT_ENABLE)
			REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE,
					lut_bank_a ? 1 : 2);
		break;
	case MCM_LUT_SHAPER:
		if (xable == MCM_LUT_DISABLE)
			REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, 0);
		else if (xable == MCM_LUT_ENABLE)
			REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE,
					lut_bank_a ? 1 : 2);
		break;
	case MCM_LUT_1DLUT:
		if (xable == MCM_LUT_DISABLE)
			REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
					MPCC_MCM_1DLUT_MODE, 0);
		else if (xable == MCM_LUT_ENABLE)
			REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
					MPCC_MCM_1DLUT_MODE, 2);
		/* bank select is programmed regardless of enable state */
		REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
				MPCC_MCM_1DLUT_SELECT, lut_bank_a ? 0 : 1);
		break;
	}
}
/*
 * Point the read/write (CPU-access) side of the selected MCM LUT stage at
 * the requested RAM bank so the host can program the inactive bank.
 *
 * For the 3D LUT this unmasks all four sub-table RAMs and selects the
 * bank directly; the shaper and 1D LUT delegate to the dcn32 helpers.
 */
void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	switch (id) {
	case MCM_LUT_3DLUT:
		mpc32_select_3dlut_ram_mask(mpc, 0xf, mpcc_id);
		REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
		break;
	case MCM_LUT_SHAPER:
		mpc32_configure_shaper_lut(mpc, lut_bank_a, mpcc_id);
		break;
	case MCM_LUT_1DLUT:
		/*
		 * Fix: mpc32_configure_post1dlut() takes (mpc, mpcc_id, is_ram_a)
		 * — see the call in mpc401_populate_lut().  The original passed
		 * lut_bank_a in the mpcc_id position and vice versa.
		 */
		mpc32_configure_post1dlut(mpc, mpcc_id, lut_bank_a);
		break;
	}
}
/*
 * Program a 3x4 gamut-remap coefficient matrix (regval, 12 fixed-point
 * values) into one of the three remap blocks of an MPCC, then switch the
 * block to the requested coefficient set.
 *
 * A NULL regval or MODE_SELECT_0 only writes the mode field (bypass/
 * disable) and returns without touching coefficients.  Each block is
 * double-buffered: MODE_SELECT_1 targets the A register set, MODE_SELECT_2
 * the B set.  cm_helper_program_color_matrices() walks the consecutive
 * C11..C34 registers starting from the csc_c11_c12 address.
 */
static void program_gamut_remap(
	struct mpc *mpc,
	unsigned int mpcc_id,
	const uint16_t *regval,
	enum mpcc_gamut_remap_id gamut_remap_block_id,
	enum mpcc_gamut_remap_mode_select mode_select)
{
	struct color_matrices_reg gamut_regs;
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);

	switch (gamut_remap_block_id) {
	case MPCC_OGAM_GAMUT_REMAP:
		/* bypass: write mode only and return */
		if (regval == NULL || mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
			REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0,
					MPCC_GAMUT_REMAP_MODE, mode_select);
			return;
		}
		/* the A-set shifts/masks apply to both register banks */
		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_GAMUT_REMAP_C12_A;

		switch (mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}

		cm_helper_program_color_matrices(
				mpc->ctx,
				regval,
				&gamut_regs);

		//select coefficient set to use, set A (MODE_1) or set B (MODE_2)
		REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0, MPCC_GAMUT_REMAP_MODE, mode_select);
		break;

	case MPCC_MCM_FIRST_GAMUT_REMAP:
		if (regval == NULL || mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
			REG_SET(MPCC_MCM_FIRST_GAMUT_REMAP_MODE[mpcc_id], 0,
					MPCC_MCM_FIRST_GAMUT_REMAP_MODE, mode_select);
			return;
		}
		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_MCM_FIRST_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_MCM_FIRST_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_MCM_FIRST_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_MCM_FIRST_GAMUT_REMAP_C12_A;

		switch (mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}

		cm_helper_program_color_matrices(
				mpc->ctx,
				regval,
				&gamut_regs);

		//select coefficient set to use, set A (MODE_1) or set B (MODE_2)
		REG_SET(MPCC_MCM_FIRST_GAMUT_REMAP_MODE[mpcc_id], 0,
				MPCC_MCM_FIRST_GAMUT_REMAP_MODE, mode_select);
		break;

	case MPCC_MCM_SECOND_GAMUT_REMAP:
		if (regval == NULL || mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
			REG_SET(MPCC_MCM_SECOND_GAMUT_REMAP_MODE[mpcc_id], 0,
					MPCC_MCM_SECOND_GAMUT_REMAP_MODE, mode_select);
			return;
		}
		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_MCM_SECOND_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_MCM_SECOND_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_MCM_SECOND_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_MCM_SECOND_GAMUT_REMAP_C12_A;

		switch (mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}

		cm_helper_program_color_matrices(
				mpc->ctx,
				regval,
				&gamut_regs);

		//select coefficient set to use, set A (MODE_1) or set B (MODE_2)
		REG_SET(MPCC_MCM_SECOND_GAMUT_REMAP_MODE[mpcc_id], 0,
				MPCC_MCM_SECOND_GAMUT_REMAP_MODE, mode_select);
		break;

	default:
		break;
	}
}
/*
 * Apply (or bypass) a gamut-remap adjustment on one MPCC.
 *
 * Non-SW adjustment types disable the selected remap block.  For SW
 * adjustments the 12-entry temperature matrix is converted to register
 * format and written to whichever coefficient set (A/B) is NOT currently
 * active, then that set is made active — classic double-buffering.
 */
void mpc401_set_gamut_remap(
	struct mpc *mpc,
	int mpcc_id,
	const struct mpc_grph_gamut_adjustment *adjust)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
	struct fixed31_32 matrix[12];
	uint16_t reg_vals[12];
	uint32_t active_set = 0;
	unsigned int idx;

	/* Bypass / Disable if type is bypass or hw */
	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
		program_gamut_remap(mpc, mpcc_id, NULL,
				adjust->mpcc_gamut_remap_block_id,
				MPCC_GAMUT_REMAP_MODE_SELECT_0);
		return;
	}

	for (idx = 0; idx < 12; idx++)
		matrix[idx] = adjust->temperature_matrix[idx];
	convert_float_matrix(reg_vals, matrix, 12);

	/* read back which coefficient set the hardware is using right now */
	switch (adjust->mpcc_gamut_remap_block_id) {
	case MPCC_OGAM_GAMUT_REMAP:
		REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id],
				MPCC_GAMUT_REMAP_MODE_CURRENT, &active_set);
		break;
	case MPCC_MCM_FIRST_GAMUT_REMAP:
		REG_GET(MPCC_MCM_FIRST_GAMUT_REMAP_MODE[mpcc_id],
				MPCC_MCM_FIRST_GAMUT_REMAP_MODE_CURRENT, &active_set);
		break;
	case MPCC_MCM_SECOND_GAMUT_REMAP:
		REG_GET(MPCC_MCM_SECOND_GAMUT_REMAP_MODE[mpcc_id],
				MPCC_MCM_SECOND_GAMUT_REMAP_MODE_CURRENT, &active_set);
		break;
	default:
		break;
	}

	/* program the set that is not in use: A unless A is already active */
	program_gamut_remap(mpc, mpcc_id, reg_vals,
			adjust->mpcc_gamut_remap_block_id,
			active_set != MPCC_GAMUT_REMAP_MODE_SELECT_1 ?
					MPCC_GAMUT_REMAP_MODE_SELECT_1 :
					MPCC_GAMUT_REMAP_MODE_SELECT_2);
}
/*
 * read_gamut_remap - read back the active gamut remap (CSC) coefficients of
 * one MPCC remap block.
 * @mpc: base MPC instance
 * @mpcc_id: index of the MPCC to read
 * @regval: out, 12 coefficients in HW register format (valid only when
 *          *mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_0)
 * @gamut_remap_block_id: which remap block (OGAM, MCM first, MCM second)
 * @mode_select: out, coefficient set currently in use (bypass/A/B)
 *
 * Fix vs. original: gamut_regs and *mode_select were left uninitialized when
 * a default: branch was taken, so the trailing cm_helper_read_color_matrices()
 * call (and the caller's decision on *mode_select) could consume indeterminate
 * values — undefined behavior. Both are now initialized to a safe bypass state
 * up front; every valid path overwrites them exactly as before.
 */
static void read_gamut_remap(struct mpc *mpc,
	int mpcc_id,
	uint16_t *regval,
	enum mpcc_gamut_remap_id gamut_remap_block_id,
	uint32_t *mode_select)
{
	struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
	struct color_matrices_reg gamut_regs = {0};

	/* default to bypass so unknown block ids report "no matrix in use" */
	*mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;

	switch (gamut_remap_block_id) {
	case MPCC_OGAM_GAMUT_REMAP:
		//current coefficient set in use
		REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, mode_select);

		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_GAMUT_REMAP_C12_A;

		/* pick the register bank matching the active coefficient set */
		switch (*mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}
		break;
	case MPCC_MCM_FIRST_GAMUT_REMAP:
		REG_GET(MPCC_MCM_FIRST_GAMUT_REMAP_MODE[mpcc_id],
			MPCC_MCM_FIRST_GAMUT_REMAP_MODE_CURRENT, mode_select);

		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_MCM_FIRST_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_MCM_FIRST_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_MCM_FIRST_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_MCM_FIRST_GAMUT_REMAP_C12_A;

		switch (*mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}
		break;
	case MPCC_MCM_SECOND_GAMUT_REMAP:
		REG_GET(MPCC_MCM_SECOND_GAMUT_REMAP_MODE[mpcc_id],
			MPCC_MCM_SECOND_GAMUT_REMAP_MODE_CURRENT, mode_select);

		gamut_regs.shifts.csc_c11 = mpc401->mpc_shift->MPCC_MCM_SECOND_GAMUT_REMAP_C11_A;
		gamut_regs.masks.csc_c11 = mpc401->mpc_mask->MPCC_MCM_SECOND_GAMUT_REMAP_C11_A;
		gamut_regs.shifts.csc_c12 = mpc401->mpc_shift->MPCC_MCM_SECOND_GAMUT_REMAP_C12_A;
		gamut_regs.masks.csc_c12 = mpc401->mpc_mask->MPCC_MCM_SECOND_GAMUT_REMAP_C12_A;

		switch (*mode_select) {
		case MPCC_GAMUT_REMAP_MODE_SELECT_1:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A[mpcc_id]);
			break;
		case MPCC_GAMUT_REMAP_MODE_SELECT_2:
			gamut_regs.csc_c11_c12 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_B[mpcc_id]);
			gamut_regs.csc_c33_c34 = REG(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[mpcc_id]);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* only read the matrix back when a coefficient set is actually in use */
	if (*mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_0) {
		cm_helper_read_color_matrices(
			mpc401->base.ctx,
			regval,
			&gamut_regs);
	}
}
/*
 * mpc401_get_gamut_remap - report the gamut remap state of one MPCC block.
 *
 * Reads back the coefficient set selected by adjust->mpcc_gamut_remap_block_id.
 * When the block is in bypass (MODE_SELECT_0) the adjust type is set to
 * BYPASS; otherwise the HW coefficients are converted back into the
 * fixed-point temperature matrix and the adjust type is set to SW.
 */
void mpc401_get_gamut_remap(struct mpc *mpc,
	int mpcc_id,
	struct mpc_grph_gamut_adjustment *adjust)
{
	uint16_t hw_matrix[12];
	uint32_t selected_mode;

	read_gamut_remap(mpc, mpcc_id, hw_matrix,
			adjust->mpcc_gamut_remap_block_id, &selected_mode);

	if (selected_mode == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	} else {
		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		convert_hw_matrix(adjust->temperature_matrix,
				hw_matrix, ARRAY_SIZE(hw_matrix));
	}
}
/*
 * MPC function table for DCN4.01. Most hooks are inherited unchanged from
 * earlier DCN generations (mpc1/mpc2/mpc3/mpc32); the mpc401_* entries are
 * the DCN4.01-specific implementations from this file (gamut remap, movable
 * CM location, MCM LUT programming and 3DLUT fast load).
 *
 * NOTE(review): mpc401_get_gamut_remap is defined in this file but not wired
 * into this table — confirm whether struct mpc_funcs exposes a matching hook.
 */
static const struct mpc_funcs dcn401_mpc_funcs = {
	.read_mpcc_state = mpc1_read_mpcc_state,
	.insert_plane = mpc1_insert_plane,
	.remove_mpcc = mpc1_remove_mpcc,
	.mpc_init = mpc32_mpc_init,
	.mpc_init_single_inst = mpc3_mpc_init_single_inst,
	.update_blending = mpc2_update_blending,
	.cursor_lock = mpc1_cursor_lock,
	.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
	.wait_for_idle = mpc2_assert_idle_mpcc,
	.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
	.set_denorm = mpc3_set_denorm,
	.set_denorm_clamp = mpc3_set_denorm_clamp,
	.set_output_csc = mpc3_set_output_csc,
	.set_ocsc_default = mpc3_set_ocsc_default,
	.set_output_gamma = mpc3_set_output_gamma,
	.insert_plane_to_secondary = NULL,
	.remove_mpcc_from_secondary = NULL,
	.set_dwb_mux = mpc3_set_dwb_mux,
	.disable_dwb_mux = mpc3_disable_dwb_mux,
	.is_dwb_idle = mpc3_is_dwb_idle,
	.set_gamut_remap = mpc401_set_gamut_remap,
	.program_shaper = mpc32_program_shaper,
	.program_3dlut = mpc32_program_3dlut,
	.program_1dlut = mpc32_program_post1dlut,
	.acquire_rmu = NULL,
	.release_rmu = NULL,
	.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
	.get_mpc_out_mux = mpc1_get_mpc_out_mux,
	.set_bg_color = mpc1_set_bg_color,
	.set_movable_cm_location = mpc401_set_movable_cm_location,
	.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
	.get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
	.populate_lut = mpc401_populate_lut,
	.program_lut_read_write_control = mpc401_program_lut_read_write_control,
	.program_lut_mode = mpc401_program_lut_mode,
};
/*
 * dcn401_mpc_construct - initialize a DCN4.01 MPC instance.
 *
 * Wires the base struct to the DCN4.01 function table, stores the register
 * map and shift/mask dictionaries, records the MPCC/RMU counts, clears the
 * in-use bookkeeping and initializes every MPCC slot via mpc3_init_mpcc().
 */
void dcn401_mpc_construct(struct dcn401_mpc *mpc401,
	struct dc_context *ctx,
	const struct dcn401_mpc_registers *mpc_regs,
	const struct dcn401_mpc_shift *mpc_shift,
	const struct dcn401_mpc_mask *mpc_mask,
	int num_mpcc,
	int num_rmu)
{
	int mpcc_idx;

	mpc401->base.ctx = ctx;
	mpc401->base.funcs = &dcn401_mpc_funcs;

	mpc401->mpc_regs = mpc_regs;
	mpc401->mpc_shift = mpc_shift;
	mpc401->mpc_mask = mpc_mask;

	mpc401->num_mpcc = num_mpcc;
	mpc401->num_rmu = num_rmu;
	mpc401->mpcc_in_use_mask = 0;

	for (mpcc_idx = 0; mpcc_idx < MAX_MPCC; mpcc_idx++)
		mpc3_init_mpcc(&mpc401->base.mpcc_array[mpcc_idx], mpcc_idx);
}

View file

@ -0,0 +1,234 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_MPCC_DCN401_H__
#define __DC_MPCC_DCN401_H__
#include "dcn30/dcn30_mpc.h"
#include "dcn32/dcn32_mpc.h"
/* Recover the DCN4.01 container from a generic struct mpc pointer. */
#define TO_DCN401_MPC(mpc_base) \
	container_of(mpc_base, struct dcn401_mpc, base)

/*
 * Register variable list: DCN3.0 + DCN3.2 MPC registers extended with the
 * per-MPCC MCM first/second gamut remap coefficient registers (double-buffered
 * A/B banks) and the 3DLUT fast-load select/status registers.
 */
#define MPC_REG_VARIABLE_LIST_DCN4_01 \
	MPC_REG_VARIABLE_LIST_DCN3_0; \
	MPC_REG_VARIABLE_LIST_DCN32; \
	uint32_t MPCC_MCM_FIRST_GAMUT_REMAP_COEF_FORMAT[MAX_MPCC]; \
	uint32_t MPCC_MCM_FIRST_GAMUT_REMAP_MODE[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_B[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_B[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_B[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_B[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_B[MAX_MPCC]; \
	uint32_t MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
	uint32_t MPCC_MCM_SECOND_GAMUT_REMAP_COEF_FORMAT[MAX_MPCC]; \
	uint32_t MPCC_MCM_SECOND_GAMUT_REMAP_MODE[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_B[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_B[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_B[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_B[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_B[MAX_MPCC]; \
	uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
	uint32_t MPCC_MCM_3DLUT_FAST_LOAD_SELECT[MAX_MPCC]; \
	uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC]

/*
 * Shift/mask field list. Only the set-A coefficient fields are enumerated;
 * the code reuses them for the B bank (both banks share the same layout,
 * only the register address differs — see read_gamut_remap()).
 */
#define MPC_COMMON_MASK_SH_LIST_DCN4_01(mask_sh) \
	MPC_COMMON_MASK_SH_LIST_DCN32(mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_FIRST_GAMUT_REMAP_COEF_FORMAT, MPCC_MCM_FIRST_GAMUT_REMAP_COEF_FORMAT, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_FIRST_GAMUT_REMAP_MODE, MPCC_MCM_FIRST_GAMUT_REMAP_MODE, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_FIRST_GAMUT_REMAP_MODE, MPCC_MCM_FIRST_GAMUT_REMAP_MODE_CURRENT, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A, MPCC_MCM_FIRST_GAMUT_REMAP_C11_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A, MPCC_MCM_FIRST_GAMUT_REMAP_C12_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_A, MPCC_MCM_FIRST_GAMUT_REMAP_C13_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_A, MPCC_MCM_FIRST_GAMUT_REMAP_C14_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_A, MPCC_MCM_FIRST_GAMUT_REMAP_C21_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_A, MPCC_MCM_FIRST_GAMUT_REMAP_C22_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_A, MPCC_MCM_FIRST_GAMUT_REMAP_C23_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_A, MPCC_MCM_FIRST_GAMUT_REMAP_C24_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_A, MPCC_MCM_FIRST_GAMUT_REMAP_C31_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_A, MPCC_MCM_FIRST_GAMUT_REMAP_C32_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A, MPCC_MCM_FIRST_GAMUT_REMAP_C33_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A, MPCC_MCM_FIRST_GAMUT_REMAP_C34_A, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_SECOND_GAMUT_REMAP_COEF_FORMAT, MPCC_MCM_SECOND_GAMUT_REMAP_COEF_FORMAT, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_SECOND_GAMUT_REMAP_MODE, MPCC_MCM_SECOND_GAMUT_REMAP_MODE, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_SECOND_GAMUT_REMAP_MODE, MPCC_MCM_SECOND_GAMUT_REMAP_MODE_CURRENT, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A, MPCC_MCM_SECOND_GAMUT_REMAP_C11_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A, MPCC_MCM_SECOND_GAMUT_REMAP_C12_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_A, MPCC_MCM_SECOND_GAMUT_REMAP_C13_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_A, MPCC_MCM_SECOND_GAMUT_REMAP_C14_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_A, MPCC_MCM_SECOND_GAMUT_REMAP_C21_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_A, MPCC_MCM_SECOND_GAMUT_REMAP_C22_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_A, MPCC_MCM_SECOND_GAMUT_REMAP_C23_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_A, MPCC_MCM_SECOND_GAMUT_REMAP_C24_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_A, MPCC_MCM_SECOND_GAMUT_REMAP_C31_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_A, MPCC_MCM_SECOND_GAMUT_REMAP_C32_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A, MPCC_MCM_SECOND_GAMUT_REMAP_C33_A, mask_sh), \
	SF(MPCC_MCM0_MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A, MPCC_MCM_SECOND_GAMUT_REMAP_C34_A, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_3DLUT_FAST_LOAD_SELECT, MPCC_MCM_3DLUT_FL_SEL, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_3DLUT_FAST_LOAD_STATUS, MPCC_MCM_3DLUT_FL_DONE, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_3DLUT_FAST_LOAD_STATUS, MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, mask_sh), \
	SF(MPCC_MCM0_MPCC_MCM_3DLUT_FAST_LOAD_STATUS, MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, mask_sh)

/* Per-instance register initializer list (one expansion per MPCC). */
#define MPC_REG_LIST_DCN4_01_RI(inst) \
	MPC_REG_LIST_DCN3_2_RI(inst),\
	SRII(MPCC_MCM_FIRST_GAMUT_REMAP_COEF_FORMAT, MPCC_MCM, inst),\
	SRII(MPCC_MCM_FIRST_GAMUT_REMAP_MODE, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_A, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C11_C12_B, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C13_C14_B, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C21_C22_B, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C23_C24_B, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C31_C32_B, MPCC_MCM, inst),\
	SRII(MPC_MCM_FIRST_GAMUT_REMAP_C33_C34_B, MPCC_MCM, inst),\
	SRII(MPCC_MCM_SECOND_GAMUT_REMAP_COEF_FORMAT, MPCC_MCM, inst), \
	SRII(MPCC_MCM_SECOND_GAMUT_REMAP_MODE, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_A, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C11_C12_B, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C13_C14_B, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C21_C22_B, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C23_C24_B, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_B, MPCC_MCM, inst), \
	SRII(MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B, MPCC_MCM, inst), \
	SRII(MPCC_MCM_3DLUT_FAST_LOAD_STATUS, MPCC_MCM, inst),\
	SRII(MPCC_MCM_3DLUT_FAST_LOAD_SELECT, MPCC_MCM, inst)

/* Field list instantiated once with uint8_t (shifts) and once with uint32_t (masks). */
#define MPC_REG_FIELD_LIST_DCN4_01(type)\
	MPC_REG_FIELD_LIST_DCN3_0(type);\
	MPC_REG_FIELD_LIST_DCN32(type);\
	type MPCC_MCM_FIRST_GAMUT_REMAP_COEF_FORMAT;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_MODE;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_MODE_CURRENT;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C11_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C12_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C13_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C14_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C21_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C22_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C23_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C24_A;\
	type MPCC_MCM_FIRST_GAMUT_REMAP_C31_A; \
	type MPCC_MCM_FIRST_GAMUT_REMAP_C32_A; \
	type MPCC_MCM_FIRST_GAMUT_REMAP_C33_A; \
	type MPCC_MCM_FIRST_GAMUT_REMAP_C34_A; \
	type MPCC_MCM_SECOND_GAMUT_REMAP_COEF_FORMAT;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_MODE;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_MODE_CURRENT;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C11_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C12_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C13_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C14_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C21_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C22_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C23_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C24_A;\
	type MPCC_MCM_SECOND_GAMUT_REMAP_C31_A; \
	type MPCC_MCM_SECOND_GAMUT_REMAP_C32_A; \
	type MPCC_MCM_SECOND_GAMUT_REMAP_C33_A; \
	type MPCC_MCM_SECOND_GAMUT_REMAP_C34_A; \
	type MPCC_MCM_3DLUT_FL_SEL;\
	type MPCC_MCM_3DLUT_FL_DONE;\
	type MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW;\
	type MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW

/* Bit-shift dictionary for the DCN4.01 MPC register fields. */
struct dcn401_mpc_shift {
	MPC_REG_FIELD_LIST_DCN4_01(uint8_t);
};

/* Bit-mask dictionary for the DCN4.01 MPC register fields. */
struct dcn401_mpc_mask {
	MPC_REG_FIELD_LIST_DCN4_01(uint32_t);
};

/* Register address map for the DCN4.01 MPC. */
struct dcn401_mpc_registers {
	MPC_REG_VARIABLE_LIST_DCN4_01;
};

/* DCN4.01 MPC instance: base struct mpc plus register map and bookkeeping. */
struct dcn401_mpc {
	struct mpc base;
	int mpcc_in_use_mask;	/* bitmask of MPCCs currently acquired */
	int num_mpcc;
	const struct dcn401_mpc_registers *mpc_regs;
	const struct dcn401_mpc_shift *mpc_shift;
	const struct dcn401_mpc_mask *mpc_mask;
	int num_rmu;
};

/* Initialize a DCN4.01 MPC instance (register map, funcs table, MPCC array). */
void dcn401_mpc_construct(struct dcn401_mpc *mpc401,
	struct dc_context *ctx,
	const struct dcn401_mpc_registers *mpc_regs,
	const struct dcn401_mpc_shift *mpc_shift,
	const struct dcn401_mpc_mask *mpc_mask,
	int num_mpcc,
	int num_rmu);

void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id);

void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
	bool lut_bank_a, int mpcc_id);

void mpc401_program_lut_mode(
	struct mpc *mpc,
	const enum MCM_LUT_ID id,
	const enum MCM_LUT_XABLE xable,
	bool lut_bank_a,
	int mpcc_id);

void mpc401_program_lut_read_write_control(
	struct mpc *mpc,
	const enum MCM_LUT_ID id,
	bool lut_bank_a,
	int mpcc_id);

/* Program (set) / read back (get) the per-MPCC gamut remap matrix. */
void mpc401_set_gamut_remap(
	struct mpc *mpc,
	int mpcc_id,
	const struct mpc_grph_gamut_adjustment *adjust);

void mpc401_get_gamut_remap(
	struct mpc *mpc,
	int mpcc_id,
	struct mpc_grph_gamut_adjustment *adjust);

#endif

View file

@ -0,0 +1,239 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dcn401_fpu.h"
#include "dcn401/dcn401_resource.h"
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
/*
 * dcn401_build_wm_range_table_fpu - populate the watermark range table that
 * is reported to PM FW.
 *
 * Builds four watermark sets:
 *   WM_A - normal: default DML latencies, min clocks as reported by PM FW
 *   WM_B - performance: min DCFCLK/UCLK taken from DPM state 2 when available
 *   WM_C - dummy p-state: fixed 50us p-state latency; skipped when the
 *          'DalDummyClockChangeLatencyNs' override is 0x7FFFFFFF
 *   WM_D - MALL: DRAM SR times halved as a bringup placeholder (see TBDs)
 *
 * Must run with the kernel FPU enabled (dc_assert_fp_enabled()) since the
 * latency inputs are doubles.
 */
void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
{
	/* defaults */
	double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
	double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
	double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
	double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
	/* For min clocks use as reported by PM FW and report those as min */
	uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
	uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
	uint16_t setb_min_uclk_mhz = min_uclk_mhz;
	/* DCFCLK candidate for WM_B comes from the SOC clock limits, state 2 */
	uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;

	dc_assert_fp_enabled();

	/* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
	if (dcfclk_mhz_for_the_second_state)
		clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
	else
		clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;

	if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
		setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;

	/* Set A - Normal - default values */
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
	/* (min_dcfclk for WM_B was already chosen above) */
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
	clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
	/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
	if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
		clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
		/* dummy p-state latency per DPM level; dram_speed_mts = memclk * 16
		 * (NOTE(review): the *16 MT/s factor mirrors the clk table units —
		 * confirm against the memclk reporting convention for this ASIC)
		 */
		clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
		clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
		clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
		clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
		clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
		clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
		clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
		clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
	}

	/* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
	/* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
/*
 * dcn401_update_bw_bounding_box
 *
 * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
 * spreadsheet with actual values as per dGPU SKU:
 * - with passed few options from dc->config
 * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
 *   need to get it from PM FW)
 * - with passed latency values (passed in ns units) in dc-> bb override for
 *   debugging purposes
 * - with passed latencies from VBIOS (in 100_ns units) if available for
 *   certain dGPU SKU
 * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
 *   of the same ASIC)
 * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
 *   FW for different clocks (which might differ for certain dGPU SKU of the
 *   same ASIC)
 *
 * Fix vs. original: the fclk override divided by the integer literal 1000,
 * truncating sub-microsecond values, while every sibling override divides by
 * 1000.0 — now consistent.
 */
void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
	dc_assert_fp_enabled();

	/* Override from passed dc->bb_overrides if available (ns -> us) */
	if (dc->bb_overrides.sr_exit_time_ns)
		dc->dml2_options.bbox_overrides.sr_exit_latency_us =
			dc->bb_overrides.sr_exit_time_ns / 1000.0;

	if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
		dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
			dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;

	if (dc->bb_overrides.urgent_latency_ns)
		dc->dml2_options.bbox_overrides.urgent_latency_us =
			dc->bb_overrides.urgent_latency_ns / 1000.0;

	if (dc->bb_overrides.dram_clock_change_latency_ns)
		dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
			dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;

	if (dc->bb_overrides.fclk_clock_change_latency_ns)
		dc->dml2_options.bbox_overrides.fclk_change_latency_us =
			dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;

	/* Override from VBIOS if VBIOS bb_info available
	 * NOTE(review): values are documented as 100ns units; the "* 10"
	 * conversion factor mirrors prior DCN generations — confirm units.
	 */
	if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
		struct bp_soc_bb_info bb_info = {0};

		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
			if (bb_info.dram_clock_change_latency_100ns > 0)
				dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
					bb_info.dram_clock_change_latency_100ns * 10;

			if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
				dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
					bb_info.dram_sr_enter_exit_latency_100ns * 10;

			if (bb_info.dram_sr_exit_latency_100ns > 0)
				dc->dml2_options.bbox_overrides.sr_exit_latency_us =
					bb_info.dram_sr_exit_latency_100ns * 10;
		}
	}

	/* Override from VBIOS for num_chan */
	if (dc->ctx->dc_bios->vram_info.num_chans) {
		dc->dml2_options.bbox_overrides.dram_num_chan =
			dc->ctx->dc_bios->vram_info.num_chans;
	}

	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
		dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
			dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

	/* reference clocks from clk_mgr / VBIOS, converted kHz -> MHz */
	dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
	dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
	dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
	dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;

	/* Copy PM-FW-reported clock levels into the DML2 bounding box */
	if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
		unsigned int i = 0;

		dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;

		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;

		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
		}

		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
		}

		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
		}

		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
		}

		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
		}

		/* DPPCLK levels mirror DISPCLK levels */
		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
			if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
					dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
			}
		}
	}
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DCN401_FPU_H__
#define __DCN401_FPU_H__

#include "clk_mgr.h"

/* Build the watermark range table reported to PM FW.
 * Caller must have the kernel FPU enabled (checked via dc_assert_fp_enabled()).
 */
void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);

/* Override the DML2 bandwidth bounding box with SKU-specific values
 * (bb_overrides, VBIOS info, PM FW clock table). FPU must be enabled.
 */
void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,29 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef _DML21_TRANSLATION_HELPER_H_
#define _DML21_TRANSLATION_HELPER_H_

/* Forward declarations — this header deals only in pointers to these types. */
struct dc;
struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;

/* Helpers translating between DC state and the DML2.1 interface.
 * (Semantics per implementation file; names follow the translate/extract
 * direction: "initialize"/"map ... into dml" go DC->DML, "extract"/"copy ...
 * to dc" go DML->DC.)
 */
void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming);
void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);

#endif

View file

@ -0,0 +1,531 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_internal_shared_types.h"
#include "dml21_translation_helper.h"
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml2_dc_resource_mgmt.h"
#include "dml2_core_dcn4_calcs.h"
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id)
{
int i;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
if (ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[i] && ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[i] == stream_id)
return i;
}
return -1;
}
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id)
{
int i;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
if (ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[i] && ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[i] == plane_id)
return i;
}
return -1;
}
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
if (!plane_id)
return false;
for (i = 0; i < state->stream_count; i++) {
for (j = 0; j < state->stream_status[i].plane_count; j++) {
if (state->stream_status[i].plane_states[j] == plane) {
*plane_id = (i << 16) | j;
return true;
}
}
}
return false;
}
/* Extract the DC plane slot (low 16 bits) from a packed plane id. */
unsigned int dml21_get_dc_plane_idx_from_plane_id(unsigned int plane_id)
{
	return plane_id & 0xffff;
}
void find_valid_pipe_idx_for_stream_index(const struct dml2_context *dml_ctx, unsigned int *dml_pipe_idx, unsigned int stream_index)
{
unsigned int i = 0;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
if (dml_ctx->v21.mode_programming.programming->plane_programming[i].plane_descriptor->stream_index == stream_index) {
*dml_pipe_idx = i;
return;
}
}
}
/*
 * Derive the per-pipe register set index for a pipe: the ODM slice index of
 * its OPP head, plus the MPC slice index when the pipe carries a plane.
 */
void find_pipe_regs_idx(const struct dml2_context *dml_ctx,
		struct pipe_ctx *pipe, unsigned int *pipe_regs_idx)
{
	struct pipe_ctx *opp_head;
	unsigned int reg_index;

	opp_head = dml_ctx->config.callbacks.get_opp_head(pipe);
	reg_index = dml_ctx->config.callbacks.get_odm_slice_index(opp_head);

	if (pipe->plane_state)
		reg_index += dml_ctx->config.callbacks.get_mpc_slice_index(pipe);

	*pipe_regs_idx = reg_index;
}
/* places pipe references into pipes arrays and returns number of pipes */
/*
 * Resolve the DC pipe_ctx pointers backing the plane at dml_plane_idx.
 * Main-stream pipes are written to dc_main_pipes; if the stream has a paired
 * SubVP phantom stream, its pipes are written to dc_phantom_pipes using the
 * same plane index (phantom planes mirror main plane ordering).
 * Returns the number of main pipes found (phantom pipe count is assumed to
 * match and is not returned separately).
 */
int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
		struct dc_state *context,
		struct dml2_context *dml_ctx,
		struct pipe_ctx **dc_main_pipes,
		struct pipe_ctx **dc_phantom_pipes,
		int dml_plane_idx)
{
	unsigned int dml_stream_index;
	unsigned int main_stream_id;
	unsigned int dc_plane_index;
	struct dc_stream_state *dc_main_stream;
	struct dc_stream_status *dc_main_stream_status;
	struct dc_plane_state *dc_main_plane;
	struct dc_stream_state *dc_phantom_stream;
	struct dc_stream_status *dc_phantom_stream_status;
	struct dc_plane_state *dc_phantom_plane;
	int num_pipes = 0;

	/* map DML plane -> DML stream -> DC stream id -> DC stream */
	dml_stream_index = dml_ctx->v21.mode_programming.programming->plane_programming[dml_plane_idx].plane_descriptor->stream_index;
	main_stream_id = dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_stream_index];

	dc_main_stream = dml_ctx->config.callbacks.get_stream_from_id(context, main_stream_id);
	dc_main_stream_status = dml_ctx->config.callbacks.get_stream_status(context, dc_main_stream);

	/* find main plane based on id */
	dc_plane_index = dml21_get_dc_plane_idx_from_plane_id(dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[dml_plane_idx]);
	dc_main_plane = dc_main_stream_status->plane_states[dc_plane_index];

	if (dc_main_plane) {
		num_pipes = dml_ctx->config.callbacks.get_dpp_pipes_for_plane(dc_main_plane, &context->res_ctx, dc_main_pipes);
	} else {
		/* stream was configured with dummy plane, so get pipes from opp head */
		struct pipe_ctx *otg_master_pipe = dml_ctx->config.callbacks.get_otg_master_for_stream(&context->res_ctx, dc_main_stream);
		num_pipes = dml_ctx->config.callbacks.get_opp_heads_for_otg_master(otg_master_pipe, &context->res_ctx, dc_main_pipes);
	}

	/* if phantom exists, find associated pipes */
	dc_phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, dc_main_stream);
	if (dc_phantom_stream && num_pipes > 0) {
		dc_phantom_stream_status = dml_ctx->config.callbacks.get_stream_status(context, dc_phantom_stream);

		/* phantom plane will have same index as main */
		dc_phantom_plane = dc_phantom_stream_status->plane_states[dc_plane_index];

		dml_ctx->config.callbacks.get_dpp_pipes_for_plane(dc_phantom_plane, &context->res_ctx, dc_phantom_pipes);
	}

	return num_pipes;
}
/*
 * Legacy translation of DML21 per-pipe register values into the older
 * pipe_ctx rq_regs/dlg_regs/ttu_regs layout. Each destination struct is
 * zeroed first, then copied field-by-field from the DML21 source structs.
 * NOTE(review): the commented-out assignments (meta chunk, cursor1 fields)
 * have no destination/source in this hardware generation's structs —
 * presumably intentionally dropped for DCN4; confirm before re-enabling.
 */
void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
		struct dml2_display_dlg_regs *disp_dlg_regs,
		struct dml2_display_ttu_regs *disp_ttu_regs,
		struct pipe_ctx *out)
{
	/* Request (RQ) registers: luma (_l) then chroma (_c) surface settings. */
	memset(&out->rq_regs, 0, sizeof(out->rq_regs));
	out->rq_regs.rq_regs_l.chunk_size = rq_regs->rq_regs_l.chunk_size;
	out->rq_regs.rq_regs_l.min_chunk_size = rq_regs->rq_regs_l.min_chunk_size;
	//out->rq_regs.rq_regs_l.meta_chunk_size = rq_regs->rq_regs_l.meta_chunk_size;
	//out->rq_regs.rq_regs_l.min_meta_chunk_size = rq_regs->rq_regs_l.min_meta_chunk_size;
	out->rq_regs.rq_regs_l.dpte_group_size = rq_regs->rq_regs_l.dpte_group_size;
	out->rq_regs.rq_regs_l.mpte_group_size = rq_regs->rq_regs_l.mpte_group_size;
	out->rq_regs.rq_regs_l.swath_height = rq_regs->rq_regs_l.swath_height;
	out->rq_regs.rq_regs_l.pte_row_height_linear = rq_regs->rq_regs_l.pte_row_height_linear;

	out->rq_regs.rq_regs_c.chunk_size = rq_regs->rq_regs_c.chunk_size;
	out->rq_regs.rq_regs_c.min_chunk_size = rq_regs->rq_regs_c.min_chunk_size;
	//out->rq_regs.rq_regs_c.meta_chunk_size = rq_regs->rq_regs_c.meta_chunk_size;
	//out->rq_regs.rq_regs_c.min_meta_chunk_size = rq_regs->rq_regs_c.min_meta_chunk_size;
	out->rq_regs.rq_regs_c.dpte_group_size = rq_regs->rq_regs_c.dpte_group_size;
	out->rq_regs.rq_regs_c.mpte_group_size = rq_regs->rq_regs_c.mpte_group_size;
	out->rq_regs.rq_regs_c.swath_height = rq_regs->rq_regs_c.swath_height;
	out->rq_regs.rq_regs_c.pte_row_height_linear = rq_regs->rq_regs_c.pte_row_height_linear;

	/* Shared RQ fields (expansion modes, plane1 address, unbounded request). */
	out->rq_regs.drq_expansion_mode = rq_regs->drq_expansion_mode;
	out->rq_regs.prq_expansion_mode = rq_regs->prq_expansion_mode;
	//out->rq_regs.mrq_expansion_mode = rq_regs->mrq_expansion_mode;
	out->rq_regs.crq_expansion_mode = rq_regs->crq_expansion_mode;
	out->rq_regs.plane1_base_address = rq_regs->plane1_base_address;
	out->unbounded_req = rq_regs->unbounded_request_enabled;

	/* Delay generator (DLG) registers. */
	memset(&out->dlg_regs, 0, sizeof(out->dlg_regs));
	out->dlg_regs.refcyc_h_blank_end = disp_dlg_regs->refcyc_h_blank_end;
	out->dlg_regs.dlg_vblank_end = disp_dlg_regs->dlg_vblank_end;
	out->dlg_regs.min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
	out->dlg_regs.refcyc_per_htotal = disp_dlg_regs->refcyc_per_htotal;
	out->dlg_regs.refcyc_x_after_scaler = disp_dlg_regs->refcyc_x_after_scaler;
	out->dlg_regs.dst_y_after_scaler = disp_dlg_regs->dst_y_after_scaler;
	out->dlg_regs.dst_y_prefetch = disp_dlg_regs->dst_y_prefetch;
	out->dlg_regs.dst_y_per_vm_vblank = disp_dlg_regs->dst_y_per_vm_vblank;
	out->dlg_regs.dst_y_per_row_vblank = disp_dlg_regs->dst_y_per_row_vblank;
	out->dlg_regs.dst_y_per_vm_flip = disp_dlg_regs->dst_y_per_vm_flip;
	out->dlg_regs.dst_y_per_row_flip = disp_dlg_regs->dst_y_per_row_flip;
	out->dlg_regs.ref_freq_to_pix_freq = disp_dlg_regs->ref_freq_to_pix_freq;
	out->dlg_regs.vratio_prefetch = disp_dlg_regs->vratio_prefetch;
	out->dlg_regs.vratio_prefetch_c = disp_dlg_regs->vratio_prefetch_c;
	out->dlg_regs.refcyc_per_tdlut_group = disp_dlg_regs->refcyc_per_tdlut_group;
	out->dlg_regs.refcyc_per_pte_group_vblank_l = disp_dlg_regs->refcyc_per_pte_group_vblank_l;
	out->dlg_regs.refcyc_per_pte_group_vblank_c = disp_dlg_regs->refcyc_per_pte_group_vblank_c;
	//out->dlg_regs.refcyc_per_meta_chunk_vblank_l = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;
	//out->dlg_regs.refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_c;
	out->dlg_regs.refcyc_per_pte_group_flip_l = disp_dlg_regs->refcyc_per_pte_group_flip_l;
	out->dlg_regs.refcyc_per_pte_group_flip_c = disp_dlg_regs->refcyc_per_pte_group_flip_c;
	//out->dlg_regs.refcyc_per_meta_chunk_flip_l = disp_dlg_regs->refcyc_per_meta_chunk_flip_l;
	//out->dlg_regs.refcyc_per_meta_chunk_flip_c = disp_dlg_regs->refcyc_per_meta_chunk_flip_c;
	out->dlg_regs.dst_y_per_pte_row_nom_l = disp_dlg_regs->dst_y_per_pte_row_nom_l;
	out->dlg_regs.dst_y_per_pte_row_nom_c = disp_dlg_regs->dst_y_per_pte_row_nom_c;
	out->dlg_regs.refcyc_per_pte_group_nom_l = disp_dlg_regs->refcyc_per_pte_group_nom_l;
	out->dlg_regs.refcyc_per_pte_group_nom_c = disp_dlg_regs->refcyc_per_pte_group_nom_c;
	//out->dlg_regs.dst_y_per_meta_row_nom_l = disp_dlg_regs->dst_y_per_meta_row_nom_l;
	//out->dlg_regs.dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_c;
	//out->dlg_regs.refcyc_per_meta_chunk_nom_l = disp_dlg_regs->refcyc_per_meta_chunk_nom_l;
	//out->dlg_regs.refcyc_per_meta_chunk_nom_c = disp_dlg_regs->refcyc_per_meta_chunk_nom_c;
	out->dlg_regs.refcyc_per_line_delivery_pre_l = disp_dlg_regs->refcyc_per_line_delivery_pre_l;
	out->dlg_regs.refcyc_per_line_delivery_pre_c = disp_dlg_regs->refcyc_per_line_delivery_pre_c;
	out->dlg_regs.refcyc_per_line_delivery_l = disp_dlg_regs->refcyc_per_line_delivery_l;
	out->dlg_regs.refcyc_per_line_delivery_c = disp_dlg_regs->refcyc_per_line_delivery_c;
	out->dlg_regs.refcyc_per_vm_group_vblank = disp_dlg_regs->refcyc_per_vm_group_vblank;
	out->dlg_regs.refcyc_per_vm_group_flip = disp_dlg_regs->refcyc_per_vm_group_flip;
	out->dlg_regs.refcyc_per_vm_req_vblank = disp_dlg_regs->refcyc_per_vm_req_vblank;
	out->dlg_regs.refcyc_per_vm_req_flip = disp_dlg_regs->refcyc_per_vm_req_flip;
	out->dlg_regs.dst_y_offset_cur0 = disp_dlg_regs->dst_y_offset_cur0;
	out->dlg_regs.chunk_hdl_adjust_cur0 = disp_dlg_regs->chunk_hdl_adjust_cur0;
	//out->dlg_regs.dst_y_offset_cur1 = disp_dlg_regs->dst_y_offset_cur1;
	//out->dlg_regs.chunk_hdl_adjust_cur1 = disp_dlg_regs->chunk_hdl_adjust_cur1;
	out->dlg_regs.vready_after_vcount0 = disp_dlg_regs->vready_after_vcount0;
	out->dlg_regs.dst_y_delta_drq_limit = disp_dlg_regs->dst_y_delta_drq_limit;
	out->dlg_regs.refcyc_per_vm_dmdata = disp_dlg_regs->refcyc_per_vm_dmdata;
	out->dlg_regs.dmdata_dl_delta = disp_dlg_regs->dmdata_dl_delta;

	/* Time-to-underflow (TTU) / QoS registers. */
	memset(&out->ttu_regs, 0, sizeof(out->ttu_regs));
	out->ttu_regs.qos_level_low_wm = disp_ttu_regs->qos_level_low_wm;
	out->ttu_regs.qos_level_high_wm = disp_ttu_regs->qos_level_high_wm;
	out->ttu_regs.min_ttu_vblank = disp_ttu_regs->min_ttu_vblank;
	out->ttu_regs.qos_level_flip = disp_ttu_regs->qos_level_flip;
	out->ttu_regs.refcyc_per_req_delivery_l = disp_ttu_regs->refcyc_per_req_delivery_l;
	out->ttu_regs.refcyc_per_req_delivery_c = disp_ttu_regs->refcyc_per_req_delivery_c;
	out->ttu_regs.refcyc_per_req_delivery_cur0 = disp_ttu_regs->refcyc_per_req_delivery_cur0;
	//out->ttu_regs.refcyc_per_req_delivery_cur1 = disp_ttu_regs->refcyc_per_req_delivery_cur1;
	out->ttu_regs.refcyc_per_req_delivery_pre_l = disp_ttu_regs->refcyc_per_req_delivery_pre_l;
	out->ttu_regs.refcyc_per_req_delivery_pre_c = disp_ttu_regs->refcyc_per_req_delivery_pre_c;
	out->ttu_regs.refcyc_per_req_delivery_pre_cur0 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur0;
	//out->ttu_regs.refcyc_per_req_delivery_pre_cur1 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur1;
	out->ttu_regs.qos_level_fixed_l = disp_ttu_regs->qos_level_fixed_l;
	out->ttu_regs.qos_level_fixed_c = disp_ttu_regs->qos_level_fixed_c;
	out->ttu_regs.qos_level_fixed_cur0 = disp_ttu_regs->qos_level_fixed_cur0;
	//out->ttu_regs.qos_level_fixed_cur1 = disp_ttu_regs->qos_level_fixed_cur1;
	out->ttu_regs.qos_ramp_disable_l = disp_ttu_regs->qos_ramp_disable_l;
	out->ttu_regs.qos_ramp_disable_c = disp_ttu_regs->qos_ramp_disable_c;
	out->ttu_regs.qos_ramp_disable_cur0 = disp_ttu_regs->qos_ramp_disable_cur0;
	//out->ttu_regs.qos_ramp_disable_cur1 = disp_ttu_regs->qos_ramp_disable_cur1;
}
/*
 * Accumulate MALL (memory-attached last-level cache) allocation sizes for a
 * pipe into the DC bandwidth context. Reuses the MALL allocation-size logic
 * from dcn32_fpu.c: only active, top pipes per plane are counted (no MPC
 * continuation, no ODM continuation) so each unique plane is added once.
 */
void dml21_populate_mall_allocation_size(struct dc_state *context,
		struct dml2_context *in_ctx,
		struct dml2_per_plane_programming *pln_prog,
		struct pipe_ctx *dc_pipe)
{
	bool is_top_pipe_for_plane;

	if (!dc_pipe->stream || !dc_pipe->plane_state)
		return;

	is_top_pipe_for_plane = dc_pipe->top_pipe == NULL ||
			dc_pipe->plane_state != dc_pipe->top_pipe->plane_state;

	if (!is_top_pipe_for_plane || dc_pipe->prev_odm_pipe != NULL)
		return;

	if (in_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, dc_pipe) == SUBVP_PHANTOM) {
		/* SUBVP: phantom surfaces only stored in MALL */
		dc_pipe->surface_size_in_mall_bytes = pln_prog->svp_size_mall_bytes;
		context->bw_ctx.bw.dcn.mall_subvp_size_bytes += dc_pipe->surface_size_in_mall_bytes;
	} else {
		/* SS: all active surfaces stored in MALL */
		dc_pipe->surface_size_in_mall_bytes = pln_prog->surface_size_mall_bytes;
		context->bw_ctx.bw.dcn.mall_ss_size_bytes += dc_pipe->surface_size_in_mall_bytes;
	}
}
/*
 * Return true when the pipe drives a DP2.x (128b/132b) output: HPO stream
 * and link encoders are both assigned and the stream signal is DP.
 */
bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
	bool has_hpo_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc != NULL;
	bool has_hpo_link_enc = pipe_ctx->link_res.hpo_dp_link_enc != NULL;

	/* If this assert is hit then we have a link encoder dynamic management issue */
	ASSERT(!has_hpo_stream_enc || has_hpo_link_enc);

	return has_hpo_stream_enc && has_hpo_link_enc &&
			dc_is_dp_signal(pipe_ctx->stream->signal);
}
/*
 * Program one DC pipe from DML21 per-plane/per-stream programming: DLG
 * params, the per-pipe hubp register set (phantom or main variant), DET
 * buffer size, per-pipe DPP clock (raising the context-wide dppclk floor),
 * MALL allocation accounting and mcache allocation.
 */
void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog,
		struct dml2_per_stream_programming *stream_prog)
{
	unsigned int pipe_reg_index = 0;

	dml21_populate_pipe_ctx_dlg_params(dml_ctx, context, pipe_ctx, stream_prog);
	find_pipe_regs_idx(dml_ctx, pipe_ctx, &pipe_reg_index);

	if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		/* phantom pipe: copy the phantom plane's register set */
		memcpy(&pipe_ctx->hubp_regs, pln_prog->phantom_plane.pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
		/* NOTE(review): this false is overwritten below —
		 * dml21_update_pipe_ctx_dchub_regs() also assigns
		 * out->unbounded_req from rq_regs; confirm intended value. */
		pipe_ctx->unbounded_req = false;

		/* legacy only, should be removed later */
		dml21_update_pipe_ctx_dchub_regs(&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->rq_regs,
				&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->dlg_regs,
				&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);

		/* phantom pipes get no DET allocation */
		pipe_ctx->det_buffer_size_kb = 0;
	} else {
		/* main pipe: copy the plane's register set */
		memcpy(&pipe_ctx->hubp_regs, pln_prog->pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
		pipe_ctx->unbounded_req = pln_prog->pipe_regs[pipe_reg_index]->rq_regs.unbounded_request_enabled;

		/* legacy only, should be removed later */
		dml21_update_pipe_ctx_dchub_regs(&pln_prog->pipe_regs[pipe_reg_index]->rq_regs,
				&pln_prog->pipe_regs[pipe_reg_index]->dlg_regs,
				&pln_prog->pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);

		/* det_size is in units of 64 KB segments */
		pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
	}

	pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4.dppclk_khz;
	/* context dppclk must cover the fastest pipe */
	if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipe_ctx->plane_res.bw.dppclk_khz)
		context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;

	dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
	memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
}
/*
 * Create a SubVP phantom stream paired with main_stream and add it to the
 * context. Timing/src/dst are copied from the main stream, then vertical
 * timing is shrunk to the phantom dimensions computed by DML (descriptor in
 * stream_programming). Returns the new phantom stream.
 */
static struct dc_stream_state *dml21_add_phantom_stream(struct dml2_context *dml_ctx,
		const struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *main_stream,
		struct dml2_per_stream_programming *stream_programming)
{
	struct dc_stream_state *phantom_stream;
	struct dml2_stream_parameters *phantom_stream_descriptor = &stream_programming->phantom_stream.descriptor;

	phantom_stream = dml_ctx->config.svp_pstate.callbacks.create_phantom_stream(dc, context, main_stream);

	/* copy details of phantom stream from main */
	memcpy(&phantom_stream->timing, &main_stream->timing, sizeof(phantom_stream->timing));
	memcpy(&phantom_stream->src, &main_stream->src, sizeof(phantom_stream->src));
	memcpy(&phantom_stream->dst, &main_stream->dst, sizeof(phantom_stream->dst));

	/* modify timing for phantom */
	phantom_stream->timing.v_front_porch = phantom_stream_descriptor->timing.v_front_porch;
	phantom_stream->timing.v_addressable = phantom_stream_descriptor->timing.v_active;
	phantom_stream->timing.v_total = phantom_stream_descriptor->timing.v_total;
	phantom_stream->timing.flags.DSC = 0; // phantom always has DSC disabled

	/* phantom viewport starts at the top; scale src height by the main
	 * stream's src/dst ratio so scaling matches the main stream */
	phantom_stream->dst.y = 0;
	phantom_stream->dst.height = stream_programming->phantom_stream.descriptor.timing.v_active;

	phantom_stream->src.y = 0;
	phantom_stream->src.height = (double)phantom_stream_descriptor->timing.v_active * (double)main_stream->src.height / (double)main_stream->dst.height;

	phantom_stream->use_dynamic_meta = false;

	dml_ctx->config.svp_pstate.callbacks.add_phantom_stream(dc, context, phantom_stream, main_stream);

	return phantom_stream;
}
static struct dc_plane_state *dml21_add_phantom_plane(struct dml2_context *dml_ctx,
const struct dc *dc,
struct dc_state *context,
struct dc_stream_state *phantom_stream,
struct dc_plane_state *main_plane,
struct dml2_per_plane_programming *plane_programming)
{
struct dc_plane_state *phantom_plane;
phantom_plane = dml_ctx->config.svp_pstate.callbacks.create_phantom_plane(dc, context, main_plane);
phantom_plane->format = main_plane->format;
phantom_plane->rotation = main_plane->rotation;
phantom_plane->visible = main_plane->visible;
memcpy(&phantom_plane->address, &main_plane->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &main_plane->scaling_quality,
sizeof(phantom_plane->scaling_quality));
memcpy(&phantom_plane->src_rect, &main_plane->src_rect, sizeof(phantom_plane->src_rect));
memcpy(&phantom_plane->dst_rect, &main_plane->dst_rect, sizeof(phantom_plane->dst_rect));
memcpy(&phantom_plane->clip_rect, &main_plane->clip_rect, sizeof(phantom_plane->clip_rect));
memcpy(&phantom_plane->plane_size, &main_plane->plane_size,
sizeof(phantom_plane->plane_size));
memcpy(&phantom_plane->tiling_info, &main_plane->tiling_info,
sizeof(phantom_plane->tiling_info));
memcpy(&phantom_plane->dcc, &main_plane->dcc, sizeof(phantom_plane->dcc));
phantom_plane->format = main_plane->format;
phantom_plane->rotation = main_plane->rotation;
phantom_plane->visible = main_plane->visible;
/* Shadow pipe has small viewport. */
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->src.height;
dml_ctx->config.svp_pstate.callbacks.add_phantom_plane(dc, phantom_stream, phantom_plane, context);
return phantom_plane;
}
/*
 * Expand DML-computed SubVP phantoms into the DC context: for every DML
 * stream flagged with an enabled phantom, create a phantom stream and a
 * phantom plane per associated DML plane, then re-run pipe mapping so the
 * new phantoms get pipe resources.
 */
void dml21_handle_phantom_streams_planes(const struct dc *dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
	unsigned int dml_stream_index, dml_plane_index, dc_plane_index;
	struct dc_stream_state *main_stream;
	struct dc_stream_status *main_stream_status;
	struct dc_stream_state *phantom_stream;
	struct dc_plane_state *main_plane;
	bool phantoms_added = false;

	/* create phantom streams and planes and add to context */
	for (dml_stream_index = 0; dml_stream_index < dml_ctx->v21.mode_programming.programming->display_config.num_streams; dml_stream_index++) {
		/* iterate through DML streams looking for phantoms */
		if (dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_index].phantom_stream.enabled) {
			/* find associated dc stream */
			main_stream = dml_ctx->config.callbacks.get_stream_from_id(context,
					dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_stream_index]);

			main_stream_status = dml_ctx->config.callbacks.get_stream_status(context, main_stream);

			/* nothing to shadow for a plane-less stream */
			if (main_stream_status->plane_count == 0)
				continue;

			/* create phantom stream for subvp enabled stream */
			phantom_stream = dml21_add_phantom_stream(dml_ctx,
					dc,
					context,
					main_stream,
					&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_index]);

			/* iterate through DML planes associated with this stream */
			for (dml_plane_index = 0; dml_plane_index < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_plane_index++) {
				if (dml_ctx->v21.mode_programming.programming->plane_programming[dml_plane_index].plane_descriptor->stream_index == dml_stream_index) {
					/* find associated dc plane */
					dc_plane_index = dml21_get_dc_plane_idx_from_plane_id(dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[dml_plane_index]);
					main_plane = main_stream_status->plane_states[dc_plane_index];

					/* create phantom planes for subvp enabled plane */
					dml21_add_phantom_plane(dml_ctx,
							dc,
							context,
							phantom_stream,
							main_plane,
							&dml_ctx->v21.mode_programming.programming->plane_programming[dml_plane_index]);

					phantoms_added = true;
				}
			}
		}
	}

	/* mapping must be re-run so phantom streams/planes receive pipes */
	if (phantoms_added)
		dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, dc->current_state);
}
/*
 * Build the per-stream FAMS2 (firmware-assisted memory switching) static
 * state consumed by DMUB firmware. Copies PMO-computed parameters for each
 * non-phantom, non-blanked stream, then fills in context-derived fields:
 * plane counts, OTG instances and per-plane pipe masks (plus phantom pipe
 * masks for SubVP streams). Enables fw-based mclk switching when at least
 * one FAMS2 stream was built.
 */
void dml21_build_fams2_programming(const struct dc *dc,
		struct dc_state *context,
		struct dml2_context *dml_ctx)
{
	int i, j, k;

	/* reset fams2 data */
	context->bw_ctx.bw.dcn.fams2_stream_count = 0;
	memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);

	/* nothing to program when PMO did not require FAMS2 */
	if (!dml_ctx->v21.mode_programming.programming->fams2_required)
		return;

	for (i = 0; i < context->stream_count; i++) {
		int dml_stream_idx;
		struct dc_stream_state *phantom_stream;
		struct dc_stream_status *phantom_status;

		/* next unused slot in the fams2 params array */
		struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[context->bw_ctx.bw.dcn.fams2_stream_count];

		struct dc_stream_state *stream = context->streams[i];

		if (context->stream_status[i].plane_count == 0 ||
				dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
			/* can ignore blanked or phantom streams */
			continue;
		}

		dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id);
		ASSERT(dml_stream_idx >= 0);

		/* copy static state from PMO */
		memcpy(static_state,
				&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
				sizeof(struct dmub_fams2_stream_static_state));

		/* get information from context */
		static_state->num_planes = context->stream_status[i].plane_count;
		static_state->otg_inst = context->stream_status[i].primary_otg_inst;

		/* populate pipe masks for planes */
		for (j = 0; j < context->stream_status[i].plane_count; j++) {
			for (k = 0; k < dc->res_pool->pipe_count; k++) {
				if (context->res_ctx.pipe_ctx[k].stream &&
						context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
						context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
					static_state->pipe_mask |= (1 << k);
					static_state->plane_pipe_masks[j] |= (1 << k);
				}
			}
		}

		/* get per method programming */
		switch (static_state->type) {
		case FAMS2_STREAM_TYPE_VBLANK:
		case FAMS2_STREAM_TYPE_VACTIVE:
		case FAMS2_STREAM_TYPE_DRR:
			/* no extra sub-state needed for these methods */
			break;
		case FAMS2_STREAM_TYPE_SUBVP:
			phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
			phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);

			/* phantom status should always be present */
			ASSERT(phantom_status);
			static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;

			/* populate pipe masks for phantom planes */
			for (j = 0; j < phantom_status->plane_count; j++) {
				for (k = 0; k < dc->res_pool->pipe_count; k++) {
					if (context->res_ctx.pipe_ctx[k].stream &&
							context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
							context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
						static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
						static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
					}
				}
			}
			break;
		default:
			ASSERT(false);
			break;
		}

		context->bw_ctx.bw.dcn.fams2_stream_count++;
	}

	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_stream_count > 0;
}
/*
 * Return true when the source format uses a second surface plane — i.e. any
 * format in the enum range [dml2_420_8, dml2_rgbe_alpha].
 */
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format)
{
	bool uses_plane1 = (source_format >= dml2_420_8) &&
			(source_format <= dml2_rgbe_alpha);

	return uses_plane1;
}

View file

@ -0,0 +1,50 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
/*
 * dml21_utils.h - utility routines for mapping DML2.1 programming output
 * onto DC pipe contexts (pipe lookup, register translation, SubVP phantom
 * handling, FAMS2 programming).
 */
#ifndef _DML21_UTILS_H_
#define _DML21_UTILS_H_
/* Forward declarations: only pointers to these types are used below. */
struct dc_state;
struct dc_plane_state;
struct pipe_ctx;
struct dml2_context;
struct dml2_display_rq_regs;
struct dml2_display_dlg_regs;
struct dml2_display_ttu_regs;
/* Map a DC stream id to its DML pipe index; -1 when not found. */
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
/* Map a packed plane id to its DML pipe index; -1 when not found. */
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
/* Compute the packed (stream << 16 | plane) id for a plane; false on failure. */
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
/* Translate DML21 RQ/DLG/TTU register structs into the legacy pipe_ctx layout. */
void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
		struct dml2_display_dlg_regs *disp_dlg_regs,
		struct dml2_display_ttu_regs *disp_ttu_regs,
		struct pipe_ctx *out);
/* Accumulate per-pipe MALL allocation sizes into the bandwidth context. */
void dml21_populate_mall_allocation_size(struct dc_state *context,
		struct dml2_context *in_ctx,
		struct dml2_per_plane_programming *pln_prog,
		struct pipe_ctx *dc_pipe);
/* True when the pipe drives a DP2.x output through the HPO encoders. */
bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx);
/* Find the DML pipe index for a stream index; output untouched if none. */
void find_valid_pipe_idx_for_stream_index(const struct dml2_context *dml_ctx, unsigned int *dml_pipe_idx, unsigned int stream_index);
/* Compute the per-pipe register set index (ODM slice + MPC slice). */
void find_pipe_regs_idx(const struct dml2_context *dml_ctx,
		struct pipe_ctx *pipe, unsigned int *pipe_regs_idx);
/* Resolve main and phantom DC pipes for a DML plane; returns pipe count. */
int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
		struct dc_state *context,
		struct dml2_context *dml_ctx,
		struct pipe_ctx **dc_main_pipes,
		struct pipe_ctx **dc_phantom_pipes,
		int dml_plane_idx);
/* Program one DC pipe from DML per-plane/per-stream programming. */
void dml21_program_dc_pipe(struct dml2_context *dml_ctx,
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dml2_per_plane_programming *pln_prog,
		struct dml2_per_stream_programming *stream_prog);
/* Expand DML-computed SubVP phantom streams/planes into the DC context. */
void dml21_handle_phantom_streams_planes(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
/* Extract the DC plane slot (low 16 bits) from a packed plane id. */
unsigned int dml21_get_dc_plane_idx_from_plane_id(unsigned int plane_id);
/* Build per-stream FAMS2 static state for DMUB firmware. */
void dml21_build_fams2_programming(const struct dc *dc,
		struct dc_state *context,
		struct dml2_context *dml_ctx);
/* True when the format uses a second surface plane. */
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format);
#endif

View file

@ -0,0 +1,425 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
*dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
if (!(*dml_ctx))
return false;
(*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
(*dml_ctx)->v21.mode_support.dml2_instance = (*dml_ctx)->v21.dml_init.dml2_instance;
(*dml_ctx)->v21.mode_programming.dml2_instance = (*dml_ctx)->v21.dml_init.dml2_instance;
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
(*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
return true;
}
/*
 * Apply driver debug knobs to the DML21 PMO (p-state method optimizer)
 * options before the instance is initialized.
 *
 * dml21_disable_pstate_method_mask bit meanings (as decoded below):
 *   bit 1 -> disable vblank, bit 2 -> disable SubVP,
 *   bit 3 -> disable fixed DRR, bit 4 -> disable variable DRR.
 */
static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
	bool disable_fams2;
	struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;

	/* ODM options */
	pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
	pmo_options->disable_dyn_odm_for_multi_stream = true;
	pmo_options->disable_dyn_odm_for_stream_with_svp = true;

	/* UCLK P-State options */
	if (in_dc->debug.dml21_force_pstate_method) {
		/* debug override: force a specific p-state method */
		dml_ctx->config.pmo.force_pstate_method_enable = true;
		dml_ctx->config.pmo.force_pstate_method_value = in_dc->debug.dml21_force_pstate_method_value;
	} else {
		dml_ctx->config.pmo.force_pstate_method_enable = false;
	}

	pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);

	/* NOTE: DRR and SubVP Require FAMS2 */
	disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
	pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
			in_dc->debug.force_disable_subvp ||
			disable_fams2;
	pmo_options->disable_drr_fixed = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
			disable_fams2;
	pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
			disable_fams2;
	pmo_options->disable_fams2 = disable_fams2;

	pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming;
}
/*
 * Initialize an allocated DML21 context: select the DML project from the
 * DCE version, store configuration, populate SOC bounding box / IP params
 * (with overrides), apply debug options, then initialize the DML instance.
 */
static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
	switch (in_dc->ctx->dce_version) {
	case DCN_VERSION_4_01:
	case DCN_VERSION_3_2:	// TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
		(*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
		break;
	default:
		/* unsupported ASIC: leave the project marked invalid */
		(*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
	}

	(*dml_ctx)->architecture = dml2_architecture_21;

	/* Store configuration options */
	(*dml_ctx)->config = *config;

	/*Initialize SOCBB and DCNIP params */
	dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
	dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
	dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);

	/* apply debug overrides */
	dml21_apply_debug_options(in_dc, *dml_ctx, config);

	/*Initialize DML21 instance */
	dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
}
/*
 * Create a DML21 context: allocate backing memory, then initialize it.
 * Returns false when allocation fails; on success the context is ready for
 * mode support/programming calls.
 */
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
	bool allocated;

	/* Allocate memory for initializing DML21 instance */
	allocated = dml21_allocate_memory(dml_ctx);
	if (allocated)
		dml21_init(in_dc, dml_ctx, config);

	return allocated;
}
/*
 * Copy DML21 programming output into the DC bandwidth context: global
 * DCHUBBUB arbiter registers, per-pipe RQ/DLG/TTU programming (main and
 * phantom pipes), global clocks and MALL way usage.
 *
 * @out_new_hw_state and @pipe_cnt are currently unused; kept for interface
 * parity with the legacy DML2 path.
 *
 * Fix: the max-supported clock lookups indexed clk_values_khz[num_clk_values],
 * one past the last populated entry; they now use num_clk_values - 1,
 * consistent with the single-entry branch that reads index 0.
 */
static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
		struct dml2_context *in_ctx, unsigned int pipe_cnt)
{
	unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0;
	struct dml2_per_plane_programming *pln_prog = NULL;
	struct dml2_per_stream_programming *stream_prog = NULL;
	struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
	struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
	int num_pipes;

	/* reset the dppclk floor; raised per-pipe in dml21_program_dc_pipe() */
	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;

	/* copy global DCHUBBUB arbiter registers */
	memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.arb_regs, sizeof(struct dml2_display_arb_regs));

	/* legacy only: compbuf_size is in 64 KB segments */
	context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_regs.arb_regs.compbuf_size * 64;

	context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
	context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
	context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;

	for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
		pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];

		/* skip unused programming slots */
		if (!pln_prog->plane_descriptor)
			continue;

		stream_prog = &in_ctx->v21.mode_programming.programming->stream_programming[pln_prog->plane_descriptor->stream_index];
		num_dpps_required = pln_prog->num_dpps_required;

		if (num_dpps_required == 0)
			continue;

		num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
		if (num_pipes <= 0)
			continue;

		/* program each pipe */
		for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
			dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog);

			if (pln_prog->phantom_plane.valid)
				dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
		}
	}

	/* assign global clocks */
	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
	if (in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values > 1) {
		context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz =
				in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values - 1] * 1000;
	} else {
		context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[0] * 1000;
	}

	if (in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values > 1) {
		context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
				in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values - 1] * 1000;
	} else {
		context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[0] * 1000;
	}

	/* get global mall allocation */
	if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
		context->bw_ctx.bw.dcn.clk.num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
	else
		context->bw_ctx.bw.dcn.clk.num_ways = 0;
}
/*
 * Full (non-fast) validation path: translate the dc_state into a DML2.1
 * display configuration, run DML2 mode programming, and on success map the
 * DML output (pipe assignment, clocks, watermarks, FAMS2 programming) back
 * into the dc_state's bandwidth context.
 *
 * Returns true when the mode is supported and programming was generated
 * (or when there is nothing to validate), false otherwise.
 */
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
	bool result = false;
	struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;

	/* Start from a clean slate: stale configuration and pipe mappings
	 * from a previous validation pass must not leak into this one.
	 */
	memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
	memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params, 0, sizeof(struct dml2_core_mode_programming_in_out));

	/* An empty state is trivially supported. */
	if (!context || context->stream_count == 0)
		return true;

	/* scrub phantom's from current dc_state */
	dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
	dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);

	/* Populate stream, plane mappings and other fields in display config. */
	result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
	if (!result)
		return false;

	result = dml2_build_mode_programming(mode_programming);
	if (!result)
		return false;

	/* Check and map HW resources */
	if (result && !dml_ctx->config.skip_hw_state_mapping) {
		dml21_map_hw_resources(dml_ctx);
		dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
		/* if subvp phantoms are present, expand them into dc context */
		dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
	}

	/* Copy DML CLK, WM and REG outputs to bandwidth context */
	if (result && !dml_ctx->config.skip_hw_state_mapping) {
		dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
		dml21_copy_clocks_to_dc_state(dml_ctx, context);
		dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
		if (in_dc->ctx->dce_version == DCN_VERSION_3_2) {
			/* NOTE(review): all four legacy watermark sets are
			 * sourced from DML2_DCHUB_WATERMARK_SET_A — presumably
			 * intentional (legacy b/c/d mirror set A on DCN3.2),
			 * but confirm this is not a copy-paste of SET_A where
			 * SET_B/C/D were meant.
			 */
			dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.a, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
			dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.b, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
			dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.c, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
			dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.d, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
		}

		dml21_build_fams2_programming(in_dc, context, dml_ctx);
	}

	return true;
}
/*
 * Fast-validate path: determine whether @context can be supported on this
 * SoC at all.  No HW programming is generated and context->res_ctx is not
 * populated; use dml21_mode_check_and_programming() for that.
 *
 * Return: true if the mode is supported (or there is nothing to check),
 * false otherwise.
 */
static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
	struct dml2_initialize_instance_in_out *dml_init = &dml_ctx->v21.dml_init;
	struct dml2_check_mode_supported_in_out *mode_support = &dml_ctx->v21.mode_support;

	/* Clear stale state left over from a previous validation pass. */
	memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
	memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.check_mode_supported_locals.mode_support_params, 0, sizeof(struct dml2_core_mode_support_in_out));

	/* An empty state is trivially supported. */
	if (!context || context->stream_count == 0)
		return true;

	/* Scrub phantom's from current dc_state */
	dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context);
	dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);

	mode_support->dml2_instance = dml_init->dml2_instance;

	/* Fail fast if the dc_state cannot even be translated into a DML
	 * display configuration.  This return value was previously ignored,
	 * unlike in dml21_mode_check_and_programming(); treat a failed
	 * mapping as "not supported" in both paths.
	 */
	if (!dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx))
		return false;

	return dml2_check_mode_supported(mode_support);
}
/*
 * Entry point for DML2.1 validation.
 *
 * Fast validation only checks whether the mode is supported; the full path
 * additionally generates the HW programming for the new dc_state.
 */
bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
{
	return fast_validate ?
			dml21_check_mode_support(in_dc, context, dml_ctx) :
			dml21_mode_check_and_programming(in_dc, context, dml_ctx);
}
/*
 * Build and distribute mcache (metadata cache) programming for every plane
 * and pipe in the given dc_state.
 *
 * Phase 1: assemble one dml2_plane_mcache_configuration_descriptor per main
 *          plane, plus one per valid phantom plane (phantom configurations
 *          are indexed starting right after the last main plane).
 * Phase 2: hand the whole set to dml2_build_mcache_programming().
 * Phase 3: copy the resulting per-plane/per-pipe register values back into
 *          each main and phantom pipe_ctx, walking planes in the same order
 *          as phase 1 so the phantom indices line up.
 */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
	unsigned int num_pipes, dml_prog_idx, dml_phantom_prog_idx, dc_pipe_index;
	struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
	struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];

	struct dml2_per_plane_programming *pln_prog = NULL;
	struct dml2_plane_mcache_configuration_descriptor *mcache_config = NULL;
	struct prepare_mcache_programming_locals *l = &dml_ctx->v21.scratch.prepare_mcache_locals;

	if (context->stream_count == 0) {
		return;
	}

	memset(&l->build_mcache_programming_params, 0, sizeof(struct dml2_build_mcache_programming_in_out));
	l->build_mcache_programming_params.dml2_instance = dml_ctx->v21.dml_init.dml2_instance;

	/* phantom's start after main planes */
	dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes;

	/* Build mcache programming parameters per plane per pipe */
	for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
		pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];

		mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_prog_idx];
		memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
		mcache_config->plane_descriptor = pln_prog->plane_descriptor;
		mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx];
		mcache_config->num_pipes = pln_prog->num_dpps_required;
		l->build_mcache_programming_params.num_configurations++;

		if (pln_prog->num_dpps_required == 0) {
			continue;
		}

		/* num_pipes is unsigned, so "<= 0" only guards the no-pipe case */
		num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
		if (num_pipes <= 0 ||
			dc_main_pipes[0]->stream == NULL ||
			dc_main_pipes[0]->plane_state == NULL)
			continue;

		/* get config for each pipe */
		for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
			ASSERT(dc_main_pipes[dc_pipe_index]);
			dml21_get_pipe_mcache_config(context, dc_main_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
		}

		/* get config for each phantom pipe */
		if (pln_prog->phantom_plane.valid) {
			mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx];
			memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
			mcache_config->plane_descriptor = pln_prog->plane_descriptor;
			mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx];
			mcache_config->num_pipes = pln_prog->num_dpps_required;
			l->build_mcache_programming_params.num_configurations++;

			for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
				ASSERT(dc_phantom_pipes[dc_pipe_index]);
				dml21_get_pipe_mcache_config(context, dc_phantom_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]);
			}

			/* increment phantom index */
			dml_phantom_prog_idx++;
		}
	}

	/* Call to generate mcache programming per plane per pipe for the given display configuration */
	dml2_build_mcache_programming(&l->build_mcache_programming_params);

	/* BUGFIX: re-derive the phantom index for the copy-back pass.  The
	 * build loop above advanced dml_phantom_prog_idx past the last
	 * phantom configuration; without resetting it to the first phantom
	 * slot (num_planes), phantom pipes below would read register values
	 * from the wrong, ever-increasing configuration indices.
	 */
	dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes;

	/* get per plane per pipe mcache programming */
	for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) {
		pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];

		num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx);
		if (num_pipes <= 0 ||
			dc_main_pipes[0]->stream == NULL ||
			dc_main_pipes[0]->plane_state == NULL)
			continue;

		/* get config for each pipe */
		for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
			ASSERT(dc_main_pipes[dc_pipe_index]);
			if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index]) {
				memcpy(&dc_main_pipes[dc_pipe_index]->mcache_regs,
						l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index],
						sizeof(struct dml2_hubp_pipe_mcache_regs));
			}
		}

		/* get config for each phantom pipe */
		if (pln_prog->phantom_plane.valid) {
			for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
				ASSERT(dc_phantom_pipes[dc_pipe_index]);
				if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) {
					memcpy(&dc_phantom_pipes[dc_pipe_index]->mcache_regs,
							l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index],
							sizeof(struct dml2_hubp_pipe_mcache_regs));
				}
			}
			/* increment phantom index */
			dml_phantom_prog_idx++;
		}
	}
}
/*
 * Deep-copy a dml2_context.
 *
 * A plain struct copy would leave @dst_dml_ctx pointing at the source's
 * internal dml2_instance and programming buffers, so those pointers are
 * saved up front, their pointees copied separately, and the pointers
 * patched back afterwards.
 */
void dml21_copy(struct dml2_context *dst_dml_ctx,
		struct dml2_context *src_dml_ctx)
{
	/* Preserve references to internals */
	struct dml2_instance *dst_dml2_instance = dst_dml_ctx->v21.dml_init.dml2_instance;
	struct dml2_display_cfg_programming *dst_dml2_programming = dst_dml_ctx->v21.mode_programming.programming;

	/* Copy context */
	memcpy(dst_dml_ctx, src_dml_ctx, sizeof(struct dml2_context));

	/* Copy Internals */
	memcpy(dst_dml2_instance, src_dml_ctx->v21.dml_init.dml2_instance, sizeof(struct dml2_instance));
	memcpy(dst_dml2_programming, src_dml_ctx->v21.mode_programming.programming, sizeof(struct dml2_display_cfg_programming));

	/* Restore references to internals */
	dst_dml_ctx->v21.dml_init.dml2_instance = dst_dml2_instance;

	dst_dml_ctx->v21.mode_support.dml2_instance = dst_dml2_instance;
	dst_dml_ctx->v21.mode_programming.dml2_instance = dst_dml2_instance;

	/* Repoint the embedded display_config references at the destination's
	 * own copy rather than the source's.
	 */
	dst_dml_ctx->v21.mode_support.display_config = &dst_dml_ctx->v21.display_config;
	dst_dml_ctx->v21.mode_programming.display_config = dst_dml_ctx->v21.mode_support.display_config;

	dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;

	/* need to initialize copied instance for internal references to be correct */
	dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
}
/*
 * Clone a dml2_context: allocate fresh backing storage for the copy, then
 * deep-copy the source into it.
 *
 * Return: true on success, false if allocation failed.
 */
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
		struct dml2_context *src_dml_ctx)
{
	/* The clone needs its own allocation before contents can be copied. */
	bool allocated = dml21_allocate_memory(dst_dml_ctx);

	if (allocated)
		dml21_copy(*dst_dml_ctx, src_dml_ctx);

	return allocated;
}
/*
 * Re-initialize an existing DML21 context, e.g. after a configuration
 * change; simply delegates to dml21_init() to run full initialization
 * again.
 */
void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
	dml21_init(in_dc, dml_ctx, config);
}

View file

@ -0,0 +1,67 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef _DML21_WRAPPER_H_
#define _DML21_WRAPPER_H_

#include "os_types.h"
#include "dml_top_soc_parameter_types.h"

struct dc;
struct dc_state;
struct dml2_configuration_options;
struct dml2_context;

/**
 * dml21_create - Creates dml21_context.
 * @in_dc: dc.
 * @dml_ctx: Created dml21 context.
 * @config: dml21 configuration options.
 *
 * Creation of DML21 is done as part of dc_state creation.
 * DML21 IP, SOC and STATES are initialized at
 * creation time.
 *
 * Return: True if dml2 is successfully created, false otherwise.
 */
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);

/* Deep-copy src into dst, including internally-referenced buffers. */
void dml21_copy(struct dml2_context *dst_dml_ctx,
	struct dml2_context *src_dml_ctx);
/* Allocate a new dml21 context and deep-copy src into it. */
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
	struct dml2_context *src_dml_ctx);
/* Re-run full DML21 initialization on an existing context. */
void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);

/**
 * dml21_validate - Determines if a display configuration is supported or not.
 * @in_dc: dc.
 * @context: dc_state to be validated.
 * @dml_ctx: dml21 context for the validation.
 * @fast_validate: Fast validate will not populate context.res_ctx.
 *
 * Based on fast_validate option internally would call:
 *
 * -dml21_mode_check_and_programming - for non fast_validate option
 * Calculates if dc_state can be supported on the input display
 * configuration. If supported, generates the necessary HW
 * programming for the new dc_state.
 *
 * -dml21_check_mode_support - for fast_validate option
 * Calculates if dc_state can be supported for the input display
 * config.

 * Context: Two threads may not invoke this function concurrently unless they reference
 *          separate dc_states for validation.
 * Return: True if mode is supported, false otherwise.
 */
bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);

/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);

/* Structure for inputting external SOCBB and DCNIP values for tool based debugging. */
struct socbb_ip_params_external {
	struct dml2_ip_capabilities ip_params;
	struct dml2_soc_bb soc_bb;
};

#endif

View file

@ -0,0 +1,401 @@
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DML_DML_DCN3_SOC_BB__
#define __DML_DML_DCN3_SOC_BB__
#include "dml_top_soc_parameter_types.h"
/*
 * Default QoS/derate parameters for this SoC as consumed by DML2.1.
 *
 * Derate percentages give the fraction of theoretical DRAM/FCLK/DCFCLK
 * bandwidth DML may assume in each activity state; latencies under
 * qos_params.dcn4 are expressed in FCLK/UCLK cycles.
 *
 * NOTE(review): table is named "dcn31" but uses the dcn4 QoS layout
 * (qos_type = dml2_qos_param_type_dcn4) — presumably a DCN4-style bring-up
 * copy; confirm against the SoC parameter spreadsheet.
 */
static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
	.derate_table = {
		.system_active_urgent = {
			.dram_derate_percent_pixel = 22,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 76,
			.dcfclk_derate_percent = 100,
		},
		.system_active_average = {
			.dram_derate_percent_pixel = 17,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 57,
			.dcfclk_derate_percent = 75,
		},
		.dcn_mall_prefetch_urgent = {
			.dram_derate_percent_pixel = 22,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 76,
			.dcfclk_derate_percent = 100,
		},
		.dcn_mall_prefetch_average = {
			.dram_derate_percent_pixel = 17,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 57,
			.dcfclk_derate_percent = 75,
		},
		.system_idle_average = {
			.dram_derate_percent_pixel = 17,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 57,
			.dcfclk_derate_percent = 100,
		},
	},
	.writeback = {
		.base_latency_us = 12,
		.scaling_factor_us = 0,
		.scaling_factor_mhz = 0,
	},
	.qos_params = {
		.dcn4 = {
			.df_qos_response_time_fclk_cycles = 300,
			.max_round_trip_to_furthest_cs_fclk_cycles = 350,
			.mall_overhead_fclk_cycles = 50,
			.meta_trip_adder_fclk_cycles = 36,
			.average_transport_distance_fclk_cycles = 257,

			.umc_urgent_ramp_latency_margin = 50,
			.umc_max_latency_margin = 30,
			.umc_average_latency_margin = 20,
			.fabric_max_transport_latency_margin = 20,
			.fabric_average_transport_latency_margin = 10,

			/* One entry per UCLK DPM state, lowest first. */
			.per_uclk_dpm_params = {
				{
					/* NOTE(review): field is *_khz but 97/435/731/1187
					 * look like MHz (clk_table.uclk below lists
					 * 97000 kHz, and dcn4_soc_bb.h writes 97 * 1000
					 * here) — verify the units. */
					.minimum_uclk_khz = 97,
					.urgent_ramp_uclk_cycles = 472,
					.trip_to_memory_uclk_cycles = 827,
					.meta_trip_to_memory_uclk_cycles = 827,
					.maximum_latency_when_urgent_uclk_cycles = 72,
					.average_latency_when_urgent_uclk_cycles = 61,
					.maximum_latency_when_non_urgent_uclk_cycles = 827,
					.average_latency_when_non_urgent_uclk_cycles = 118,
				},
				{
					.minimum_uclk_khz = 435,
					.urgent_ramp_uclk_cycles = 546,
					.trip_to_memory_uclk_cycles = 848,
					.meta_trip_to_memory_uclk_cycles = 848,
					.maximum_latency_when_urgent_uclk_cycles = 146,
					.average_latency_when_urgent_uclk_cycles = 90,
					.maximum_latency_when_non_urgent_uclk_cycles = 848,
					.average_latency_when_non_urgent_uclk_cycles = 135,
				},
				{
					.minimum_uclk_khz = 731,
					.urgent_ramp_uclk_cycles = 632,
					.trip_to_memory_uclk_cycles = 874,
					.meta_trip_to_memory_uclk_cycles = 874,
					.maximum_latency_when_urgent_uclk_cycles = 232,
					.average_latency_when_urgent_uclk_cycles = 124,
					.maximum_latency_when_non_urgent_uclk_cycles = 874,
					.average_latency_when_non_urgent_uclk_cycles = 155,
				},
				{
					.minimum_uclk_khz = 1187,
					.urgent_ramp_uclk_cycles = 716,
					.trip_to_memory_uclk_cycles = 902,
					.meta_trip_to_memory_uclk_cycles = 902,
					.maximum_latency_when_urgent_uclk_cycles = 316,
					.average_latency_when_urgent_uclk_cycles = 160,
					.maximum_latency_when_non_urgent_uclk_cycles = 902,
					.average_latency_when_non_urgent_uclk_cycles = 177,
				},
			},
		},
	},
	.qos_type = dml2_qos_param_type_dcn4,
};
/*
 * SoC bounding box for DML2.1: clock tables (kHz), DRAM configuration,
 * QoS parameters, power-management latencies (us) and misc. SoC constants.
 * Two-entry clock tables are {min, max}.
 */
static const struct dml2_soc_bb dml2_socbb_dcn31 = {
	.clk_table = {
		.uclk = {
			.clk_values_khz = {97000, 435000, 731000, 1187000},
			.num_clk_values = 4,
		},
		.fclk = {
			.clk_values_khz = {300000, 2500000},
			.num_clk_values = 2,
		},
		.dcfclk = {
			.clk_values_khz = {200000, 1800000},
			.num_clk_values = 2,
		},
		.dispclk = {
			.clk_values_khz = {100000, 2000000},
			.num_clk_values = 2,
		},
		.dppclk = {
			.clk_values_khz = {100000, 2000000},
			.num_clk_values = 2,
		},
		.dtbclk = {
			.clk_values_khz = {100000, 2000000},
			.num_clk_values = 2,
		},
		.phyclk = {
			.clk_values_khz = {810000, 810000},
			.num_clk_values = 2,
		},
		.socclk = {
			.clk_values_khz = {300000, 1600000},
			.num_clk_values = 2,
		},
		.dscclk = {
			.clk_values_khz = {666667, 666667},
			.num_clk_values = 2,
		},
		.phyclk_d18 = {
			.clk_values_khz = {625000, 625000},
			.num_clk_values = 2,
		},
		.phyclk_d32 = {
			.clk_values_khz = {2000000, 2000000},
			.num_clk_values = 2,
		},
		.dram_config = {
			.channel_width_bytes = 2,
			.channel_count = 16,
			.transactions_per_clock = 16,
		},
	},

	.qos_parameters = {
		.derate_table = {
			.system_active_urgent = {
				.dram_derate_percent_pixel = 22,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 76,
				.dcfclk_derate_percent = 100,
			},
			.system_active_average = {
				.dram_derate_percent_pixel = 17,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 57,
				.dcfclk_derate_percent = 75,
			},
			.dcn_mall_prefetch_urgent = {
				.dram_derate_percent_pixel = 22,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 76,
				.dcfclk_derate_percent = 100,
			},
			.dcn_mall_prefetch_average = {
				.dram_derate_percent_pixel = 17,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 57,
				.dcfclk_derate_percent = 75,
			},
			.system_idle_average = {
				.dram_derate_percent_pixel = 17,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 57,
				.dcfclk_derate_percent = 100,
			},
		},
		.writeback = {
			.base_latency_us = 0,
			.scaling_factor_us = 0,
			.scaling_factor_mhz = 0,
		},
		.qos_params = {
			.dcn4 = {
				.df_qos_response_time_fclk_cycles = 300,
				.max_round_trip_to_furthest_cs_fclk_cycles = 350,
				.mall_overhead_fclk_cycles = 50,
				.meta_trip_adder_fclk_cycles = 36,
				.average_transport_distance_fclk_cycles = 260,

				.umc_urgent_ramp_latency_margin = 50,
				.umc_max_latency_margin = 30,
				.umc_average_latency_margin = 20,
				.fabric_max_transport_latency_margin = 20,
				.fabric_average_transport_latency_margin = 10,

				/* One entry per UCLK DPM state; minimum_uclk_khz of 0
				 * leaves the floor unconstrained. */
				.per_uclk_dpm_params = {
					{
						// State 1
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 472,
						.trip_to_memory_uclk_cycles = 827,
						.meta_trip_to_memory_uclk_cycles = 827,
						.maximum_latency_when_urgent_uclk_cycles = 72,
						.average_latency_when_urgent_uclk_cycles = 72,
						.maximum_latency_when_non_urgent_uclk_cycles = 827,
						.average_latency_when_non_urgent_uclk_cycles = 117,
					},
					{
						// State 2
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 546,
						.trip_to_memory_uclk_cycles = 848,
						.meta_trip_to_memory_uclk_cycles = 848,
						.maximum_latency_when_urgent_uclk_cycles = 146,
						.average_latency_when_urgent_uclk_cycles = 146,
						.maximum_latency_when_non_urgent_uclk_cycles = 848,
						.average_latency_when_non_urgent_uclk_cycles = 133,
					},
					{
						// State 3
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 564,
						.trip_to_memory_uclk_cycles = 853,
						.meta_trip_to_memory_uclk_cycles = 853,
						.maximum_latency_when_urgent_uclk_cycles = 164,
						.average_latency_when_urgent_uclk_cycles = 164,
						.maximum_latency_when_non_urgent_uclk_cycles = 853,
						.average_latency_when_non_urgent_uclk_cycles = 136,
					},
					{
						// State 4
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 613,
						.trip_to_memory_uclk_cycles = 869,
						.meta_trip_to_memory_uclk_cycles = 869,
						.maximum_latency_when_urgent_uclk_cycles = 213,
						.average_latency_when_urgent_uclk_cycles = 213,
						.maximum_latency_when_non_urgent_uclk_cycles = 869,
						.average_latency_when_non_urgent_uclk_cycles = 149,
					},
					{
						// State 5
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 632,
						.trip_to_memory_uclk_cycles = 874,
						.meta_trip_to_memory_uclk_cycles = 874,
						.maximum_latency_when_urgent_uclk_cycles = 232,
						.average_latency_when_urgent_uclk_cycles = 232,
						.maximum_latency_when_non_urgent_uclk_cycles = 874,
						.average_latency_when_non_urgent_uclk_cycles = 153,
					},
					{
						// State 6
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 665,
						.trip_to_memory_uclk_cycles = 885,
						.meta_trip_to_memory_uclk_cycles = 885,
						.maximum_latency_when_urgent_uclk_cycles = 265,
						.average_latency_when_urgent_uclk_cycles = 265,
						.maximum_latency_when_non_urgent_uclk_cycles = 885,
						.average_latency_when_non_urgent_uclk_cycles = 161,
					},
					{
						// State 7
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 689,
						.trip_to_memory_uclk_cycles = 895,
						.meta_trip_to_memory_uclk_cycles = 895,
						.maximum_latency_when_urgent_uclk_cycles = 289,
						.average_latency_when_urgent_uclk_cycles = 289,
						.maximum_latency_when_non_urgent_uclk_cycles = 895,
						.average_latency_when_non_urgent_uclk_cycles = 167,
					},
					{
						// State 8
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 716,
						.trip_to_memory_uclk_cycles = 902,
						.meta_trip_to_memory_uclk_cycles = 902,
						.maximum_latency_when_urgent_uclk_cycles = 316,
						.average_latency_when_urgent_uclk_cycles = 316,
						.maximum_latency_when_non_urgent_uclk_cycles = 902,
						.average_latency_when_non_urgent_uclk_cycles = 174,
					},
				},
			},
		},
		.qos_type = dml2_qos_param_type_dcn4,
	},

	.power_management_parameters = {
		.dram_clk_change_blackout_us = 400,
		.fclk_change_blackout_us = 0,
		.g7_ppt_blackout_us = 0,
		.stutter_enter_plus_exit_latency_us = 50,
		.stutter_exit_latency_us = 43,
		.z8_stutter_enter_plus_exit_latency_us = 0,
		.z8_stutter_exit_latency_us = 0,
	},

	.vmin_limit = {
		.dispclk_khz = 600 * 1000,
	},

	.dprefclk_mhz = 700,
	.xtalclk_mhz = 100,
	.pcie_refclk_mhz = 100,
	.dchub_refclk_mhz = 50,
	.mall_allocated_for_dcn_mbytes = 64,
	.max_outstanding_reqs = 512,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.return_bus_width_bytes = 64,
	.hostvm_min_page_size_kbytes = 0,
	.gpuvm_min_page_size_kbytes = 256,
	.phy_downspread_percent = 0,
	.dcn_downspread_percent = 0,
	.dispclk_dppclk_vco_speed_mhz = 4500,
	.do_urgent_latency_adjustment = 0,
	.mem_word_bytes = 32,
	.num_dcc_mcaches = 8,
	.mcache_size_bytes = 2048,
	.mcache_line_size_bytes = 32,
	.max_fclk_for_uclk_dpm_khz = 1250 * 1000,
};
/*
 * Maximum display IP capabilities assumed by DML2.1 for this DCN revision:
 * pipe/OTG/DSC and output counts, buffer sizes, and SubVP/FAMS2 scheduling
 * delays (microseconds unless the field name says otherwise).
 */
static const struct dml2_ip_capabilities dml2_dcn31_max_ip_caps = {
	.pipe_count = 4,
	.otg_count = 4,
	.num_dsc = 4,
	.max_num_dp2p0_streams = 4,
	.max_num_hdmi_frl_outputs = 1,
	.max_num_dp2p0_outputs = 4,
	.rob_buffer_size_kbytes = 192,
	.config_return_buffer_size_in_kbytes = 1152,
	.meta_fifo_size_in_kentries = 22,
	.compressed_buffer_segment_size_in_kbytes = 64,
	.subvp_drr_scheduling_margin_us = 100,
	.subvp_prefetch_end_to_mall_start_us = 15,
	.subvp_fw_processing_delay = 15,

	.fams2 = {
		.max_allow_delay_us = 100 * 1000,
		.scheduling_delay_us = 50,
		.vertical_interrupt_ack_delay_us = 18,
		.allow_programming_delay_us = 18,
		.min_allow_width_us = 20,
		.subvp_df_throttle_delay_us = 100,
		.subvp_programming_delay_us = 18,
		.subvp_prefetch_to_mall_delay_us = 18,
		.drr_programming_delay_us = 18,
	},
};
#endif /* __DML_DML_DCN3_SOC_BB__ */

View file

@ -0,0 +1,352 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
#include "dml_top_soc_parameter_types.h"
/*
 * Default QoS/derate parameters for DCN4.0.1 as consumed by DML2.1.
 *
 * Derate percentages give the fraction of theoretical DRAM/FCLK/DCFCLK
 * bandwidth DML may assume per activity state; latencies under
 * qos_params.dcn4 are in FCLK/UCLK cycles.  Only one UCLK DPM state is
 * populated here (the lowest, 97 MHz), matching the single-entry uclk
 * table in dml2_socbb_dcn401 below.
 */
static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
	.derate_table = {
		.system_active_urgent = {
			.dram_derate_percent_pixel = 22,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 76,
			.dcfclk_derate_percent = 100,
		},
		.system_active_average = {
			.dram_derate_percent_pixel = 17,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 57,
			.dcfclk_derate_percent = 75,
		},
		.dcn_mall_prefetch_urgent = {
			.dram_derate_percent_pixel = 40,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 83,
			.dcfclk_derate_percent = 100,
		},
		.dcn_mall_prefetch_average = {
			.dram_derate_percent_pixel = 33,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 62,
			.dcfclk_derate_percent = 83,
		},
		.system_idle_average = {
			.dram_derate_percent_pixel = 70,
			.dram_derate_percent_vm = 0,
			.dram_derate_percent_pixel_and_vm = 0,
			.fclk_derate_percent = 83,
			.dcfclk_derate_percent = 100,
		},
	},
	.writeback = {
		.base_latency_us = 12,
		.scaling_factor_us = 0,
		.scaling_factor_mhz = 0,
	},
	.qos_params = {
		.dcn4 = {
			.df_qos_response_time_fclk_cycles = 300,
			.max_round_trip_to_furthest_cs_fclk_cycles = 350,
			.mall_overhead_fclk_cycles = 50,
			.meta_trip_adder_fclk_cycles = 36,
			.average_transport_distance_fclk_cycles = 257,

			.umc_urgent_ramp_latency_margin = 50,
			.umc_max_latency_margin = 30,
			.umc_average_latency_margin = 20,
			.fabric_max_transport_latency_margin = 20,
			.fabric_average_transport_latency_margin = 10,

			.per_uclk_dpm_params = {
				{
					.minimum_uclk_khz = 97 * 1000,
					.urgent_ramp_uclk_cycles = 472,
					.trip_to_memory_uclk_cycles = 827,
					.meta_trip_to_memory_uclk_cycles = 827,
					.maximum_latency_when_urgent_uclk_cycles = 72,
					.average_latency_when_urgent_uclk_cycles = 61,
					.maximum_latency_when_non_urgent_uclk_cycles = 827,
					.average_latency_when_non_urgent_uclk_cycles = 118,
				},
			},
		},
	},
	.qos_type = dml2_qos_param_type_dcn4,
};
/*
 * SoC bounding box for DCN4.0.1 used by DML2.1: clock tables (kHz), DRAM
 * configuration, QoS parameters, power-management latencies (us) and misc.
 * SoC constants.  Two-entry clock tables are {min, max}; the uclk table is
 * single-entry at bring-up.
 */
static const struct dml2_soc_bb dml2_socbb_dcn401 = {
	.clk_table = {
		.uclk = {
			.clk_values_khz = {97000},
			.num_clk_values = 1,
		},
		.fclk = {
			.clk_values_khz = {300000, 2500000},
			.num_clk_values = 2,
		},
		.dcfclk = {
			.clk_values_khz = {200000, 1564000},
			.num_clk_values = 2,
		},
		.dispclk = {
			.clk_values_khz = {100000, 2000000},
			.num_clk_values = 2,
		},
		.dppclk = {
			.clk_values_khz = {100000, 2000000},
			.num_clk_values = 2,
		},
		.dtbclk = {
			.clk_values_khz = {100000, 1564000},
			.num_clk_values = 2,
		},
		.phyclk = {
			.clk_values_khz = {810000, 810000},
			.num_clk_values = 2,
		},
		.socclk = {
			.clk_values_khz = {300000, 1200000},
			.num_clk_values = 2,
		},
		.dscclk = {
			.clk_values_khz = {666667, 666667},
			.num_clk_values = 2,
		},
		.phyclk_d18 = {
			.clk_values_khz = {667000, 667000},
			.num_clk_values = 2,
		},
		.phyclk_d32 = {
			.clk_values_khz = {2000000, 2000000},
			.num_clk_values = 2,
		},
		.dram_config = {
			.channel_width_bytes = 2,
			.channel_count = 16,
			.transactions_per_clock = 16,
		},
	},

	.qos_parameters = {
		.derate_table = {
			.system_active_urgent = {
				.dram_derate_percent_pixel = 22,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 76,
				.dcfclk_derate_percent = 100,
			},
			.system_active_average = {
				.dram_derate_percent_pixel = 15,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 57,
				.dcfclk_derate_percent = 75,
			},
			.dcn_mall_prefetch_urgent = {
				.dram_derate_percent_pixel = 40,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 83,
				.dcfclk_derate_percent = 100,
			},
			.dcn_mall_prefetch_average = {
				.dram_derate_percent_pixel = 30,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 62,
				.dcfclk_derate_percent = 83,
			},
			.system_idle_average = {
				.dram_derate_percent_pixel = 70,
				.dram_derate_percent_vm = 0,
				.dram_derate_percent_pixel_and_vm = 0,
				.fclk_derate_percent = 83,
				.dcfclk_derate_percent = 100,
			},
		},
		.writeback = {
			.base_latency_us = 0,
			.scaling_factor_us = 0,
			.scaling_factor_mhz = 0,
		},
		.qos_params = {
			.dcn4 = {
				.df_qos_response_time_fclk_cycles = 300,
				.max_round_trip_to_furthest_cs_fclk_cycles = 350,
				.mall_overhead_fclk_cycles = 50,
				.meta_trip_adder_fclk_cycles = 36,
				.average_transport_distance_fclk_cycles = 260,

				.umc_urgent_ramp_latency_margin = 50,
				.umc_max_latency_margin = 30,
				.umc_average_latency_margin = 20,
				.fabric_max_transport_latency_margin = 20,
				.fabric_average_transport_latency_margin = 10,

				/* One entry per UCLK DPM state; minimum_uclk_khz of 0
				 * leaves the floor unconstrained. */
				.per_uclk_dpm_params = {
					{
						// State 1
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 472,
						.trip_to_memory_uclk_cycles = 827,
						.meta_trip_to_memory_uclk_cycles = 827,
						.maximum_latency_when_urgent_uclk_cycles = 72,
						.average_latency_when_urgent_uclk_cycles = 72,
						.maximum_latency_when_non_urgent_uclk_cycles = 827,
						.average_latency_when_non_urgent_uclk_cycles = 117,
					},
					{
						// State 2
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 546,
						.trip_to_memory_uclk_cycles = 848,
						.meta_trip_to_memory_uclk_cycles = 848,
						.maximum_latency_when_urgent_uclk_cycles = 146,
						.average_latency_when_urgent_uclk_cycles = 146,
						.maximum_latency_when_non_urgent_uclk_cycles = 848,
						.average_latency_when_non_urgent_uclk_cycles = 133,
					},
					{
						// State 3
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 564,
						.trip_to_memory_uclk_cycles = 853,
						.meta_trip_to_memory_uclk_cycles = 853,
						.maximum_latency_when_urgent_uclk_cycles = 164,
						.average_latency_when_urgent_uclk_cycles = 164,
						.maximum_latency_when_non_urgent_uclk_cycles = 853,
						.average_latency_when_non_urgent_uclk_cycles = 136,
					},
					{
						// State 4
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 613,
						.trip_to_memory_uclk_cycles = 869,
						.meta_trip_to_memory_uclk_cycles = 869,
						.maximum_latency_when_urgent_uclk_cycles = 213,
						.average_latency_when_urgent_uclk_cycles = 213,
						.maximum_latency_when_non_urgent_uclk_cycles = 869,
						.average_latency_when_non_urgent_uclk_cycles = 149,
					},
					{
						// State 5
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 632,
						.trip_to_memory_uclk_cycles = 874,
						.meta_trip_to_memory_uclk_cycles = 874,
						.maximum_latency_when_urgent_uclk_cycles = 232,
						.average_latency_when_urgent_uclk_cycles = 232,
						.maximum_latency_when_non_urgent_uclk_cycles = 874,
						.average_latency_when_non_urgent_uclk_cycles = 153,
					},
					{
						// State 6
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 665,
						.trip_to_memory_uclk_cycles = 885,
						.meta_trip_to_memory_uclk_cycles = 885,
						.maximum_latency_when_urgent_uclk_cycles = 265,
						.average_latency_when_urgent_uclk_cycles = 265,
						.maximum_latency_when_non_urgent_uclk_cycles = 885,
						.average_latency_when_non_urgent_uclk_cycles = 161,
					},
					{
						// State 7
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 689,
						.trip_to_memory_uclk_cycles = 895,
						.meta_trip_to_memory_uclk_cycles = 895,
						.maximum_latency_when_urgent_uclk_cycles = 289,
						.average_latency_when_urgent_uclk_cycles = 289,
						.maximum_latency_when_non_urgent_uclk_cycles = 895,
						.average_latency_when_non_urgent_uclk_cycles = 167,
					},
					{
						// State 8
						.minimum_uclk_khz = 0,
						.urgent_ramp_uclk_cycles = 716,
						.trip_to_memory_uclk_cycles = 902,
						.meta_trip_to_memory_uclk_cycles = 902,
						.maximum_latency_when_urgent_uclk_cycles = 316,
						.average_latency_when_urgent_uclk_cycles = 316,
						.maximum_latency_when_non_urgent_uclk_cycles = 902,
						.average_latency_when_non_urgent_uclk_cycles = 174,
					},
				},
			},
		},
		.qos_type = dml2_qos_param_type_dcn4,
	},

	.power_management_parameters = {
		.dram_clk_change_blackout_us = 400,
		.fclk_change_blackout_us = 0,
		.g7_ppt_blackout_us = 0,
		.stutter_enter_plus_exit_latency_us = 21,
		.stutter_exit_latency_us = 16,
		.z8_stutter_enter_plus_exit_latency_us = 0,
		.z8_stutter_exit_latency_us = 0,
	},

	.vmin_limit = {
		.dispclk_khz = 600 * 1000,
	},

	.dprefclk_mhz = 700,
	.xtalclk_mhz = 100,
	.pcie_refclk_mhz = 100,
	.dchub_refclk_mhz = 50,
	.mall_allocated_for_dcn_mbytes = 64,
	.max_outstanding_reqs = 512,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.return_bus_width_bytes = 64,
	.hostvm_min_page_size_kbytes = 0,
	.gpuvm_min_page_size_kbytes = 256,
	.phy_downspread_percent = 0,
	.dcn_downspread_percent = 0,
	.dispclk_dppclk_vco_speed_mhz = 4500,
	.do_urgent_latency_adjustment = 0,
	.mem_word_bytes = 32,
	.num_dcc_mcaches = 8,
	.mcache_size_bytes = 2048,
	.mcache_line_size_bytes = 32,
	.max_fclk_for_uclk_dpm_khz = 1250 * 1000,
};
/*
 * Maximum IP capabilities advertised for DCN4.01, consumed by DML2 mode
 * support / mode programming.  Fields suffixed _us are microseconds,
 * _kbytes are kilobytes.
 * NOTE(review): these are HW tuning constants — confirm against the
 * DCN4.01 IP spec before changing any value.
 */
static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
	.pipe_count = 4,
	.otg_count = 4,
	.num_dsc = 4,
	.max_num_dp2p0_streams = 4,
	.max_num_hdmi_frl_outputs = 1,
	.max_num_dp2p0_outputs = 4,
	.rob_buffer_size_kbytes = 192,
	.config_return_buffer_size_in_kbytes = 1344,
	.meta_fifo_size_in_kentries = 22,
	.compressed_buffer_segment_size_in_kbytes = 64,
	.subvp_drr_scheduling_margin_us = 100,
	.subvp_prefetch_end_to_mall_start_us = 15,
	.subvp_fw_processing_delay = 15,
	.max_vactive_det_fill_delay_us = 400,

	/* FAMS2 firmware scheduling delays */
	.fams2 = {
		.max_allow_delay_us = 100 * 1000,
		.scheduling_delay_us = 50,
		.vertical_interrupt_ack_delay_us = 18,
		.allow_programming_delay_us = 18,
		.min_allow_width_us = 20,
		.subvp_df_throttle_delay_us = 100,
		.subvp_programming_delay_us = 18,
		.subvp_prefetch_to_mall_delay_us = 18,
		.drr_programming_delay_us = 18,
	},
};
#endif

View file

@ -0,0 +1,10 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

/*
 * Single include point through which DML2 pulls in environment-provided
 * types (via os_types.h), keeping the rest of the DML2 core free of
 * direct platform includes.
 */
#ifndef __DML2_EXTERNAL_LIB_DEPS__
#define __DML2_EXTERNAL_LIB_DEPS__

#include "os_types.h"

#endif

View file

@ -0,0 +1,47 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML_TOP_H__
#define __DML_TOP_H__

#include "dml_top_types.h"

/*
 * Top Level Interface for DML2
 */

/*
 * Returns the size of the DML instance for the caller to allocate
 */
unsigned int dml2_get_instance_size_bytes(void);

/*
 * Initializes the DML instance (i.e. with configuration, soc BB, IP params, etc...)
 */
bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out);

/*
 * Determines if the input mode is supported (boolean) on the SoC at all.  Does not
 * return information on how the mode should be programmed.
 */
bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out);

/*
 * Determines the full (optimized) programming for the input mode.  Returns minimum
 * clocks as well as dchub register programming values for all pipes, plus additional
 * meta such as ODM or MPCC combine factors.
 */
bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out);

/*
 * Determines the correct per pipe mcache register programming for a valid mode.
 * The mcache allocation must have been calculated (successfully) in a previous
 * call to dml2_build_mode_programming.
 * The actual hubp viewport dimensions must be what the actual registers will be
 * programmed to (i.e. based on scaler setup).
 */
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);

#endif

View file

@ -0,0 +1,185 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __dml2_TOP_DCHUB_REGISTERS_H__
#define __dml2_TOP_DCHUB_REGISTERS_H__

#include "dml2_external_lib_deps.h"

// These types are uint32_t as they represent actual calculated register values for HW

/* Per-pipe deadline generator (DLG) register values. */
struct dml2_display_dlg_regs {
	uint32_t refcyc_h_blank_end;
	uint32_t dlg_vblank_end;
	uint32_t min_dst_y_next_start;
	uint32_t refcyc_per_htotal;
	uint32_t refcyc_x_after_scaler;
	uint32_t dst_y_after_scaler;
	uint32_t dst_y_prefetch;
	uint32_t dst_y_per_vm_vblank;
	uint32_t dst_y_per_row_vblank;
	uint32_t dst_y_per_vm_flip;
	uint32_t dst_y_per_row_flip;
	uint32_t ref_freq_to_pix_freq;
	uint32_t vratio_prefetch;
	uint32_t vratio_prefetch_c;
	uint32_t refcyc_per_tdlut_group;
	uint32_t refcyc_per_pte_group_vblank_l;
	uint32_t refcyc_per_pte_group_vblank_c;
	uint32_t refcyc_per_pte_group_flip_l;
	uint32_t refcyc_per_pte_group_flip_c;
	uint32_t dst_y_per_pte_row_nom_l;
	uint32_t dst_y_per_pte_row_nom_c;
	uint32_t refcyc_per_pte_group_nom_l;
	uint32_t refcyc_per_pte_group_nom_c;
	uint32_t refcyc_per_line_delivery_pre_l;
	uint32_t refcyc_per_line_delivery_pre_c;
	uint32_t refcyc_per_line_delivery_l;
	uint32_t refcyc_per_line_delivery_c;
	uint32_t refcyc_per_vm_group_vblank;
	uint32_t refcyc_per_vm_group_flip;
	uint32_t refcyc_per_vm_req_vblank;
	uint32_t refcyc_per_vm_req_flip;
	uint32_t dst_y_offset_cur0;
	uint32_t chunk_hdl_adjust_cur0;
	uint32_t vready_after_vcount0;
	uint32_t dst_y_delta_drq_limit;
	uint32_t refcyc_per_vm_dmdata;
	uint32_t dmdata_dl_delta;

	// MRQ
	uint32_t refcyc_per_meta_chunk_vblank_l;
	uint32_t refcyc_per_meta_chunk_vblank_c;
	uint32_t refcyc_per_meta_chunk_flip_l;
	uint32_t refcyc_per_meta_chunk_flip_c;
	uint32_t dst_y_per_meta_row_nom_l;
	uint32_t dst_y_per_meta_row_nom_c;
	uint32_t refcyc_per_meta_chunk_nom_l;
	uint32_t refcyc_per_meta_chunk_nom_c;
};

/* Per-pipe TTU (time to underflow) QoS register values. */
struct dml2_display_ttu_regs {
	uint32_t qos_level_low_wm;
	uint32_t qos_level_high_wm;
	uint32_t min_ttu_vblank;
	uint32_t qos_level_flip;
	uint32_t refcyc_per_req_delivery_l;
	uint32_t refcyc_per_req_delivery_c;
	uint32_t refcyc_per_req_delivery_cur0;
	uint32_t refcyc_per_req_delivery_pre_l;
	uint32_t refcyc_per_req_delivery_pre_c;
	uint32_t refcyc_per_req_delivery_pre_cur0;
	uint32_t qos_level_fixed_l;
	uint32_t qos_level_fixed_c;
	uint32_t qos_level_fixed_cur0;
	uint32_t qos_ramp_disable_l;
	uint32_t qos_ramp_disable_c;
	uint32_t qos_ramp_disable_cur0;
};

/* Global arbiter register values (shared across pipes). */
struct dml2_display_arb_regs {
	uint32_t max_req_outstanding;
	uint32_t min_req_outstanding;
	uint32_t sat_level_us;
	uint32_t hvm_max_qos_commit_threshold;
	uint32_t hvm_min_req_outstand_commit_threshold;
	uint32_t compbuf_reserved_space_kbytes;
	uint32_t compbuf_size;
	uint32_t sdpif_request_rate_limit;
	uint32_t allow_sdpif_rate_limit_when_cstate_req;
	uint32_t dcfclk_deep_sleep_hysteresis;
};

/* Cursor deadline register values. */
struct dml2_cursor_dlg_regs {
	uint32_t dst_x_offset;   // CURSOR0_DST_X_OFFSET
	uint32_t dst_y_offset;   // CURSOR0_DST_Y_OFFSET
	uint32_t chunk_hdl_adjust; // CURSOR0_CHUNK_HDL_ADJUST

	uint32_t qos_level_fixed;
	uint32_t qos_ramp_disable;
};

/* Per-plane (luma or chroma) request generator register values. */
struct dml2_display_plane_rq_regs {
	uint32_t chunk_size;
	uint32_t min_chunk_size;
	uint32_t dpte_group_size;
	uint32_t mpte_group_size;
	uint32_t swath_height;
	uint32_t pte_row_height_linear;

	// MRQ
	uint32_t meta_chunk_size;
	uint32_t min_meta_chunk_size;
};

/* Per-pipe request generator register values (luma + chroma). */
struct dml2_display_rq_regs {
	struct dml2_display_plane_rq_regs rq_regs_l;
	struct dml2_display_plane_rq_regs rq_regs_c;
	uint32_t drq_expansion_mode;
	uint32_t prq_expansion_mode;
	uint32_t crq_expansion_mode;
	uint32_t plane1_base_address;
	uint32_t unbounded_request_enabled;

	// MRQ
	uint32_t mrq_expansion_mode;
};

/* One mcache assignment: a pair of mcache IDs and the split point between them. */
struct dml2_display_mcache_regs {
	uint32_t mcache_id_first;
	uint32_t mcache_id_second;
	uint32_t split_location;
};

/* Per-hubp mcache assignments for the main and MALL requestors, planes 0/1. */
struct dml2_hubp_pipe_mcache_regs {
	struct {
		struct dml2_display_mcache_regs p0;
		struct dml2_display_mcache_regs p1;
	} main;
	struct {
		struct dml2_display_mcache_regs p0;
		struct dml2_display_mcache_regs p1;
	} mall;
};

/* All per-pipe dchub register values produced by DML2 mode programming. */
struct dml2_dchub_per_pipe_register_set {
	struct dml2_display_rq_regs rq_regs;
	struct dml2_display_ttu_regs ttu_regs;
	struct dml2_display_dlg_regs dlg_regs;

	uint32_t det_size;
};

/* One watermark set (A/B/C/D) of dchub watermark and QoS register values. */
struct dml2_dchub_watermark_regs {
	/* watermarks */
	uint32_t urgent;
	uint32_t sr_enter;
	uint32_t sr_exit;
	uint32_t uclk_pstate;
	uint32_t fclk_pstate;
	uint32_t temp_read_or_ppt;
	uint32_t usr;
	/* qos */
	uint32_t refcyc_per_trip_to_mem;
	uint32_t refcyc_per_meta_trip_to_mem;
	uint32_t frac_urg_bw_flip;
	uint32_t frac_urg_bw_nom;
	uint32_t frac_urg_bw_mall;
};

enum dml2_dchub_watermark_reg_set_index {
	DML2_DCHUB_WATERMARK_SET_A = 0,
	DML2_DCHUB_WATERMARK_SET_B = 1,
	DML2_DCHUB_WATERMARK_SET_C = 2,
	DML2_DCHUB_WATERMARK_SET_D = 3,
	DML2_DCHUB_WATERMARK_SET_NUM = 4,
};

/* Global (non-per-pipe) dchub register values: arbiter plus watermark sets. */
struct dml2_dchub_global_register_set {
	struct dml2_display_arb_regs arb_regs;
	struct dml2_dchub_watermark_regs wm_regs[DML2_DCHUB_WATERMARK_SET_NUM];
	unsigned int num_watermark_sets;
};

#endif

View file

@ -0,0 +1,502 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML_TOP_DISPLAY_CFG_TYPES_H__
#define __DML_TOP_DISPLAY_CFG_TYPES_H__

#include "dml2_external_lib_deps.h"

#define DML2_MAX_PLANES 8
#define DML2_MAX_DCN_PIPES 8
#define DML2_MAX_MCACHES 8 // assume plane is going to be supported by a max of 8 mcaches

/* Surface tiling / swizzle modes; dml2_gfx11_* are legacy GFX11 modes. */
enum dml2_swizzle_mode {
	dml2_sw_linear,
	dml2_sw_256b_2d,
	dml2_sw_4kb_2d,
	dml2_sw_64kb_2d,
	dml2_sw_256kb_2d,
	dml2_gfx11_sw_linear,
	dml2_gfx11_sw_64kb_d,
	dml2_gfx11_sw_64kb_d_t,
	dml2_gfx11_sw_64kb_d_x,
	dml2_gfx11_sw_64kb_r_x,
	dml2_gfx11_sw_256kb_d_x,
	dml2_gfx11_sw_256kb_r_x
};

/* Source pixel format classes; values 7/8 intentionally unassigned. */
enum dml2_source_format_class {
	dml2_444_8 = 0,
	dml2_444_16 = 1,
	dml2_444_32 = 2,
	dml2_444_64 = 3,
	dml2_420_8 = 4,
	dml2_420_10 = 5,
	dml2_420_12 = 6,
	dml2_rgbe_alpha = 9,
	dml2_rgbe = 10,
	dml2_mono_8 = 11,
	dml2_mono_16 = 12
};

enum dml2_rotation_angle {
	dml2_rotation_0 = 0,
	dml2_rotation_90 = 1,
	dml2_rotation_180 = 2,
	dml2_rotation_270 = 3
};

enum dml2_output_format_class {
	dml2_444 = 0,
	dml2_s422 = 1,
	dml2_n422 = 2,
	dml2_420 = 3
};

enum dml2_output_encoder_class {
	dml2_dp = 0,
	dml2_edp = 1,
	dml2_dp2p0 = 2,
	dml2_hdmi = 3,
	dml2_hdmifrl = 4,
	dml2_none = 5
};

enum dml2_output_link_dp_rate {
	dml2_dp_rate_na = 0,
	dml2_dp_rate_hbr = 1,
	dml2_dp_rate_hbr2 = 2,
	dml2_dp_rate_hbr3 = 3,
	dml2_dp_rate_uhbr10 = 4,
	dml2_dp_rate_uhbr13p5 = 5,
	dml2_dp_rate_uhbr20 = 6
};

/* Per-plane override of the UCLK p-state switch method chosen by DML2. */
enum dml2_uclk_pstate_change_strategy {
	dml2_uclk_pstate_change_strategy_auto = 0,
	dml2_uclk_pstate_change_strategy_force_vactive = 1,
	dml2_uclk_pstate_change_strategy_force_vblank = 2,
	dml2_uclk_pstate_change_strategy_force_drr = 3,
	dml2_uclk_pstate_change_strategy_force_mall_svp = 4,
	dml2_uclk_pstate_change_strategy_force_mall_full_frame = 5,
};

enum dml2_svp_mode_override {
	dml2_svp_mode_override_auto = 0,
	dml2_svp_mode_override_main_pipe = 1,
	dml2_svp_mode_override_phantom_pipe = 2, //does not need to be defined explicitly, main overrides result in implicit phantom additions
	dml2_svp_mode_override_phantom_pipe_no_data_return = 3,
	dml2_svp_mode_override_imall = 4
};

enum dml2_refresh_from_mall_mode_override {
	dml2_refresh_from_mall_mode_override_auto = 0,
	dml2_refresh_from_mall_mode_override_force_disable = 1,
	dml2_refresh_from_mall_mode_override_force_enable = 2
};

/* ODM (output data mux) combine/split configurations. */
enum dml2_odm_mode {
	dml2_odm_mode_auto = 0,
	dml2_odm_mode_bypass,
	dml2_odm_mode_combine_2to1,
	dml2_odm_mode_combine_3to1,
	dml2_odm_mode_combine_4to1,
	dml2_odm_mode_split_1to2,
	dml2_odm_mode_mso_1to2,
	dml2_odm_mode_mso_1to4
};

enum dml2_scaling_transform {
	dml2_scaling_transform_explicit = 0,
	dml2_scaling_transform_fullscreen,
	dml2_scaling_transform_aspect_ratio,
	dml2_scaling_transform_centered
};

enum dml2_dsc_enable_option {
	dml2_dsc_disable = 0,
	dml2_dsc_enable = 1,
	dml2_dsc_enable_if_necessary = 2
};

enum dml2_pstate_support_method {
	dml2_pstate_method_uninitialized,
	dml2_pstate_method_not_supported,
	dml2_pstate_method_vactive,
	dml2_pstate_method_vblank,
	dml2_pstate_method_svp,
	dml2_pstate_method_drr
};

enum dml2_tdlut_addressing_mode {
	dml2_tdlut_sw_linear = 0,
	dml2_tdlut_simple_linear = 1
};

enum dml2_tdlut_width_mode {
	dml2_tdlut_width_17_cube = 0,
	dml2_tdlut_width_33_cube = 1
};

enum dml2_twait_budgeting_setting {
	dml2_twait_budgeting_setting_ignore = 0,// Ignore this budget in twait

	dml2_twait_budgeting_setting_if_needed, // Budget for it only if needed
	//(i.e. UCLK/FCLK DPM cannot be supported in active)

	dml2_twait_budgeting_setting_try,       // Budget for it as long as there is an SoC state that
	// can support it
};
/* Inputs needed to compute cursor deadline register values. */
struct dml2_get_cursor_dlg_reg {
	unsigned int cursor_x_position;
	unsigned int cursor_hotspot_x;
	unsigned int cursor_primary_offset;
	unsigned int cursor_secondary_offset;
	bool cursor_stereo_en;
	bool cursor_2x_magnify;
	double hratio;
	double pixel_rate_mhz;
	double dlg_refclk_mhz;
};

/// @brief Surface Parameters
struct dml2_surface_cfg {
	enum dml2_swizzle_mode tiling;

	struct {
		unsigned long pitch;
		unsigned long width;
		unsigned long height;
	} plane0;

	struct {
		unsigned long pitch;
		unsigned long width;
		unsigned long height;
	} plane1;

	/* DCC (display compression) setup; informative fields feed bandwidth estimates. */
	struct {
		bool enable;
		struct {
			unsigned long pitch;
		} plane0;
		struct {
			unsigned long pitch;
		} plane1;

		struct {
			double dcc_rate_plane0;
			double dcc_rate_plane1;
			double fraction_of_zero_size_request_plane0;
			double fraction_of_zero_size_request_plane1;
		} informative;
	} dcc;
};
/* Per-plane composition: rotation, viewport and scaler configuration. */
struct dml2_composition_cfg {
	enum dml2_rotation_angle rotation_angle;
	bool mirrored;
	enum dml2_scaling_transform scaling_transform;
	bool rect_out_height_spans_vactive;

	struct {
		bool stationary;
		struct {
			unsigned long width;
			unsigned long height;
			unsigned long x_start;
			unsigned long y_start;
		} plane0;
		struct {
			unsigned long width;
			unsigned long height;
			unsigned long x_start;
			unsigned long y_start;
		} plane1;
	} viewport;

	struct {
		bool enabled;
		struct {
			double h_ratio;
			double v_ratio;
			unsigned int h_taps;
			unsigned int v_taps;
		} plane0;
		struct {
			double h_ratio;
			double v_ratio;
			unsigned int h_taps;
			unsigned int v_taps;
		} plane1;
		unsigned long rect_out_width;
	} scaler_info;
};

/* Per-stream timing, including DSC and DRR (variable refresh) configuration. */
struct dml2_timing_cfg {
	unsigned long h_total;
	unsigned long v_total;
	unsigned long h_blank_end;
	unsigned long v_blank_end;
	unsigned long h_front_porch;
	unsigned long v_front_porch;
	unsigned long h_sync_width;
	unsigned long pixel_clock_khz;
	unsigned long h_active;
	unsigned long v_active;
	unsigned int bpc; //FIXME: review with Jun

	struct {
		enum dml2_dsc_enable_option enable;
		unsigned int dsc_compressed_bpp_x16;
		struct {
			// for dv to specify num dsc slices to use
			unsigned int num_slices;
		} overrides;
	} dsc;

	bool interlaced;

	struct {
		/* static */
		bool enabled;
		unsigned long min_refresh_uhz;
		unsigned int max_instant_vtotal_delta;
		/* dynamic */
		bool disallowed;
		bool drr_active_variable;
		bool drr_active_fixed;
	} drr_config;

	unsigned long vblank_nom;
};
/* Per-stream link/output configuration (encoder, lanes, rate, audio). */
struct dml2_link_output_cfg {
	enum dml2_output_format_class output_format;
	enum dml2_output_encoder_class output_encoder;
	unsigned int output_dp_lane_count;
	enum dml2_output_link_dp_rate output_dp_link_rate;
	unsigned long audio_sample_rate;
	unsigned long audio_sample_layout;
	bool output_disabled; // The stream does not go to a backend for output to a physical
			      //connector (e.g. writeback only, phantom pipe) goes to writeback
	bool validate_output; // Do not validate the link configuration for this display stream.
};

/* Per-stream writeback configuration and its scaler setup. */
struct dml2_writeback_cfg {
	bool enable;
	enum dml2_source_format_class pixel_format;
	unsigned int active_writebacks_per_surface;

	struct {
		bool enabled;
		unsigned long input_width;
		unsigned long input_height;
		unsigned long output_width;
		unsigned long output_height;
		unsigned long v_taps;
		unsigned long h_taps;
		double h_ratio;
		double v_ratio;
	} scaling_info;
};
/* Full description of one display plane fed into DML2. */
struct dml2_plane_parameters {
	unsigned int stream_index; // Identifies which plane will be composed

	enum dml2_source_format_class pixel_format;
	/*
	 * The surface and composition structures use
	 * the terms plane0 and plane1.  These planes
	 * are expected to hold the following data based
	 * on the pixel format.
	 *
	 * RGB or YUV Non-Planar Types:
	 *	dml2_444_8
	 *	dml2_444_16
	 *	dml2_444_32
	 *	dml2_444_64
	 *	dml2_rgbe
	 *
	 *	plane0 = argb or rgbe
	 *	plane1 = not used
	 *
	 * YUV Planar-Types:
	 *	dml2_420_8
	 *	dml2_420_10
	 *	dml2_420_12
	 *
	 *	plane0 = luma
	 *	plane1 = chroma
	 *
	 * RGB Planar Types:
	 *	dml2_rgbe_alpha
	 *
	 *	plane0 = rgbe
	 *	plane1 = alpha
	 *
	 * Mono Non-Planar Types:
	 *	dml2_mono_8
	 *	dml2_mono_16
	 *
	 *	plane0 = luma
	 *	plane1 = not used
	 */
	struct dml2_surface_cfg surface;
	struct dml2_composition_cfg composition;

	struct {
		bool enable;
		unsigned long lines_before_active_required;
		unsigned long transmitted_bytes;
	} dynamic_meta_data;

	struct {
		unsigned int num_cursors;
		unsigned long cursor_width;
		unsigned long cursor_bpp;
	} cursor;

	// For TDLUT, SW would assume TDLUT is setup and enable all the time and
	// budget for worst case addressing/width mode
	struct {
		bool setup_for_tdlut;
		enum dml2_tdlut_addressing_mode tdlut_addressing_mode;
		enum dml2_tdlut_width_mode tdlut_width_mode;
		bool tdlut_mpc_width_flag;
	} tdlut;

	bool immediate_flip;

	struct {
		// Logical overrides to power management policies (usually)
		enum dml2_uclk_pstate_change_strategy uclk_pstate_change_strategy;
		enum dml2_refresh_from_mall_mode_override refresh_from_mall;
		unsigned int det_size_override_kb;
		unsigned int mpcc_combine_factor;

		long reserved_vblank_time_ns; // 0 = no override, -ve = no reserved time, +ve = explicit reserved time
		unsigned int gpuvm_min_page_size_kbytes;

		enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config

		struct {
			// HW specific overrides, there's almost no reason to mess with these
			// generally used for debugging or simulation
			bool force_one_row_for_frame;
			struct {
				bool enable;
				bool value;
			} force_pte_buffer_mode;
			double dppclk_mhz;
		} hw;
	} overrides;
};
/* Full description of one display stream (timing, output, writeback). */
struct dml2_stream_parameters {
	struct dml2_timing_cfg timing;
	struct dml2_link_output_cfg output;
	struct dml2_writeback_cfg writeback;

	struct {
		enum dml2_odm_mode odm_mode;
		bool disable_dynamic_odm;
		bool disable_subvp;
		int minimum_vblank_idle_requirement_us;
		bool minimize_active_latency_hiding;

		struct {
			struct {
				enum dml2_twait_budgeting_setting uclk_pstate;
				enum dml2_twait_budgeting_setting fclk_pstate;
				enum dml2_twait_budgeting_setting stutter_enter_exit;
			} twait_budgeting;
		} hw;
	} overrides;
};

/* Top-level display configuration passed to DML2 mode support/programming. */
struct dml2_display_cfg {
	bool gpuvm_enable;
	bool hostvm_enable;

	// Allocate DET proportionally between streams based on pixel rate
	// and then allocate proportionally between planes.
	bool minimize_det_reallocation;

	unsigned int gpuvm_max_page_table_levels;
	unsigned int hostvm_max_page_table_levels;
	struct dml2_plane_parameters plane_descriptors[DML2_MAX_PLANES];
	struct dml2_stream_parameters stream_descriptors[DML2_MAX_PLANES];
	unsigned int num_planes;
	unsigned int num_streams;

	struct {
		struct {
			// HW specific overrides, there's almost no reason to mess with these
			// generally used for debugging or simulation
			struct {
				bool enable;
				bool value;
			} force_unbounded_requesting;

			struct {
				bool enable;
				bool value;
			} force_nom_det_size_kbytes;
			bool mode_support_check_disable;
			bool mcache_admissibility_check_disable;
			bool surface_viewport_size_check_disable;
			double dlg_ref_clk_mhz;
			double dispclk_mhz;
			double dcfclk_mhz;
			bool optimize_tdlut_scheduling; // TBD: for DV, will set this to 1, to ensure tdlut schedule is calculated based on address/width mode
		} hw;

		struct {
			bool uclk_pstate_change_disable;
			bool fclk_pstate_change_disable;
			bool g6_temp_read_pstate_disable;
			bool g7_ppt_pstate_disable;
		} power_management;

		bool enhanced_prefetch_schedule_acceleration;
		bool dcc_programming_assumes_scan_direction_unknown;
		bool synchronize_timings;
		bool synchronize_ddr_displays_for_uclk_pstate_change;
		bool max_outstanding_when_urgent_expected_disable;
		bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
		unsigned int best_effort_min_active_latency_hiding_us;
	} overrides;
};

/* Viewport slice of one pipe, used as input to mcache programming. */
struct dml2_pipe_configuration_descriptor {
	struct {
		unsigned int viewport_x_start;
		unsigned int viewport_width;
	} plane0;

	struct {
		unsigned int viewport_x_start;
		unsigned int viewport_width;
	} plane1;

	bool plane1_enabled;
	bool imall_enabled;
};

/* Per-plane input to dml2_build_mcache_programming: plane, its mcache
 * allocation, and the pipes the plane is split across. */
struct dml2_plane_mcache_configuration_descriptor {
	const struct dml2_plane_parameters *plane_descriptor;
	const struct dml2_mcache_surface_allocation *mcache_allocation;

	struct dml2_pipe_configuration_descriptor pipe_configurations[DML2_MAX_DCN_PIPES];
	char num_pipes;
};

#endif

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML_TOP_POLICY_TYPES_H__
#define __DML_TOP_POLICY_TYPES_H__
struct dml2_policy_parameters {
unsigned long odm_combine_dispclk_threshold_khz;
unsigned int max_immediate_flip_latency;
};
#endif

View file

@ -0,0 +1,193 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML_TOP_SOC_PARAMETER_TYPES_H__
#define __DML_TOP_SOC_PARAMETER_TYPES_H__
#include "dml2_external_lib_deps.h"
#define DML_MAX_CLK_TABLE_SIZE 20
struct dml2_soc_derate_values {
unsigned int dram_derate_percent_pixel;
unsigned int dram_derate_percent_vm;
unsigned int dram_derate_percent_pixel_and_vm;
unsigned int fclk_derate_percent;
unsigned int dcfclk_derate_percent;
};
struct dml2_soc_derates {
struct dml2_soc_derate_values system_active_urgent;
struct dml2_soc_derate_values system_active_average;
struct dml2_soc_derate_values dcn_mall_prefetch_urgent;
struct dml2_soc_derate_values dcn_mall_prefetch_average;
struct dml2_soc_derate_values system_idle_average;
};
struct dml2_dcn3_soc_qos_params {
struct {
unsigned int base_latency_us;
unsigned int base_latency_pixel_vm_us;
unsigned int base_latency_vm_us;
unsigned int scaling_factor_fclk_us;
unsigned int scaling_factor_mhz;
} urgent_latency_us;
unsigned int loaded_round_trip_latency_fclk_cycles;
unsigned int urgent_out_of_order_return_per_channel_pixel_only_bytes;
unsigned int urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
unsigned int urgent_out_of_order_return_per_channel_vm_only_bytes;
};
struct dml2_dcn4_uclk_dpm_dependent_qos_params {
unsigned long minimum_uclk_khz;
unsigned int urgent_ramp_uclk_cycles;
unsigned int trip_to_memory_uclk_cycles;
unsigned int meta_trip_to_memory_uclk_cycles;
unsigned int maximum_latency_when_urgent_uclk_cycles;
unsigned int average_latency_when_urgent_uclk_cycles;
unsigned int maximum_latency_when_non_urgent_uclk_cycles;
unsigned int average_latency_when_non_urgent_uclk_cycles;
};
struct dml2_dcn4_soc_qos_params {
unsigned int df_qos_response_time_fclk_cycles;
unsigned int max_round_trip_to_furthest_cs_fclk_cycles;
unsigned int mall_overhead_fclk_cycles;
unsigned int meta_trip_adder_fclk_cycles;
unsigned int average_transport_distance_fclk_cycles;
double umc_urgent_ramp_latency_margin;
double umc_max_latency_margin;
double umc_average_latency_margin;
double fabric_max_transport_latency_margin;
double fabric_average_transport_latency_margin;
struct dml2_dcn4_uclk_dpm_dependent_qos_params per_uclk_dpm_params[DML_MAX_CLK_TABLE_SIZE];
};
enum dml2_qos_param_type {
dml2_qos_param_type_dcn3,
dml2_qos_param_type_dcn4
};
struct dml2_soc_qos_parameters {
struct dml2_soc_derates derate_table;
struct {
unsigned int base_latency_us;
unsigned int scaling_factor_us;
unsigned int scaling_factor_mhz;
} writeback;
union {
struct dml2_dcn3_soc_qos_params dcn3;
struct dml2_dcn4_soc_qos_params dcn4;
} qos_params;
enum dml2_qos_param_type qos_type;
};
struct dml2_soc_power_management_parameters {
double dram_clk_change_blackout_us;
double dram_clk_change_read_only_us;
double dram_clk_change_write_only_us;
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
};
struct dml2_clk_table {
unsigned long clk_values_khz[DML_MAX_CLK_TABLE_SIZE];
unsigned char num_clk_values;
};
struct dml2_dram_params {
unsigned int channel_width_bytes;
unsigned int channel_count;
unsigned int transactions_per_clock;
};
struct dml2_soc_state_table {
struct dml2_clk_table uclk;
struct dml2_clk_table fclk;
struct dml2_clk_table dcfclk;
struct dml2_clk_table dispclk;
struct dml2_clk_table dppclk;
struct dml2_clk_table dtbclk;
struct dml2_clk_table phyclk;
struct dml2_clk_table socclk;
struct dml2_clk_table dscclk;
struct dml2_clk_table phyclk_d18;
struct dml2_clk_table phyclk_d32;
struct dml2_dram_params dram_config;
};
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
};
struct dml2_soc_bb {
struct dml2_soc_state_table clk_table;
struct dml2_soc_qos_parameters qos_parameters;
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
unsigned int dchub_refclk_mhz;
unsigned int mall_allocated_for_dcn_mbytes;
unsigned int max_outstanding_reqs;
unsigned long fabric_datapath_to_dcn_data_return_bytes;
unsigned long return_bus_width_bytes;
unsigned long hostvm_min_page_size_kbytes;
unsigned long gpuvm_min_page_size_kbytes;
double phy_downspread_percent;
double dcn_downspread_percent;
double dispclk_dppclk_vco_speed_mhz;
bool do_urgent_latency_adjustment;
unsigned int mem_word_bytes;
unsigned int num_dcc_mcaches;
unsigned int mcache_size_bytes;
unsigned int mcache_line_size_bytes;
unsigned long max_fclk_for_uclk_dpm_khz;
};
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
unsigned int max_num_dp2p0_outputs;
unsigned int rob_buffer_size_kbytes;
unsigned int config_return_buffer_size_in_kbytes;
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
unsigned int subvp_drr_scheduling_margin_us;
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
unsigned int scheduling_delay_us;
unsigned int vertical_interrupt_ack_delay_us; // delay to acknowledge vline int
unsigned int allow_programming_delay_us; // time requires to program allow
unsigned int min_allow_width_us;
unsigned int subvp_df_throttle_delay_us;
unsigned int subvp_programming_delay_us;
unsigned int subvp_prefetch_to_mall_delay_us;
unsigned int drr_programming_delay_us;
} fams2;
};
#endif

View file

@ -0,0 +1,718 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML_TOP_TYPES_H__
#define __DML_TOP_TYPES_H__
#include "dml_top_types.h"
#include "dml_top_display_cfg_types.h"
#include "dml_top_soc_parameter_types.h"
#include "dml_top_policy_types.h"
#include "dml_top_dchub_registers.h"
#include "dmub_cmd.h"
struct dml2_instance;
enum dml2_status {
dml2_success = 0,
dml2_error_generic = 1
};
enum dml2_project_id {
dml2_project_invalid = 0,
dml2_project_dcn4x_stage1 = 1,
dml2_project_dcn4x_stage2 = 2,
dml2_project_dcn4x_stage2_auto_drr_svp = 3,
};
enum dml2_dram_clock_change_support {
dml2_dram_clock_change_vactive = 0,
dml2_dram_clock_change_vblank = 1,
dml2_dram_clock_change_vblank_and_vactive = 2,
dml2_dram_clock_change_drr = 3,
dml2_dram_clock_change_mall_svp = 4,
dml2_dram_clock_change_mall_full_frame = 6,
dml2_dram_clock_change_unsupported = 7
};
enum dml2_fclock_change_support {
dml2_fclock_change_vactive = 0,
dml2_fclock_change_vblank = 1,
dml2_fclock_change_unsupported = 2
};
enum dml2_output_type_and_rate__type {
dml2_output_type_unknown = 0,
dml2_output_type_dp = 1,
dml2_output_type_edp = 2,
dml2_output_type_dp2p0 = 3,
dml2_output_type_hdmi = 4,
dml2_output_type_hdmifrl = 5
};
enum dml2_output_type_and_rate__rate {
dml2_output_rate_unknown = 0,
dml2_output_rate_dp_rate_hbr = 1,
dml2_output_rate_dp_rate_hbr2 = 2,
dml2_output_rate_dp_rate_hbr3 = 3,
dml2_output_rate_dp_rate_uhbr10 = 4,
dml2_output_rate_dp_rate_uhbr13p5 = 5,
dml2_output_rate_dp_rate_uhbr20 = 6,
dml2_output_rate_hdmi_rate_3x3 = 7,
dml2_output_rate_hdmi_rate_6x3 = 8,
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
dml2_output_rate_hdmi_rate_12x4 = 12
};
struct dml2_pmo_options {
bool disable_vblank;
bool disable_svp;
bool disable_drr_var;
bool disable_drr_fixed;
bool disable_drr_var_when_var_active;
bool disable_fams2;
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
};
struct dml2_options {
enum dml2_project_id project_id;
struct dml2_pmo_options pmo_options;
};
/* In/out parameters for dml2_initialize_instance(). */
struct dml2_initialize_instance_in_out {
	struct dml2_instance *dml2_instance;
	struct dml2_options options;
	struct dml2_soc_bb soc_bb;
	struct dml2_ip_capabilities ip_caps;

	struct {
		void *explicit_ip_bb;
		unsigned int explicit_ip_bb_size;
	} overrides;
};

struct dml2_reset_instance_in_out {
	struct dml2_instance *dml2_instance;
};

/* In/out parameters for dml2_check_mode_supported(). */
struct dml2_check_mode_supported_in_out {
	/*
	 * Inputs
	 */
	struct dml2_instance *dml2_instance;
	const struct dml2_display_cfg *display_config;

	/*
	 * Outputs
	 */
	bool is_supported;
};
/* Result of mcache allocation for one plane: slice boundaries and global IDs. */
struct dml2_mcache_surface_allocation {
	bool valid;
	/*
	 * For iMALL, dedicated mall mcaches are required (sharing of last
	 * slice possible), for legacy phantom or phantom without return
	 * the only mall mcaches need to be valid.
	 */
	bool requires_dedicated_mall_mcache;

	unsigned int num_mcaches_plane0;
	unsigned int num_mcaches_plane1;
	/*
	 * A plane is divided into vertical slices of mcaches,
	 * which wrap on the surface width.
	 *
	 * For example, if the surface width is 7680, and split into
	 * three slices of equal width, the boundary array would contain
	 * [2560, 5120, 7680]
	 *
	 * The assignments are
	 * 0 = [0 .. 2559]
	 * 1 = [2560 .. 5119]
	 * 2 = [5120 .. 7679]
	 * 0 = [7680 .. INF]
	 * The final element implicitly is the same as the first, and
	 * at first seems invalid since it is never referenced (it is
	 * outside the surface).  However, it's useful when shifting
	 * (see below).
	 *
	 * For any given valid mcache assignment, a shifted version, wrapped
	 * on the surface width boundary is also assumed to be valid.
	 *
	 * For example, shifting [2560, 5120, 7680] by -50 results in
	 * [2510, 5170, 7630].
	 *
	 * The assignments are now:
	 * 0 = [0 .. 2509]
	 * 1 = [2510 .. 5169]
	 * 2 = [5170 .. 7629]
	 * 0 = [7630 .. INF]
	 */
	int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
	int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];

	/*
	 * Shift granularity is not necessarily 1
	 */
	struct {
		int p0;
		int p1;
	} shift_granularity;

	/*
	 * MCacheIDs have global scope in the SoC, and they are stored here.
	 * These IDs are generally not valid until all planes in a display
	 * configuration have had their mcache requirements calculated.
	 */
	int global_mcache_ids_plane0[DML2_MAX_MCACHES + 1];
	int global_mcache_ids_plane1[DML2_MAX_MCACHES + 1];
	int global_mcache_ids_mall_plane0[DML2_MAX_MCACHES + 1];
	int global_mcache_ids_mall_plane1[DML2_MAX_MCACHES + 1];

	/*
	 * Generally, plane0/1 slices must use a disjoint set of caches
	 * but in some cases the final segment of the two planes can
	 * use the same cache.  If plane0_plane1 is set, then this is
	 * allowed.
	 *
	 * Similarly, the caches allocated to MALL prefetcher are generally
	 * disjoint, but if mall_prefetch is set, then the final segment
	 * between the main and the mall pixel requestor can use the same
	 * cache.
	 *
	 * Note that both bits may be set at the same time.
	 */
	struct {
		bool mall_comb_mcache_p0;
		bool mall_comb_mcache_p1;
		bool plane0_plane1;
	} last_slice_sharing;

	struct {
		int meta_row_bytes_plane0;
		int meta_row_bytes_plane1;
	} informative;
};
/* UCLK p-state support method chosen per plane/stream; grouped into
 * HW-only methods, FW-assisted methods, and FW-with-DRR methods. */
enum dml2_uclk_pstate_support_method {
	dml2_uclk_pstate_support_method_not_supported = 0,
	/* hw */
	dml2_uclk_pstate_support_method_vactive = 1,
	dml2_uclk_pstate_support_method_vblank = 2,
	dml2_uclk_pstate_support_method_reserved_hw = 5,
	/* fw */
	dml2_uclk_pstate_support_method_fw_subvp_phantom = 6,
	dml2_uclk_pstate_support_method_reserved_fw = 10,
	/* fw w/drr */
	dml2_uclk_pstate_support_method_fw_vactive_drr = 11,
	dml2_uclk_pstate_support_method_fw_vblank_drr = 12,
	dml2_uclk_pstate_support_method_fw_subvp_phantom_drr = 13,
	dml2_uclk_pstate_support_method_reserved_fw_drr_fixed = 20,
	dml2_uclk_pstate_support_method_fw_drr = 21,
	dml2_uclk_pstate_support_method_reserved_fw_drr_var = 22,

	dml2_uclk_pstate_support_method_count
};

/* Per-plane output of mode programming: clocks, mcache allocation and
 * per-pipe register sets (including the SubVP phantom plane, if any). */
struct dml2_per_plane_programming {
	const struct dml2_plane_parameters *plane_descriptor;

	union {
		struct {
			unsigned long dppclk_khz;
		} dcn4;
	} min_clocks;

	struct dml2_mcache_surface_allocation mcache_allocation;

	// If a stream is using automatic or forced odm combine
	// and the stream for this plane has num_odms_required > 1
	// num_dpps_required is always equal to num_odms_required for
	// ALL planes of the stream

	// If a stream is using odm split, then this value is always 1
	unsigned int num_dpps_required;

	enum dml2_uclk_pstate_support_method uclk_pstate_support_method;

	// MALL size requirements for MALL SS and SubVP
	unsigned int surface_size_mall_bytes;
	unsigned int svp_size_mall_bytes;

	struct dml2_dchub_per_pipe_register_set *pipe_regs[DML2_MAX_PLANES];

	struct {
		bool valid;
		struct dml2_plane_parameters descriptor;
		struct dml2_dchub_per_pipe_register_set *pipe_regs[DML2_MAX_PLANES];
	} phantom_plane;
};

/* OTG global-sync timing values; dcn4 variant selected by project. */
union dml2_global_sync_programming {
	struct {
		unsigned int vstartup_lines;
		unsigned int vupdate_offset_pixels;
		unsigned int vupdate_vupdate_width_pixels;
		unsigned int vready_offset_pixels;
	} dcn4;
};

/* Per-stream output of mode programming: clocks, global sync, ODM count,
 * phantom stream (for SubVP) and FAMS2 firmware parameters. */
struct dml2_per_stream_programming {
	const struct dml2_stream_parameters *stream_descriptor;

	union {
		struct {
			unsigned long dscclk_khz;
			unsigned long dtbclk_khz;
			unsigned long phyclk_khz;
		} dcn4;
	} min_clocks;

	union dml2_global_sync_programming global_sync;

	unsigned int num_odms_required;

	enum dml2_uclk_pstate_support_method uclk_pstate_method;

	struct {
		bool enabled;
		struct dml2_stream_parameters descriptor;
		union dml2_global_sync_programming global_sync;
	} phantom_stream;

	struct dmub_fams2_stream_static_state fams2_params;
};
//-----------------
// Mode Support Information
//-----------------
/*
 * Detailed result of the mode support check. ModeIsSupported is the overall
 * verdict; the remaining flags record the outcome of each individual
 * sub-check (useful for diagnosing why a mode failed), followed by the
 * per-plane settings chosen during mode support that mode programming reuses.
 */
struct dml2_mode_support_info {
	bool ModeIsSupported; //<brief Is the mode support any voltage and combine setting
	bool ImmediateFlipSupport; //<brief Means mode support immediate flip at the max combine setting; determine in mode support and used in mode programming
	// Mode Support Reason
	bool WritebackLatencySupport;
	bool ScaleRatioAndTapsSupport;
	bool SourceFormatPixelAndScanSupport;
	bool P2IWith420;
	bool DSCOnlyIfNecessaryWithBPP;
	bool DSC422NativeNotSupported;
	bool LinkRateDoesNotMatchDPVersion;
	bool LinkRateForMultistreamNotIndicated;
	bool BPPForMultistreamNotIndicated;
	bool MultistreamWithHDMIOreDP;
	bool MSOOrODMSplitWithNonDPLink;
	bool NotEnoughLanesForMSO;
	bool NumberOfOTGSupport;
	bool NumberOfHDMIFRLSupport;
	bool NumberOfDP2p0Support;
	bool WritebackScaleRatioAndTapsSupport;
	bool CursorSupport;
	bool PitchSupport;
	bool ViewportExceedsSurface;
	bool ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified;
	bool ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe;
	bool InvalidCombinationOfMALLUseForPStateAndStaticScreen;
	bool InvalidCombinationOfMALLUseForPState;
	bool ExceededMALLSize;
	bool EnoughWritebackUnits;
	bool ExceededMultistreamSlots;
	bool NotEnoughDSCUnits;
	bool NotEnoughDSCSlices;
	bool PixelsPerLinePerDSCUnitSupport;
	bool DSCCLKRequiredMoreThanSupported;
	bool DTBCLKRequiredMoreThanSupported;
	bool LinkCapacitySupport;
	bool ROBSupport;
	bool ROBUrgencyAvoidance;
	bool OutstandingRequestsSupport;
	bool OutstandingRequestsUrgencyAvoidance;
	bool PTEBufferSizeNotExceeded;
	bool DCCMetaBufferSizeNotExceeded;
	bool TotalVerticalActiveBandwidthSupport;
	bool VActiveBandwidthSupport;
	enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
	bool USRRetrainingSupport;
	bool PrefetchSupported;
	bool DynamicMetadataSupported;
	bool VRatioInPrefetchSupported;
	bool DISPCLK_DPPCLK_Support;
	bool TotalAvailablePipesSupport;
	bool ViewportSizeSupport;
	bool ImmediateFlipSupportedForState;
	double MaxTotalVerticalActiveAvailableBandwidth;
	/* Per-plane settings selected by mode support and consumed by mode programming: */
	bool MPCCombineEnable[DML2_MAX_PLANES]; /// <brief Indicate if the MPC Combine enable in the given state and optimize mpc combine setting
	enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; /// <brief ODM mode that is chosen in the mode check stage and will be used in mode programming stage
	unsigned int DPPPerSurface[DML2_MAX_PLANES]; /// <brief How many DPPs are needed drive the surface to output. If MPCC or ODMC could be 2 or 4.
	bool DSCEnabled[DML2_MAX_PLANES]; /// <brief Indicate if the DSC is actually required; used in mode_programming
	bool FECEnabled[DML2_MAX_PLANES]; /// <brief Indicate if the FEC is actually required
	unsigned int NumberOfDSCSlices[DML2_MAX_PLANES]; /// <brief Indicate how many slices needed to support the given mode
	double OutputBpp[DML2_MAX_PLANES];
	enum dml2_output_type_and_rate__type OutputType[DML2_MAX_PLANES];
	enum dml2_output_type_and_rate__rate OutputRate[DML2_MAX_PLANES];
	unsigned int AlignedYPitch[DML2_MAX_PLANES];
	unsigned int AlignedCPitch[DML2_MAX_PLANES];
	bool g6_temp_read_support;
}; // dml2_mode_support_info
/*
 * Top-level output of DML2 mode programming: the echoed display config,
 * required minimum clocks, global/per-plane/per-stream register values, and
 * a large "informative" section of derived data (watermarks, bandwidth,
 * stutter, per-pipe timing details) for reporting and debug.
 */
struct dml2_display_cfg_programming {
	/* verbatim copy of the input display config; descriptor pointers in the
	 * per-plane/per-stream programming point into this copy */
	struct dml2_display_cfg display_config;

	/* minimum clocks required for this config; layout depends on DCN generation */
	union {
		struct {
			unsigned long dcfclk_khz;
			unsigned long fclk_khz;
			unsigned long uclk_khz;
			unsigned long socclk_khz;
			unsigned long dispclk_khz;
			unsigned long dcfclk_deepsleep_khz;
			unsigned long dpp_ref_khz;
		} dcn3;
		struct {
			struct {
				unsigned long uclk_khz;
				unsigned long fclk_khz;
				unsigned long dcfclk_khz;
			} active;
			struct {
				unsigned long uclk_khz;
				unsigned long fclk_khz;
				unsigned long dcfclk_khz;
			} idle;
			struct {
				unsigned long uclk_khz;
				unsigned long fclk_khz;
				unsigned long dcfclk_khz;
			} svp_prefetch;

			unsigned long deepsleep_dcfclk_khz;
			unsigned long dispclk_khz;
			unsigned long dpprefclk_khz;
			unsigned long dtbrefclk_khz;
			unsigned long socclk_khz;

			struct {
				uint32_t dispclk_did;
				uint32_t dpprefclk_did;
				uint32_t dtbrefclk_did;
			} divider_ids;
		} dcn4;
	} min_clocks;

	bool uclk_pstate_supported;
	bool fclk_pstate_supported;

	/* indicates this configuration requires FW to support */
	bool fams2_required;

	struct {
		bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
	} stutter;

	struct {
		bool meets_eco; // Stutter cycles will meet Z8 ECO criteria
		bool supported_in_blank; // Changing to configurations where this is false requires Z8 to be disabled during the transition
	} z8_stutter;

	struct dml2_dchub_global_register_set global_regs;

	struct dml2_per_plane_programming plane_programming[DML2_MAX_PLANES];
	struct dml2_per_stream_programming stream_programming[DML2_MAX_PLANES];

	// Don't access this structure directly, access it through plane_programming.pipe_regs
	struct dml2_dchub_per_pipe_register_set pipe_regs[DML2_MAX_PLANES];

	/* Derived/diagnostic outputs below; not programmed to hardware directly. */
	struct {
		struct {
			double urgent_us;
			double writeback_urgent_us;
			double writeback_pstate_us;
			double writeback_fclk_pstate_us;
			double cstate_exit_us;
			double cstate_enter_plus_exit_us;
			double z8_cstate_exit_us;
			double z8_cstate_enter_plus_exit_us;
			double pstate_change_us;
			double fclk_pstate_change_us;
			double usr_retraining_us;
			double g6_temp_read_watermark_us;
		} watermarks;

		struct {
			unsigned int swath_width_plane0;
			unsigned int swath_height_plane0;
			unsigned int swath_height_plane1;
			unsigned int dpte_row_height_plane0;
			unsigned int dpte_row_height_plane1;
			unsigned int meta_row_height_plane0;
			unsigned int meta_row_height_plane1;
		} plane_info[DML2_MAX_PLANES];

		struct {
			unsigned long long total_surface_size_in_mall_bytes;
			unsigned int subviewport_lines_needed_in_mall[DML2_MAX_PLANES];
		} mall;

		/* latency and bandwidth accounting (bandwidths in MB/s) */
		struct {
			double urgent_latency_us; // urgent ramp latency
			double max_non_urgent_latency_us;
			double max_urgent_latency_us;
			double avg_non_urgent_latency_us;
			double avg_urgent_latency_us;
			double wm_memory_trip_us;
			double meta_trip_memory_us;
			double fraction_of_urgent_bandwidth; // nom
			double fraction_of_urgent_bandwidth_immediate_flip;
			double fraction_of_urgent_bandwidth_mall;
			double max_active_fclk_change_latency_supported;
			unsigned int min_return_latency_in_dcfclk;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
					double dram_vm_only_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
					double dram_vm_only_bw_mbps;
				} sys_active;
			} urg_bw_available;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} avg_bw_available;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} non_urg_bw_required;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} non_urg_bw_required_with_flip;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} urg_bw_required;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} urg_bw_required_with_flip;

			struct {
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} svp_prefetch;
				struct {
					double sdp_bw_mbps;
					double dram_bw_mbps;
				} sys_active;
			} avg_bw_required;
		} qos;

		/* compressed return buffer / DET allocation */
		struct {
			unsigned long long det_size_in_kbytes[DML2_MAX_PLANES];
			unsigned long long DETBufferSizeY[DML2_MAX_PLANES];
			unsigned long long comp_buffer_size_kbytes;
			bool UnboundedRequestEnabled;
			unsigned int compbuf_reserved_space_64b;
		} crb;

		struct {
			unsigned int max_uncompressed_block_plane0;
			unsigned int max_compressed_block_plane0;
			unsigned int independent_block_plane0;
			unsigned int max_uncompressed_block_plane1;
			unsigned int max_compressed_block_plane1;
			unsigned int independent_block_plane1;
		} dcc_control[DML2_MAX_PLANES];

		struct {
			double stutter_efficiency;
			double stutter_efficiency_with_vblank;
			double stutter_num_bursts;

			struct {
				double stutter_efficiency;
				double stutter_efficiency_with_vblank;
				double stutter_num_bursts;
				double stutter_period;

				struct {
					double stutter_efficiency;
					double stutter_num_bursts;
					double stutter_period;
				} bestcase;
			} z8;
		} power_management;

		struct {
			double min_ttu_vblank_us[DML2_MAX_PLANES];
			bool vready_at_or_after_vsync[DML2_MAX_PLANES];
			double min_dst_y_next_start[DML2_MAX_PLANES];
			bool cstate_max_cap_mode;
			bool hw_debug5;
			unsigned int dcfclk_deep_sleep_hysteresis;
			unsigned int dst_x_after_scaler[DML2_MAX_PLANES];
			unsigned int dst_y_after_scaler[DML2_MAX_PLANES];
			unsigned int prefetch_source_lines_plane0[DML2_MAX_PLANES];
			unsigned int prefetch_source_lines_plane1[DML2_MAX_PLANES];
			bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
			bool UsesMALLForStaticScreen[DML2_MAX_PLANES];
			unsigned int CursorDstXOffset[DML2_MAX_PLANES];
			unsigned int CursorDstYOffset[DML2_MAX_PLANES];
			unsigned int CursorChunkHDLAdjust[DML2_MAX_PLANES];
			unsigned int dpte_group_bytes[DML2_MAX_PLANES];
			unsigned int vm_group_bytes[DML2_MAX_PLANES];
			double DisplayPipeRequestDeliveryTimeLuma[DML2_MAX_PLANES];
			double DisplayPipeRequestDeliveryTimeChroma[DML2_MAX_PLANES];
			double DisplayPipeRequestDeliveryTimeLumaPrefetch[DML2_MAX_PLANES];
			double DisplayPipeRequestDeliveryTimeChromaPrefetch[DML2_MAX_PLANES];
			double TimePerVMGroupVBlank[DML2_MAX_PLANES];
			double TimePerVMGroupFlip[DML2_MAX_PLANES];
			double TimePerVMRequestVBlank[DML2_MAX_PLANES];
			double TimePerVMRequestFlip[DML2_MAX_PLANES];
			double Tdmdl_vm[DML2_MAX_PLANES];
			double Tdmdl[DML2_MAX_PLANES];
			unsigned int VStartup[DML2_MAX_PLANES];
			unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
			unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
			unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
			double DST_Y_PER_PTE_ROW_NOM_L[DML2_MAX_PLANES];
			double DST_Y_PER_PTE_ROW_NOM_C[DML2_MAX_PLANES];
			double time_per_pte_group_nom_luma[DML2_MAX_PLANES];
			double time_per_pte_group_nom_chroma[DML2_MAX_PLANES];
			double time_per_pte_group_vblank_luma[DML2_MAX_PLANES];
			double time_per_pte_group_vblank_chroma[DML2_MAX_PLANES];
			double time_per_pte_group_flip_luma[DML2_MAX_PLANES];
			double time_per_pte_group_flip_chroma[DML2_MAX_PLANES];
			double VRatioPrefetchY[DML2_MAX_PLANES];
			double VRatioPrefetchC[DML2_MAX_PLANES];
			double DestinationLinesForPrefetch[DML2_MAX_PLANES];
			double DestinationLinesToRequestVMInVBlank[DML2_MAX_PLANES];
			double DestinationLinesToRequestRowInVBlank[DML2_MAX_PLANES];
			double DestinationLinesToRequestVMInImmediateFlip[DML2_MAX_PLANES];
			double DestinationLinesToRequestRowInImmediateFlip[DML2_MAX_PLANES];
			double DisplayPipeLineDeliveryTimeLuma[DML2_MAX_PLANES];
			double DisplayPipeLineDeliveryTimeChroma[DML2_MAX_PLANES];
			double DisplayPipeLineDeliveryTimeLumaPrefetch[DML2_MAX_PLANES];
			double DisplayPipeLineDeliveryTimeChromaPrefetch[DML2_MAX_PLANES];
			double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
			double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
			double DSCCLK_calculated[DML2_MAX_PLANES];
			unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
			bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
			double DSCDelay[DML2_MAX_PLANES];
			double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
			unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
		} misc;

		struct dml2_mode_support_info mode_support_info;
		unsigned int voltage_level; // LEGACY_ONLY

		// For DV only
		// This is what dml core calculated, only on the full_vp width and assume we have
		// unlimited # of mcache
		struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];

		bool failed_mcache_validation;
		bool failed_dpmm;
		bool failed_mode_programming;
	} informative;
};
/* In/out parameter block for building mode programming from a display config. */
struct dml2_build_mode_programming_in_out {
	/*
	 * Inputs
	 */
	struct dml2_instance *dml2_instance;
	const struct dml2_display_cfg *display_config;

	/*
	 * Outputs
	 */
	struct dml2_display_cfg_programming *programming;
};
/* In/out parameter block for building per-pipe mcache programming. */
struct dml2_build_mcache_programming_in_out {
	/*
	 * Inputs
	 */
	struct dml2_instance *dml2_instance;
	struct dml2_plane_mcache_configuration_descriptor mcache_configurations[DML2_MAX_PLANES];
	/* number of valid entries in mcache_configurations */
	char num_configurations;

	/*
	 * Outputs
	 */
	// per_plane_pipe_mcache_regs[i][j] refers to the proper programming for the j-th pipe of the
	// i-th plane (from mcache_configurations)
	struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs[DML2_MAX_PLANES][DML2_MAX_DCN_PIPES];

	// It's not a good idea to reference this directly, better to use the pointer structure above instead
	struct dml2_hubp_pipe_mcache_regs mcache_regs_set[DML2_MAX_DCN_PIPES];
};
/* Parameter block for DML2 unit-test entry points; currently inputs only. */
struct dml2_unit_test_in_out {
	/*
	 * Inputs
	 */
	struct dml2_instance *dml2_instance;
};
#endif

View file

@ -0,0 +1,628 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_internal_shared_types.h"
#include "dml2_core_shared_types.h"
#include "dml2_core_dcn4.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_debug.h"
#include "lib_float_math.h"
/*
 * Baseline IP parameters for the DCN4 core. Used verbatim when the caller
 * does not supply an explicit IP blob; selected fields are then patched
 * from ip_caps in core_dcn4_initialize(). Values are hardware tuning
 * constants and are taken as-is.
 */
struct dml2_core_ip_params core_dcn4_ip_caps_base = {
	// Hardcoded values for DCN3x
	.vblank_nom_default_us = 668,
	.remote_iommu_outstanding_translations = 256,
	.rob_buffer_size_kbytes = 128,
	.config_return_buffer_size_in_kbytes = 1280,
	.config_return_buffer_segment_size_in_kbytes = 64,
	.compressed_buffer_segment_size_in_kbytes = 64,
	.dpte_buffer_size_in_pte_reqs_luma = 68,
	.dpte_buffer_size_in_pte_reqs_chroma = 36,
	.pixel_chunk_size_kbytes = 8,
	.alpha_pixel_chunk_size_kbytes = 4,
	.min_pixel_chunk_size_bytes = 1024,
	.writeback_chunk_size_kbytes = 8,
	.line_buffer_size_bits = 1171920,
	.max_line_buffer_lines = 32,
	.writeback_interface_buffer_size_kbytes = 90,

	//Number of pipes after DCN Pipe harvesting
	.max_num_dpp = 4,
	.max_num_otg = 4,
	.max_num_wb = 1,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 6,
	.max_vscl_ratio = 6,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dispclk_ramp_margin_percent = 1,
	.dppclk_delay_subtotal = 47,
	.dppclk_delay_scl = 50,
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_cnvc_formatter = 28,
	.dppclk_delay_cnvc_cursor = 6,
	.cursor_buffer_size = 24,
	.cursor_chunk_size = 2,
	.dispclk_delay_subtotal = 125,
	.max_inter_dcn_tile_repeaters = 8,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 1,
	.writeback_max_vscl_taps = 1,
	.writeback_line_buffer_buffer_size = 0,
	.num_dsc = 4,
	.maximum_dsc_bits_per_component = 12,
	.maximum_pixels_per_line_per_dsc_unit = 5760,
	.dsc422_native_support = true,
	.dcc_supported = true,
	.ptoi_supported = false,
	.cursor_64bpp_support = true,
	.dynamic_metadata_vm_enabled = false,
	.max_num_hdmi_frl_outputs = 1,
	.max_num_dp2p0_outputs = 4,
	.max_num_dp2p0_streams = 4,
	.imall_supported = 1,
	.max_flip_time_us = 80,
	.words_per_channel = 16,

	// SubVP scheduling parameters (microseconds / lines)
	.subvp_fw_processing_delay_us = 15,
	.subvp_pstate_allow_width_us = 20,
	.subvp_swath_height_margin_lines = 16,
};
/*
 * DV path: the caller supplied an explicit dml2_core_ip_params blob, so
 * back-fill the driver-facing ip_caps view from it to keep both
 * representations consistent.
 */
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
{
	ip_caps->pipe_count = ip_params->max_num_dpp;
	ip_caps->otg_count = ip_params->max_num_otg;
	ip_caps->num_dsc = ip_params->num_dsc;
	ip_caps->max_num_dp2p0_streams = ip_params->max_num_dp2p0_streams;
	ip_caps->max_num_dp2p0_outputs = ip_params->max_num_dp2p0_outputs;
	ip_caps->rob_buffer_size_kbytes = ip_params->rob_buffer_size_kbytes;
	ip_caps->config_return_buffer_size_in_kbytes = ip_params->config_return_buffer_size_in_kbytes;
	ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
	ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;

	// FIXME_STAGE2: cleanup after adding all dv override to ip_caps
	// NOTE(review): the three SubVP values below are hardcoded rather than
	// taken from ip_params — confirm they should not come from the blob.
	ip_caps->subvp_drr_scheduling_margin_us = 100;
	ip_caps->subvp_prefetch_end_to_mall_start_us = 15;
	ip_caps->subvp_fw_processing_delay = 16;
}
/*
 * Normal path: override the built-in DCN4 IP parameter defaults with the
 * capabilities the driver reported via ip_caps (inverse of
 * patch_ip_caps_with_explicit_ip_params()).
 */
static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params, const struct dml2_ip_capabilities *ip_caps)
{
	ip_params->max_num_dpp = ip_caps->pipe_count;
	ip_params->max_num_otg = ip_caps->otg_count;
	ip_params->num_dsc = ip_caps->num_dsc;
	ip_params->max_num_dp2p0_streams = ip_caps->max_num_dp2p0_streams;
	ip_params->max_num_dp2p0_outputs = ip_caps->max_num_dp2p0_outputs;
	ip_params->rob_buffer_size_kbytes = ip_caps->rob_buffer_size_kbytes;
	ip_params->config_return_buffer_size_in_kbytes = ip_caps->config_return_buffer_size_in_kbytes;
	ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
	ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
}
/*
 * Initialize the DCN4 core instance: bind the minimum clock table, then
 * populate mode_lib IP parameters either from an explicitly supplied IP
 * blob (DV path, with ip_caps back-filled from the blob) or from the
 * built-in core_dcn4_ip_caps_base patched by ip_caps. The SoC bounding
 * box is always copied from in_out->soc_bb.
 *
 * Returns false when no minimum clock table is provided, true otherwise.
 */
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
{
	struct dml2_core_instance *core = in_out->instance;

	if (!in_out->minimum_clock_table)
		return false;

	core->minimum_clock_table = in_out->minimum_clock_table;

	if (in_out->explicit_ip_bb && in_out->explicit_ip_bb_size > 0) {
		memcpy(&core->clean_me_up.mode_lib.ip, in_out->explicit_ip_bb, in_out->explicit_ip_bb_size);

		// FIXME_STAGE2:
		// DV still uses stage1 ip_param_st for each variant, need to patch the ip_caps with ip_param info
		// Should move DV to use ip_caps but need move more overrides to ip_caps
		patch_ip_caps_with_explicit_ip_params(in_out->ip_caps, in_out->explicit_ip_bb);

		/* SubVP scheduling parameters always come from the built-in base
		 * table, even on the explicit-blob path. Bug fix: the fw processing
		 * delay was previously copied from subvp_pstate_allow_width_us
		 * (copy-paste error); it must come from its own base field. */
		core->clean_me_up.mode_lib.ip.subvp_pstate_allow_width_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
		core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_fw_processing_delay_us;
		core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
	} else {
		memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
		patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);

		core->clean_me_up.mode_lib.ip.imall_supported = false;
	}

	memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb));

	return true;
}
static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct dml2_stream_parameters *main,
const struct dml2_implicit_svp_meta *meta)
{
memcpy(phantom, main, sizeof(struct dml2_stream_parameters));
phantom->timing.v_total = meta->v_total;
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
phantom->timing.dsc.enable = dml2_dsc_disable;
phantom->timing.drr_config.enabled = false;
}
static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
{
memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
phantom->stream_index = phantom_stream_index;
phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
phantom->composition.viewport.plane0.height = (long int unsigned) math_ceil2(
(double)phantom->composition.viewport.plane0.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
phantom->composition.viewport.plane1.height = (long int unsigned) math_ceil2(
(double)phantom->composition.viewport.plane1.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
phantom->immediate_flip = false;
phantom->dynamic_meta_data.enable = false;
phantom->cursor.num_cursors = 0;
phantom->cursor.cursor_width = 0;
phantom->tdlut.setup_for_tdlut = false;
}
/*
 * Expand an "implicit SubVP" display config: for every main stream/plane that
 * stage3 marked with valid SVP meta, append a generated phantom stream/plane
 * to svp_expanded_display_cfg and record the main<->phantom index mappings in
 * the core scratch space. When implicit SubVP is disabled, the output is just
 * a verbatim copy of the input config with identity stream mappings.
 * (NOTE(review): "implict" in the name is a pre-existing typo, kept to match
 * the caller.)
 */
static void expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
	struct dml2_core_scratch *scratch)
{
	unsigned int stream_index, plane_index;
	const struct dml2_plane_parameters *main_plane;
	const struct dml2_stream_parameters *main_stream;
	const struct dml2_stream_parameters *phantom_stream;

	/* Start from a verbatim copy of the incoming config and clear the maps. */
	memcpy(svp_expanded_display_cfg, &display_cfg->display_config, sizeof(struct dml2_display_cfg));
	memset(scratch->main_stream_index_from_svp_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
	memset(scratch->svp_stream_index_from_main_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
	memset(scratch->main_plane_index_to_phantom_plane_index, 0, sizeof(int) * DML2_MAX_PLANES);

	if (!display_cfg->display_config.overrides.enable_subvp_implicit_pmo)
		return;

	/* disable unbounded requesting for all planes until stage 3 has been performed */
	if (!display_cfg->stage3.performed) {
		svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.enable = true;
		svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.value = false;
	}

	// Create the phantom streams
	for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
		main_stream = &display_cfg->display_config.stream_descriptors[stream_index];
		/* Identity mapping first; overwritten below if a phantom is created. */
		scratch->main_stream_index_from_svp_stream_index[stream_index] = stream_index;
		scratch->svp_stream_index_from_main_stream_index[stream_index] = stream_index;

		if (display_cfg->stage3.stream_svp_meta[stream_index].valid) {
			// Create the phantom stream
			create_phantom_stream_from_main_stream(&svp_expanded_display_cfg->stream_descriptors[svp_expanded_display_cfg->num_streams],
				main_stream, &display_cfg->stage3.stream_svp_meta[stream_index]);

			// Associate this phantom stream to the main stream
			scratch->main_stream_index_from_svp_stream_index[svp_expanded_display_cfg->num_streams] = stream_index;
			scratch->svp_stream_index_from_main_stream_index[stream_index] = svp_expanded_display_cfg->num_streams;

			// Increment num streams
			svp_expanded_display_cfg->num_streams++;
		}
	}

	// Create the phantom planes
	for (plane_index = 0; plane_index < display_cfg->display_config.num_planes; plane_index++) {
		main_plane = &display_cfg->display_config.plane_descriptors[plane_index];

		if (display_cfg->stage3.stream_svp_meta[main_plane->stream_index].valid) {
			main_stream = &display_cfg->display_config.stream_descriptors[main_plane->stream_index];
			phantom_stream = &svp_expanded_display_cfg->stream_descriptors[scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index]];
			create_phantom_plane_from_main_plane(&svp_expanded_display_cfg->plane_descriptors[svp_expanded_display_cfg->num_planes],
				main_plane, phantom_stream, scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index], main_stream);

			// Associate this phantom plane to the main plane
			scratch->phantom_plane_index_to_main_plane_index[svp_expanded_display_cfg->num_planes] = plane_index;
			scratch->main_plane_index_to_phantom_plane_index[plane_index] = svp_expanded_display_cfg->num_planes;

			// Increment num planes
			svp_expanded_display_cfg->num_planes++;

			// Adjust the main plane settings
			svp_expanded_display_cfg->plane_descriptors[plane_index].overrides.legacy_svp_config = dml2_svp_mode_override_main_pipe;
		}
	}
}
/*
 * Translate DML core results into the caller-facing programming struct for a
 * config that was expanded with implicit SubVP phantoms. Three passes:
 *   1) per-stream programming for the main streams (phantom streams are
 *      folded into their main stream's programming),
 *   2) per-plane programming for the main planes (pipe registers, MALL
 *      allocation, p-state method, FAMS2 stream params),
 *   3) phantom planes, which occupy DML's internal pipe indices after all
 *      main planes.
 * dml_internal_pipe_index tracks DML's internal pipe ordering across passes
 * 2 and 3; total_pipe_regs_copied allocates slots from the shared
 * programming->pipe_regs[] pool.
 */
static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_instance *core, const struct display_configuation_with_meta *display_cfg,
	const struct dml2_display_cfg *svp_expanded_display_cfg, struct dml2_display_cfg_programming *programming, struct dml2_core_scratch *scratch)
{
	unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index;
	int total_pipe_regs_copied = 0;
	int dml_internal_pipe_index = 0;
	const struct dml2_plane_parameters *main_plane;
	const struct dml2_plane_parameters *phantom_plane;
	const struct dml2_stream_parameters *main_stream;
	const struct dml2_stream_parameters *phantom_stream;

	// Copy the unexpanded display config to output
	memcpy(&programming->display_config, &display_cfg->display_config, sizeof(struct dml2_display_cfg));

	// Set the global register values
	dml2_core_calcs_get_arb_params(&display_cfg->display_config, &core->clean_me_up.mode_lib, &programming->global_regs.arb_regs);
	// Get watermarks uses display config for ref clock override, so it doesn't matter whether we pass the pre or post expansion
	// display config
	dml2_core_calcs_get_watermarks(&display_cfg->display_config, &core->clean_me_up.mode_lib, &programming->global_regs.wm_regs[0]);

	// Check if FAMS2 is required
	if (display_cfg->stage3.performed && display_cfg->stage3.success) {
		programming->fams2_required = display_cfg->stage3.fams2_required;
	}

	// Only loop over all the main streams (the implicit svp streams will be packed as part of the main stream)
	for (stream_index = 0; stream_index < programming->display_config.num_streams; stream_index++) {
		main_stream = &svp_expanded_display_cfg->stream_descriptors[stream_index];
		phantom_stream = &svp_expanded_display_cfg->stream_descriptors[scratch->svp_stream_index_from_main_stream_index[stream_index]];

		// Set the descriptor
		programming->stream_programming[stream_index].stream_descriptor = &programming->display_config.stream_descriptors[stream_index];

		// Set the odm combine factor
		programming->stream_programming[stream_index].num_odms_required = display_cfg->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used;

		// Check if the stream has implicit SVP enabled
		// (the index map points a main stream at itself when no phantom was created)
		if (main_stream != phantom_stream) {
			// If so, copy the phantom stream descriptor
			programming->stream_programming[stream_index].phantom_stream.enabled = true;
			memcpy(&programming->stream_programming[stream_index].phantom_stream.descriptor, phantom_stream, sizeof(struct dml2_stream_parameters));
		} else {
			programming->stream_programming[stream_index].phantom_stream.enabled = false;
		}

		// Due to the way DML indexes data internally, it's easier to populate the rest of the display
		// stream programming in the next stage
	}

	dml_internal_pipe_index = 0;
	total_pipe_regs_copied = 0;
	stream_already_populated_mask = 0x0;

	// Loop over all main planes
	for (plane_index = 0; plane_index < programming->display_config.num_planes; plane_index++) {
		main_plane = &svp_expanded_display_cfg->plane_descriptors[plane_index];

		// Set the descriptor
		programming->plane_programming[plane_index].plane_descriptor = &programming->display_config.plane_descriptors[plane_index];

		// Set the mpc combine factor
		programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];

		// Setup the appropriate p-state strategy
		if (display_cfg->stage3.performed && display_cfg->stage3.success) {
			switch (display_cfg->stage3.pstate_switch_modes[plane_index]) {
			case dml2_uclk_pstate_support_method_vactive:
			case dml2_uclk_pstate_support_method_vblank:
			case dml2_uclk_pstate_support_method_fw_subvp_phantom:
			case dml2_uclk_pstate_support_method_fw_drr:
			case dml2_uclk_pstate_support_method_fw_vactive_drr:
			case dml2_uclk_pstate_support_method_fw_vblank_drr:
			case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
				programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
				break;
			case dml2_uclk_pstate_support_method_reserved_hw:
			case dml2_uclk_pstate_support_method_reserved_fw:
			case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
			case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
			case dml2_uclk_pstate_support_method_not_supported:
			case dml2_uclk_pstate_support_method_count:
			default:
				programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
				break;
			}
		} else {
			programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
		}

		dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);

		for (pipe_offset = 0; pipe_offset < programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
			// Assign storage for this pipe's register values
			programming->plane_programming[plane_index].pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
			memset(programming->plane_programming[plane_index].pipe_regs[pipe_offset], 0, sizeof(struct dml2_dchub_per_pipe_register_set));
			total_pipe_regs_copied++;

			// Populate the main plane regs
			dml2_core_calcs_get_pipe_regs(svp_expanded_display_cfg, &core->clean_me_up.mode_lib, programming->plane_programming[plane_index].pipe_regs[pipe_offset], dml_internal_pipe_index);

			// Multiple planes can refer to the same stream index, so it's only necessary to populate it once
			if (!(stream_already_populated_mask & (0x1 << main_plane->stream_index))) {
				dml2_core_calcs_get_stream_programming(&core->clean_me_up.mode_lib, &programming->stream_programming[main_plane->stream_index], dml_internal_pipe_index);

				programming->stream_programming[main_plane->stream_index].uclk_pstate_method = programming->plane_programming[plane_index].uclk_pstate_support_method;

				// If FAMS2 is required, populate stream params
				if (programming->fams2_required) {
					dml2_core_calcs_get_stream_fams2_programming(&core->clean_me_up.mode_lib,
						display_cfg,
						&programming->stream_programming[main_plane->stream_index].fams2_params,
						programming->stream_programming[main_plane->stream_index].uclk_pstate_method,
						plane_index);
				}

				stream_already_populated_mask |= (0x1 << main_plane->stream_index);
			}
			dml_internal_pipe_index++;
		}
	}

	// Phantom planes occupy the expanded-config plane slots after the main planes
	for (plane_index = programming->display_config.num_planes; plane_index < svp_expanded_display_cfg->num_planes; plane_index++) {
		phantom_plane = &svp_expanded_display_cfg->plane_descriptors[plane_index];
		main_plane_index = scratch->phantom_plane_index_to_main_plane_index[plane_index];
		main_plane = &svp_expanded_display_cfg->plane_descriptors[main_plane_index];

		programming->plane_programming[main_plane_index].phantom_plane.valid = true;
		memcpy(&programming->plane_programming[main_plane_index].phantom_plane.descriptor, phantom_plane, sizeof(struct dml2_plane_parameters));

		dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[main_plane_index].svp_size_mall_bytes, dml_internal_pipe_index);

		for (pipe_offset = 0; pipe_offset < programming->plane_programming[main_plane_index].num_dpps_required; pipe_offset++) {
			// Assign storage for this pipe's register values
			programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
			memset(programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset], 0, sizeof(struct dml2_dchub_per_pipe_register_set));
			total_pipe_regs_copied++;

			// Populate the phantom plane regs
			dml2_core_calcs_get_pipe_regs(svp_expanded_display_cfg, &core->clean_me_up.mode_lib, programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset], dml_internal_pipe_index);

			// Populate the phantom stream specific programming
			if (!(stream_already_populated_mask & (0x1 << phantom_plane->stream_index))) {
				dml2_core_calcs_get_global_sync_programming(&core->clean_me_up.mode_lib, &programming->stream_programming[main_plane->stream_index].phantom_stream.global_sync, dml_internal_pipe_index);

				stream_already_populated_mask |= (0x1 << phantom_plane->stream_index);
			}

			dml_internal_pipe_index++;
		}
	}
}
bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
{
struct dml2_core_instance *core = (struct dml2_core_instance *)in_out->instance;
struct dml2_core_mode_support_locals *l = &core->scratch.mode_support_locals;
bool result;
unsigned int i, stream_index, stream_bitmask;
int unsigned odm_count, dpp_count;
expand_implict_subvp(in_out->display_cfg, &l->svp_expanded_display_cfg, &core->scratch);
l->mode_support_ex_params.mode_lib = &core->clean_me_up.mode_lib;
l->mode_support_ex_params.in_display_cfg = &l->svp_expanded_display_cfg;
l->mode_support_ex_params.min_clk_table = in_out->min_clk_table;
l->mode_support_ex_params.min_clk_index = in_out->min_clk_index;
l->mode_support_ex_params.out_evaluation_info = &in_out->mode_support_result.cfg_support_info.clean_me_up.support_info;
result = dml2_core_calcs_mode_support_ex(&l->mode_support_ex_params);
in_out->mode_support_result.cfg_support_info.is_supported = result;
if (result) {
in_out->mode_support_result.global.dispclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDISPCLK * 1000);
in_out->mode_support_result.global.dcfclk_deepsleep_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.dcfclk_deepsleep * 1000);
in_out->mode_support_result.global.socclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.SOCCLK * 1000);
in_out->mode_support_result.global.fclk_pstate_supported = l->mode_support_ex_params.out_evaluation_info->global_fclk_change_supported;
in_out->mode_support_result.global.uclk_pstate_supported = l->mode_support_ex_params.out_evaluation_info->global_dram_clock_change_supported;
in_out->mode_support_result.global.active.fclk_khz = (unsigned long)(core->clean_me_up.mode_lib.ms.FabricClock * 1000);
in_out->mode_support_result.global.active.dcfclk_khz = (unsigned long)(core->clean_me_up.mode_lib.ms.DCFCLK * 1000);
in_out->mode_support_result.global.svp_prefetch.fclk_khz = (unsigned long)core->clean_me_up.mode_lib.ms.FabricClock * 1000;
in_out->mode_support_result.global.svp_prefetch.dcfclk_khz = (unsigned long)core->clean_me_up.mode_lib.ms.DCFCLK * 1000;
in_out->mode_support_result.global.active.average_bw_sdp_kbps = 0;
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = 0;
in_out->mode_support_result.global.svp_prefetch.average_bw_sdp_kbps = 0;
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = 0;
in_out->mode_support_result.global.active.average_bw_sdp_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] * 1000), 1.0);
in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_sdp_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] * 1000), 1.0);
in_out->mode_support_result.global.active.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
}
stream_bitmask = 0;
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
switch (l->mode_support_ex_params.out_evaluation_info->ODMMode[i]) {
case dml2_odm_mode_bypass:
odm_count = 1;
dpp_count = l->mode_support_ex_params.out_evaluation_info->DPPPerSurface[i];
break;
case dml2_odm_mode_combine_2to1:
odm_count = 2;
dpp_count = 2;
break;
case dml2_odm_mode_combine_3to1:
odm_count = 3;
dpp_count = 3;
break;
case dml2_odm_mode_combine_4to1:
odm_count = 4;
dpp_count = 4;
break;
case dml2_odm_mode_split_1to2:
case dml2_odm_mode_mso_1to2:
case dml2_odm_mode_mso_1to4:
case dml2_odm_mode_auto:
default:
odm_count = 1;
dpp_count = l->mode_support_ex_params.out_evaluation_info->DPPPerSurface[i];
break;
}
in_out->mode_support_result.cfg_support_info.plane_support_info[i].dpps_used = dpp_count;
dml2_core_calcs_get_plane_support_info(&l->svp_expanded_display_cfg, &core->clean_me_up.mode_lib, &in_out->mode_support_result.cfg_support_info.plane_support_info[i], i);
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].dsc_enable = l->mode_support_ex_params.out_evaluation_info->DSCEnabled[i];
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].num_dsc_slices = l->mode_support_ex_params.out_evaluation_info->NumberOfDSCSlices[i];
dml2_core_calcs_get_stream_support_info(&l->svp_expanded_display_cfg, &core->clean_me_up.mode_lib, &in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index], i);
in_out->mode_support_result.per_stream[stream_index].dtbclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDTBCLK[i] * 1000);
stream_bitmask |= 0x1 << stream_index;
}
}
}
return result;
}
static int lookup_uclk_dpm_index_by_freq(unsigned long uclk_freq_khz, struct dml2_soc_bb *soc_bb)
{
int i;
for (i = 0; i < soc_bb->clk_table.uclk.num_clk_values; i++) {
if (uclk_freq_khz == soc_bb->clk_table.uclk.clk_values_khz[i])
return i;
}
return 0;
}
/*
 * core_dcn4_mode_programming() - compute clock and register programming for a
 * previously validated display configuration.
 *
 * Expands implicit SVP phantom planes, runs the DML programming calculation,
 * and on success packs arbitration parameters, watermarks, per-plane MALL
 * allocation, per-pipe register sets and per-stream programming into
 * in_out->programming. Configurations using implicit SVP go through the
 * dedicated packer instead.
 *
 * Returns true on success.
 */
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out)
{
	struct dml2_core_instance *core = (struct dml2_core_instance *)in_out->instance;
	struct dml2_core_mode_programming_locals *l = &core->scratch.mode_programming_locals;
	bool result = false;
	unsigned int pipe_offset;
	int dml_internal_pipe_index;
	int total_pipe_regs_copied = 0;
	int stream_already_populated_mask = 0;
	int main_stream_index;
	unsigned int plane_index;

	expand_implict_subvp(in_out->display_cfg, &l->svp_expanded_display_cfg, &core->scratch);

	l->mode_programming_ex_params.mode_lib = &core->clean_me_up.mode_lib;
	l->mode_programming_ex_params.in_display_cfg = &l->svp_expanded_display_cfg;
	l->mode_programming_ex_params.min_clk_table = in_out->instance->minimum_clock_table;
	l->mode_programming_ex_params.cfg_support_info = in_out->cfg_support_info;
	l->mode_programming_ex_params.programming = in_out->programming;
	l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4.active.uclk_khz,
		&core->clean_me_up.mode_lib.soc);

	result = dml2_core_calcs_mode_programming_ex(&l->mode_programming_ex_params);

	if (result) {
		// If the input display configuration contains implict SVP, we need to use a special packer
		if (in_out->display_cfg->display_config.overrides.enable_subvp_implicit_pmo) {
			pack_mode_programming_params_with_implicit_subvp(core, in_out->display_cfg, &l->svp_expanded_display_cfg, in_out->programming, &core->scratch);
		} else {
			memcpy(&in_out->programming->display_config, in_out->display_cfg, sizeof(struct dml2_display_cfg));

			dml2_core_calcs_get_arb_params(&l->svp_expanded_display_cfg, &core->clean_me_up.mode_lib, &in_out->programming->global_regs.arb_regs);
			dml2_core_calcs_get_watermarks(&l->svp_expanded_display_cfg, &core->clean_me_up.mode_lib, &in_out->programming->global_regs.wm_regs[0]);

			dml_internal_pipe_index = 0;

			for (plane_index = 0; plane_index < in_out->programming->display_config.num_planes; plane_index++) {
				in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];

				/* BUGFIX: index the per-plane descriptor. The original read
				 * plane 0's legacy_svp_config (plane_descriptors->overrides)
				 * for every iteration of this loop. All legacy SVP override
				 * variants map to the FW subvp-phantom support method. */
				if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe ||
					in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe ||
					in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) {
					in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
				} else {
					/* Pick the strongest p-state method the supported latency allows. */
					if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
						in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
					else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
						in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
					else
						in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
				}

				dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);

				for (pipe_offset = 0; pipe_offset < in_out->programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
					in_out->programming->plane_programming[plane_index].plane_descriptor = &in_out->programming->display_config.plane_descriptors[plane_index];

					// Assign storage for this pipe's register values
					in_out->programming->plane_programming[plane_index].pipe_regs[pipe_offset] = &in_out->programming->pipe_regs[total_pipe_regs_copied];
					memset(in_out->programming->plane_programming[plane_index].pipe_regs[pipe_offset], 0, sizeof(struct dml2_dchub_per_pipe_register_set));
					total_pipe_regs_copied++;

					// Populate
					dml2_core_calcs_get_pipe_regs(&l->svp_expanded_display_cfg, &core->clean_me_up.mode_lib, in_out->programming->plane_programming[plane_index].pipe_regs[pipe_offset], dml_internal_pipe_index);

					main_stream_index = in_out->programming->display_config.plane_descriptors[plane_index].stream_index;

					// Multiple planes can refer to the same stream index, so it's only necessary to populate it once
					if (!(stream_already_populated_mask & (0x1 << main_stream_index))) {
						in_out->programming->stream_programming[main_stream_index].stream_descriptor = &in_out->programming->display_config.stream_descriptors[main_stream_index];
						in_out->programming->stream_programming[main_stream_index].num_odms_required = in_out->cfg_support_info->stream_support_info[main_stream_index].odms_used;
						dml2_core_calcs_get_stream_programming(&core->clean_me_up.mode_lib, &in_out->programming->stream_programming[main_stream_index], dml_internal_pipe_index);

						stream_already_populated_mask |= (0x1 << main_stream_index);
					}
					dml_internal_pipe_index++;
				}
			}
		}
	}

	return result;
}
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out)
{
struct dml2_core_internal_display_mode_lib *mode_lib = &in_out->instance->clean_me_up.mode_lib;
if (in_out->mode_is_supported)
in_out->programming->informative.voltage_level = in_out->instance->scratch.mode_programming_locals.mode_programming_ex_params.min_clk_index;
else
in_out->programming->informative.voltage_level = in_out->instance->scratch.mode_support_locals.mode_support_ex_params.min_clk_index;
dml2_core_calcs_get_informative(mode_lib, in_out->programming);
return true;
}
bool core_dcn4_calculate_mcache_allocation(struct dml2_calculate_mcache_allocation_in_out *in_out)
{
memset(in_out->mcache_allocation, 0, sizeof(struct dml2_mcache_surface_allocation));
dml2_core_calcs_get_mcache_allocation(&in_out->instance->clean_me_up.mode_lib, in_out->mcache_allocation, in_out->plane_index);
if (in_out->mcache_allocation->num_mcaches_plane0 > 0)
in_out->mcache_allocation->mcache_x_offsets_plane0[in_out->mcache_allocation->num_mcaches_plane0 - 1] = in_out->plane_descriptor->surface.plane0.width;
if (in_out->mcache_allocation->num_mcaches_plane1 > 0)
in_out->mcache_allocation->mcache_x_offsets_plane1[in_out->mcache_allocation->num_mcaches_plane1 - 1] = in_out->plane_descriptor->surface.plane1.width;
in_out->mcache_allocation->requires_dedicated_mall_mcache = false;
return true;
}

View file

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
/* DCN4 implementation of the dml2 core interface. These entry points are
 * installed on a dml2_core_instance by dml2_core_create() in
 * dml2_core_factory.c. */
#ifndef __DML2_CORE_DCN4_H__
#define __DML2_CORE_DCN4_H__
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out);
/* Evaluates whether the display configuration is supported and fills
 * in_out->mode_support_result; returns true when supported. */
bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out);
/* Computes clock/register programming for a validated configuration into
 * in_out->programming; returns true on success. */
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out);
/* Fills the informative section (e.g. voltage_level) of the programming
 * output; always returns true. */
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out);
/* Computes the mcache allocation for the plane given by in_out->plane_index;
 * always returns true. */
bool core_dcn4_calculate_mcache_allocation(struct dml2_calculate_mcache_allocation_in_out *in_out);
bool core_dcn4_unit_test(void);
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,39 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
/* Calculation entry points used by the DCN4 dml2 core: the mode support /
 * programming solvers plus getters that extract watermarks, arbitration
 * parameters, per-pipe and per-stream register values, and informative data
 * from the internal mode lib after a calculation pass. */
#ifndef __DML2_CORE_DCN4_CALCS_H__
#define __DML2_CORE_DCN4_CALCS_H__
#include "dml2_core_shared_types.h"
/* Forward declarations keep this header light; full definitions live in the
 * shared type headers. */
struct dml2_dchub_watermark_regs;
struct dml2_display_arb_regs;
struct dml2_per_stream_programming;
struct dml2_dchub_per_pipe_register_set;
struct core_plane_support_info;
struct core_stream_support_info;
struct dml2_cursor_dlg_regs;
struct display_configuation_with_meta;
/* Solvers: evaluate support / compute programming for a display config. */
unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support_ex *in_out_params);
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params);
/* Getters: read results out of the mode lib after a solver pass. */
void dml2_core_calcs_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out);
void dml2_core_calcs_get_arb_params(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out);
void dml2_core_calcs_get_pipe_regs(const struct dml2_display_cfg *dml2_display_cfg, struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_per_pipe_register_set *out, int pipe_index);
void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index);
void dml2_core_calcs_get_global_sync_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, union dml2_global_sync_programming *out, int pipe_index);
void dml2_core_calcs_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_index);
void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_index);
void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
/* Debug-string helpers for internal enum values. */
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
#endif

View file

@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_core_factory.h"
#include "dml2_core_dcn4.h"
#include "dml2_external_lib_deps.h"
/* Zero *out and install the core function table for the given project id.
 * Returns false for a NULL out pointer or an unsupported/invalid project. */
bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance *out)
{
	if (!out)
		return false;

	memset(out, 0, sizeof(*out));

	switch (project_id) {
	case dml2_project_dcn4x_stage2:
	case dml2_project_dcn4x_stage2_auto_drr_svp:
		out->initialize = &core_dcn4_initialize;
		out->mode_support = &core_dcn4_mode_support;
		out->mode_programming = &core_dcn4_mode_programming;
		out->populate_informative = &core_dcn4_populate_informative;
		out->calculate_mcache_allocation = &core_dcn4_calculate_mcache_allocation;
		return true;
	case dml2_project_dcn4x_stage1:
	case dml2_project_invalid:
	default:
		return false;
	}
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML2_CORE_FACTORY_H__
#define __DML2_CORE_FACTORY_H__
#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"
/* Zeroes *out and installs the project-specific core function table.
 * Returns false for a NULL out pointer or an unsupported project id. */
bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance *out);
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
/* Shared (project-independent) dml2 core calculation helpers and tuning
 * constants. The dml2_core_shared_* entry points parallel the per-project
 * dml2_core_calcs_* functions (see dml2_core_dcn4_calcs.h). */
#ifndef __DML2_CORE_SHARED_H__
#define __DML2_CORE_SHARED_H__
#define __DML_VBA_DEBUG__
#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
#define __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ 6.0 //<brief Prefetch schedule max vratio when enhance prefetch schedule acceleration is enabled and vstartup is earliest possible already
#define __DML2_CALCS_DPP_INVALID__ 0
#define __DML2_CALCS_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
#define __DML2_CALCS_PIPE_NO_PLANE__ 99
#include "dml2_core_shared_types.h"
#include "dml2_internal_shared_types.h"
/* Division that also returns the integer remainder through *remainder. */
double dml2_core_shared_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
/* Debug-string helpers for internal enum values. */
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
bool dml2_core_shared_is_420(enum dml2_source_format_class source_format);
/* Solvers: evaluate support / compute programming for a display config. */
bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params);
bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_ex *in_out_params);
/* Getters: read results out of the mode lib after a solver pass. */
void dml2_core_shared_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out);
void dml2_core_shared_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out);
void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg, struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_per_pipe_register_set *out, int pipe_index);
void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index);
void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx);
void dml2_core_shared_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
void dml2_core_shared_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_idx);
void dml2_core_shared_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
void dml2_core_shared_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,644 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_dpmm_dcn4.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"
#include "lib_float_math.h"
/* Convert a DRAM bandwidth demand (kbps) into the UCLK frequency (kHz)
 * needed to deliver it, given the DRAM channel configuration. */
static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const struct dml2_dram_params *dram_config)
{
	/* Bytes moved per UCLK tick across all channels. */
	const unsigned long bytes_per_uclk_tick = dram_config->channel_count *
		dram_config->channel_width_bytes * dram_config->transactions_per_clock;

	return (double)bandwidth_kbps / bytes_per_uclk_tick;
}
/* Fetch the latency-driven minimum UCLK/FCLK/DCFCLK (kHz) from the DPM
 * table entry selected during mode support. Stage 3's refined index is
 * used when that stage succeeded, otherwise stage 1's. */
static void get_minimum_clocks_for_latency(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out,
	double *uclk,
	double *fclk,
	double *dcfclk)
{
	const int index = in_out->display_cfg->stage3.success ?
		in_out->display_cfg->stage3.min_clk_index_for_latency :
		in_out->display_cfg->stage1.min_clk_index_for_latency;

	*dcfclk = in_out->min_clk_table->dram_bw_table.entries[index].min_dcfclk_khz;
	*fclk = in_out->min_clk_table->dram_bw_table.entries[index].min_fclk_khz;
	*uclk = dram_bw_kbps_to_uclk_khz(in_out->min_clk_table->dram_bw_table.entries[index].pre_derate_dram_bw_kbps,
		&in_out->soc_bb->clk_table.dram_config);
}
/* Ceiling of a non-negative double as an unsigned long. */
static unsigned long dml_round_up(double a)
{
	const unsigned long truncated = (unsigned long)a;

	/* Bump to the next integer when any fractional part remains. */
	return (a > truncated) ? truncated + 1 : truncated;
}
/* Compute the system-active minimum clocks (UCLK/FCLK/DCFCLK, in kHz).
 *
 * For each clock, two bandwidth-driven floors are derived -- one from the
 * average and one from the urgent bandwidth demand, each inflated by the
 * corresponding system-active derate percentage from the SoC bounding box --
 * and the larger of the two is kept. That floor is then combined with the
 * latency-driven minimum from the DPM table and rounded up to a whole kHz.
 *
 * NOTE(review): divides by the derate percentages; assumes they are non-zero
 * in the SoC BB -- confirm with bounding-box validation.
 */
static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
double min_uclk_avg, min_uclk_urgent, min_uclk_bw;
double min_fclk_avg, min_fclk_urgent, min_fclk_bw;
double min_dcfclk_avg, min_dcfclk_urgent, min_dcfclk_bw;
double min_uclk_latency, min_fclk_latency, min_dcfclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
/* UCLK floor: DRAM bandwidth converted to a UCLK frequency, then derated. */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100);
min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100);
min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
/* FCLK floor: SDP bandwidth over the fabric return-path width, derated. */
min_fclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100);
min_fclk_urgent = (double)mode_support_result->global.active.urgent_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
min_fclk_urgent = (double)min_fclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100);
min_fclk_bw = min_fclk_urgent > min_fclk_avg ? min_fclk_urgent : min_fclk_avg;
/* DCFCLK floor: SDP bandwidth over the DCN return-bus width, derated. */
min_dcfclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100);
min_dcfclk_urgent = (double)mode_support_result->global.active.urgent_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
min_dcfclk_urgent = (double)min_dcfclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100);
min_dcfclk_bw = min_dcfclk_urgent > min_dcfclk_avg ? min_dcfclk_urgent : min_dcfclk_avg;
/* Combine each bandwidth floor with the latency-driven DPM minimum. */
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
in_out->programming->min_clocks.dcn4.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
in_out->programming->min_clocks.dcn4.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
in_out->programming->min_clocks.dcn4.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
/* Compute the minimum clocks for the SVP (subviewport) prefetch state.
 *
 * Same structure as calculate_system_active_minimums(), but driven by the
 * svp_prefetch bandwidth demand and the dcn_mall_prefetch_* derate entries
 * of the SoC bounding box. The bandwidth floor (max of derated average and
 * urgent demand) is combined with the latency-driven DPM minimum and
 * rounded up to a whole kHz.
 *
 * NOTE(review): divides by the derate percentages; assumes they are non-zero
 * in the SoC BB -- confirm with bounding-box validation.
 */
static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
double min_uclk_avg, min_uclk_urgent, min_uclk_bw;
double min_fclk_avg, min_fclk_urgent, min_fclk_bw;
double min_dcfclk_avg, min_dcfclk_urgent, min_dcfclk_bw;
double min_fclk_latency, min_dcfclk_latency;
double min_uclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
/* UCLK floor from DRAM bandwidth with MALL-prefetch derates. */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100);
min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100);
min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
/* FCLK floor from SDP bandwidth over the fabric return-path width. */
min_fclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.fclk_derate_percent / 100);
min_fclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
min_fclk_urgent = (double)min_fclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.fclk_derate_percent / 100);
min_fclk_bw = min_fclk_urgent > min_fclk_avg ? min_fclk_urgent : min_fclk_avg;
/* DCFCLK floor from SDP bandwidth over the DCN return-bus width. */
min_dcfclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dcfclk_derate_percent / 100);
min_dcfclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
min_dcfclk_urgent = (double)min_dcfclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dcfclk_derate_percent / 100);
min_dcfclk_bw = min_dcfclk_urgent > min_dcfclk_avg ? min_dcfclk_urgent : min_dcfclk_avg;
/* Combine each bandwidth floor with the latency-driven DPM minimum. */
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
/* Compute the minimum clocks for the idle state.
 *
 * Only the average bandwidth terms are considered (no urgent floor), using
 * the system-active bandwidth demand with the system_idle_average derates.
 * Each derated floor is combined with the latency-driven DPM minimum and
 * rounded up to a whole kHz.
 *
 * NOTE(review): divides by the derate percentages; assumes they are non-zero
 * in the SoC BB -- confirm with bounding-box validation.
 */
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
double min_uclk_avg;
double min_fclk_avg;
double min_dcfclk_avg;
double min_uclk_latency, min_fclk_latency, min_dcfclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
/* Derated average-bandwidth floors for UCLK, FCLK and DCFCLK. */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100);
min_fclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.fclk_derate_percent / 100);
min_dcfclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dcfclk_derate_percent / 100);
/* Combine each bandwidth floor with the latency-driven DPM minimum. */
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
in_out->programming->min_clocks.dcn4.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency);
in_out->programming->min_clocks.dcn4.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency);
in_out->programming->min_clocks.dcn4.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency);
}
/* Apply a fractional margin to a requested clock and round it to what the
 * dentist DFS divider can actually produce from the given VCO frequency.
 *
 * The divider is expressed in quarter steps (scale factor 4); flooring the
 * divider keeps the resulting clock at or above the (margined) request.
 * Outputs the rounded clock in kHz and the hardware divider id. Returns
 * false when the request is degenerate (clock or VCO below 1 kHz, or the
 * clock exceeds the VCO). */
static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double margin, unsigned long vco_freq_khz, unsigned long *rounded_khz, uint32_t *divider_id)
{
	enum dentist_divider_range {
		DFS_DIVIDER_RANGE_1_START = 8, /* 2.00 */
		DFS_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
		DFS_DIVIDER_RANGE_2_START = 64, /* 16.00 */
		DFS_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
		DFS_DIVIDER_RANGE_3_START = 128, /* 32.00 */
		DFS_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
		DFS_DIVIDER_RANGE_4_START = 248, /* 62.00 */
		DFS_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
		DFS_DIVIDER_RANGE_SCALE_FACTOR = 4
	};

	enum DFS_base_divider_id {
		DFS_BASE_DID_1 = 0x08,
		DFS_BASE_DID_2 = 0x40,
		DFS_BASE_DID_3 = 0x60,
		DFS_BASE_DID_4 = 0x7e,
		DFS_MAX_DID = 0x7f
	};

	unsigned int divider;

	if (clock_khz < 1 || vco_freq_khz < 1 || clock_khz > vco_freq_khz)
		return false;

	/* Inflate the request by the margin, then floor the divider so the
	 * produced clock is at least the margined request. */
	clock_khz *= 1.0 + margin;
	divider = (unsigned int)(DFS_DIVIDER_RANGE_SCALE_FACTOR * (vco_freq_khz / clock_khz));

	/* Map the divider into the hardware divider-id encoding, range by range. */
	if (divider >= DFS_DIVIDER_RANGE_4_START) {
		*divider_id = DFS_BASE_DID_4 + ((divider - DFS_DIVIDER_RANGE_4_START) / DFS_DIVIDER_RANGE_4_STEP);
		if (*divider_id > DFS_MAX_DID)
			*divider_id = DFS_MAX_DID;
	} else if (divider >= DFS_DIVIDER_RANGE_3_START) {
		*divider_id = DFS_BASE_DID_3 + ((divider - DFS_DIVIDER_RANGE_3_START) / DFS_DIVIDER_RANGE_3_STEP);
	} else if (divider >= DFS_DIVIDER_RANGE_2_START) {
		*divider_id = DFS_BASE_DID_2 + ((divider - DFS_DIVIDER_RANGE_2_START) / DFS_DIVIDER_RANGE_2_STEP);
	} else if (divider >= DFS_DIVIDER_RANGE_1_START) {
		*divider_id = DFS_BASE_DID_1 + ((divider - DFS_DIVIDER_RANGE_1_START) / DFS_DIVIDER_RANGE_1_STEP);
	} else {
		*divider_id = DFS_BASE_DID_1;
	}

	*rounded_khz = vco_freq_khz * DFS_DIVIDER_RANGE_SCALE_FACTOR / divider;

	return true;
}
/*
 * Find the lowest entry in clock_table that satisfies min_value and store
 * it in *rounded_value.  Tables with more than two entries are treated as
 * discrete DPM levels; tables with one or two entries are treated as a
 * fine-grained range, in which case min_value itself is used when it does
 * not exceed the table maximum.  Returns false when min_value cannot be
 * satisfied by the table.
 */
static bool round_up_and_copy_to_next_dpm(unsigned long min_value, unsigned long *rounded_value, const struct dml2_clk_table *clock_table)
{
	int level;

	if (clock_table->num_clk_values > 2) {
		/* Discrete levels: pick the first level >= min_value. */
		for (level = 0; level < clock_table->num_clk_values; level++) {
			if (clock_table->clk_values_khz[level] >= min_value) {
				*rounded_value = clock_table->clk_values_khz[level];
				return true;
			}
		}
		return false;
	}

	/* Fine-grained range: any value up to the table maximum is valid. */
	if (clock_table->clk_values_khz[clock_table->num_clk_values - 1] >= min_value) {
		*rounded_value = min_value;
		return true;
	}

	return false;
}
/* In-place convenience wrapper: round *clock_value up to a valid DPM level. */
static bool round_up_to_next_dpm(unsigned long *clock_value, const struct dml2_clk_table *clock_table)
{
	unsigned long requested_khz = *clock_value;

	return round_up_and_copy_to_next_dpm(requested_khz, clock_value, clock_table);
}
/*
 * Round every computed minimum clock up to an actual level of the SoC
 * state table.  The chain short-circuits: once any rounding fails (the
 * requirement exceeds the table), the remaining guarded steps are skipped
 * and false is returned.
 *
 * Per-pipe DPP clocks are rounded in place for every possible pipe, while
 * per-stream DSC/DTB/PHY clocks are copied from the mode-support result
 * into the stream programming.
 */
static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mode_support_result, struct dml2_display_cfg_programming *display_cfg, const struct dml2_soc_state_table *state_table)
{
	bool result;
	unsigned int i;

	if (!state_table || !display_cfg)
		return false;

	/* Active-state minimums. */
	result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.dcfclk_khz, &state_table->dcfclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.fclk_khz, &state_table->fclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.uclk_khz, &state_table->uclk);

	/* SVP-prefetch-state minimums. */
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz, &state_table->dcfclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz, &state_table->fclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz, &state_table->uclk);

	/* Idle-state minimums. */
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.dcfclk_khz, &state_table->dcfclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.fclk_khz, &state_table->fclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.uclk_khz, &state_table->uclk);

	/* Global display clock and deep-sleep DCFCLK. */
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dispclk_khz, &state_table->dispclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.deepsleep_dcfclk_khz, &state_table->dcfclk);

	/* Per-pipe DPP clocks (all possible pipes are rounded in place). */
	for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
		if (result)
			result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4.dppclk_khz, &state_table->dppclk);
	}

	/* Per-stream clocks, copied from the mode-support result. */
	for (i = 0; i < display_cfg->display_config.num_streams; i++) {
		if (result)
			result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dscclk_khz, &state_table->dscclk);
		if (result)
			result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dtbclk_khz, &state_table->dtbclk);
		if (result)
			result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.phyclk_khz, &state_table->phyclk);
	}

	/* Reference clocks are rounded against their base clock tables. */
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dpprefclk_khz, &state_table->dppclk);
	if (result)
		result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dtbrefclk_khz, &state_table->dtbclk);

	return result;
}
/*
 * Check whether the streams selected by @mask can be trivially kept in
 * sync: every masked stream must have a byte-identical timing and none
 * may have DRR (variable refresh) enabled.  Zero or one selected stream
 * is trivially synchronizable by definition.
 */
static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *display_config, int mask)
{
	unsigned int idx;
	unsigned int selected[DML2_MAX_PLANES];
	unsigned int num_selected = 0;

	/* Collect the stream indices picked out by the mask. */
	for (idx = 0; idx < display_config->num_streams; idx++) {
		if (mask & (0x1 << idx))
			selected[num_selected++] = idx;
	}

	if (num_selected <= 1)
		return true;

	/* Every selected timing must be byte-identical to its neighbour. */
	for (idx = 1; idx < num_selected; idx++) {
		if (memcmp(&display_config->stream_descriptors[selected[idx - 1]].timing,
			&display_config->stream_descriptors[selected[idx]].timing,
			sizeof(struct dml2_timing_cfg)))
			return false;
	}

	/* DRR makes the frame length variable, so trivial sync is impossible. */
	for (idx = 0; idx < num_selected; idx++) {
		if (display_config->stream_descriptors[selected[idx]].timing.drr_config.enabled)
			return false;
	}

	return true;
}
/*
 * Return the smallest reserved vblank idle time (in us) across the
 * streams selected by @mask, or 0 when the mask selects no streams.
 */
static int find_smallest_idle_time_in_vblank_us(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int mask)
{
	unsigned int idx;
	int smallest_us;
	unsigned int selected[DML2_MAX_PLANES];
	unsigned int num_selected = 0;
	const struct dml2_core_mode_support_result *support = &in_out->display_cfg->mode_support_result;

	/* Collect the stream indices picked out by the mask. */
	for (idx = 0; idx < in_out->programming->display_config.num_streams; idx++) {
		if (mask & (0x1 << idx))
			selected[num_selected++] = idx;
	}

	if (num_selected == 0)
		return 0;

	/* Seed from the first selected stream, then take the running minimum. */
	smallest_us = support->cfg_support_info.stream_support_info[selected[0]].vblank_reserved_time_us;
	for (idx = 1; idx < num_selected; idx++) {
		if (support->cfg_support_info.stream_support_info[selected[idx]].vblank_reserved_time_us < smallest_us)
			smallest_us = support->cfg_support_info.stream_support_info[selected[idx]].vblank_reserved_time_us;
	}

	return smallest_us;
}
/*
 * Enable UCLK/FCLK p-state support when the nominal vblank idle time of
 * the (trivially synchronizable) streams is long enough to hide the
 * corresponding blackout period.
 * NOTE(review): the 0xF mask covers only the first 4 streams — confirm
 * this matches the maximum stream count for this path.
 */
static bool determine_power_management_features_with_vblank_only(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	int idle_time_us;

	if (!are_timings_trivially_synchronizable(&in_out->programming->display_config, 0xF))
		return true;

	idle_time_us = find_smallest_idle_time_in_vblank_us(in_out, 0xF);

	if (idle_time_us >= in_out->soc_bb->power_management_parameters.dram_clk_change_blackout_us)
		in_out->programming->uclk_pstate_supported = true;

	if (idle_time_us >= in_out->soc_bb->power_management_parameters.fclk_change_blackout_us)
		in_out->programming->fclk_pstate_supported = true;

	return true;
}
/*
 * Build a bitmask of planes whose active latency hiding time is too short
 * to absorb @latency_hiding_requirement_us.
 */
static int get_displays_without_vactive_margin_mask(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int latency_hiding_requirement_us)
{
	unsigned int plane_idx;
	int no_margin_mask = 0x0;
	const struct dml2_core_mode_support_result *support = &in_out->display_cfg->mode_support_result;

	for (plane_idx = 0; plane_idx < in_out->programming->display_config.num_planes; plane_idx++) {
		if (support->cfg_support_info.plane_support_info[plane_idx].active_latency_hiding_us < latency_hiding_requirement_us)
			no_margin_mask |= (0x1 << plane_idx);
	}

	return no_margin_mask;
}
/*
 * Build a bitmask of planes eligible for FAMS, i.e. planes whose legacy
 * SVP override is anything other than "auto".
 *
 * @latency_hiding_requirement_us is currently unused; it is kept for
 * signature parity with get_displays_without_vactive_margin_mask().
 */
static int get_displays_with_fams_mask(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int latency_hiding_requirement_us)
{
	unsigned int i;
	int displays_with_fams_mask = 0x0;

	for (i = 0; i < in_out->programming->display_config.num_planes; i++) {
		/*
		 * Fix: index the per-plane descriptor with [i]; the original
		 * dereferenced the array base and so tested plane 0's override
		 * for every plane in the loop.
		 */
		if (in_out->programming->display_config.plane_descriptors[i].overrides.legacy_svp_config != dml2_svp_mode_override_auto)
			displays_with_fams_mask |= (0x1 << i);
	}

	return displays_with_fams_mask;
}
/*
 * Try to enable UCLK and FCLK p-state support for configurations where
 * some planes lack vactive margin: if all such planes are trivially
 * synchronizable and their common vblank idle time covers the blackout
 * period, the switch can happen in vblank.  Flags already set by an
 * earlier stage are left untouched.
 */
static bool determine_power_management_features_with_vactive_and_vblank(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	int no_margin_mask;
	int idle_time_us;

	/* UCLK p-state: evaluate against the DRAM clock-change blackout. */
	if (!in_out->programming->uclk_pstate_supported) {
		no_margin_mask = get_displays_without_vactive_margin_mask(in_out,
			(int)(in_out->soc_bb->power_management_parameters.dram_clk_change_blackout_us));

		if (are_timings_trivially_synchronizable(&in_out->programming->display_config, no_margin_mask)) {
			idle_time_us = find_smallest_idle_time_in_vblank_us(in_out, no_margin_mask);
			if (idle_time_us >= in_out->soc_bb->power_management_parameters.dram_clk_change_blackout_us)
				in_out->programming->uclk_pstate_supported = true;
		}
	}

	/* FCLK p-state: same evaluation against the FCLK change blackout. */
	if (!in_out->programming->fclk_pstate_supported) {
		no_margin_mask = get_displays_without_vactive_margin_mask(in_out,
			(int)(in_out->soc_bb->power_management_parameters.fclk_change_blackout_us));

		if (are_timings_trivially_synchronizable(&in_out->programming->display_config, no_margin_mask)) {
			idle_time_us = find_smallest_idle_time_in_vblank_us(in_out, no_margin_mask);
			if (idle_time_us >= in_out->soc_bb->power_management_parameters.fclk_change_blackout_us)
				in_out->programming->fclk_pstate_supported = true;
		}
	}

	return true;
}
/*
 * UCLK p-state can also be supported via FAMS: support is granted when no
 * plane both lacks vactive margin and lacks FAMS coverage.
 */
static bool determine_power_management_features_with_fams(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	int no_margin_mask;
	int fams_mask;

	no_margin_mask = get_displays_without_vactive_margin_mask(in_out,
		(int)(in_out->soc_bb->power_management_parameters.dram_clk_change_blackout_us));
	fams_mask = get_displays_with_fams_mask(in_out,
		(int)(in_out->soc_bb->power_management_parameters.dram_clk_change_blackout_us));

	/* Every margin-less plane must be covered by FAMS. */
	if ((no_margin_mask & ~fams_mask) == 0)
		in_out->programming->uclk_pstate_supported = true;

	return true;
}
/* Pin all UCLK minimums (active, SVP prefetch, idle) to the highest DPM level. */
static void clamp_uclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	unsigned long max_uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];

	in_out->programming->min_clocks.dcn4.active.uclk_khz = max_uclk_khz;
	in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = max_uclk_khz;
	in_out->programming->min_clocks.dcn4.idle.uclk_khz = max_uclk_khz;
}
/*
 * Pin the active and idle FCLK minimums to the highest DPM level.
 * NOTE(review): unlike clamp_uclk_to_max(), the svp_prefetch FCLK minimum
 * is left unclamped here — confirm this asymmetry is intentional.
 */
static void clamp_fclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	unsigned long max_fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];

	in_out->programming->min_clocks.dcn4.active.fclk_khz = max_fclk_khz;
	in_out->programming->min_clocks.dcn4.idle.fclk_khz = max_fclk_khz;
}
/*
 * Core mapping step shared by the DCN3- and DCN4-style entry points:
 * compute the active/SVP-prefetch/idle minimum clocks, quantize the
 * display-related clocks to DFS granularity, and round everything onto
 * actual SoC DPM levels.  Power-management support flags are reset to
 * false here; the callers decide which features can be enabled.
 */
static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	int i;
	bool result;
	double dispclk_khz;
	const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;

	calculate_system_active_minimums(in_out);
	calculate_svp_prefetch_minimums(in_out);
	calculate_idle_minimums(in_out);

	// In DCN4, there's no support for FCLK or DCFCLK DPM change before SVP prefetch starts, therefore
	// active minimums must be boosted to prefetch minimums
	if (in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4.active.uclk_khz)
		in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz;

	if (in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4.active.fclk_khz)
		in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz;

	if (in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4.active.dcfclk_khz)
		in_out->programming->min_clocks.dcn4.active.dcfclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz;

	// need some massaging for the dispclk ramping cases:
	dispclk_khz = mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0) * (1.0 + in_out->ip->dispclk_ramp_margin_percent / 100.0);
	// ramping margin should not make dispclk exceed the maximum dispclk speed:
	dispclk_khz = math_min2(dispclk_khz, in_out->min_clk_table->max_clocks_khz.dispclk);
	// but still the required dispclk can be more than the maximum dispclk speed:
	dispclk_khz = math_max2(dispclk_khz, mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));

	/*
	 * NOTE(review): the return values of the three
	 * add_margin_and_round_to_dfs_grainularity() calls below are ignored;
	 * a failure would leave the clock/divider outputs unwritten — confirm
	 * the inputs can never be out of range here.
	 */
	add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0,
		(unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dispclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dispclk_did);

	// DPP Ref is always set to max of all DPP clocks
	/* NOTE(review): iterates all DML2_MAX_DCN_PIPES entries — assumes unused per_plane entries are zero. */
	for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
		if (in_out->programming->min_clocks.dcn4.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz)
			in_out->programming->min_clocks.dcn4.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz;
	}

	add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dpprefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
		(unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dpprefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dpprefclk_did);

	/* Express each pipe's DPP clock as a fraction (n/255) of the DPP ref clock, rounded up. */
	for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
		in_out->programming->plane_programming[i].min_clocks.dcn4.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4.dpprefclk_khz / 255.0
			* math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4.dpprefclk_khz, 1.0));
	}

	// DTB Ref is always set to max of all DTB clocks
	for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
		if (in_out->programming->min_clocks.dcn4.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz)
			in_out->programming->min_clocks.dcn4.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz;
	}

	add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dtbrefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
		(unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dtbrefclk_did);

	in_out->programming->min_clocks.dcn4.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz;
	in_out->programming->min_clocks.dcn4.socclk_khz = mode_support_result->global.socclk_khz;

	/* Snap all minimums onto real DPM levels from the SoC state table. */
	result = map_min_clocks_to_dpm(mode_support_result, in_out->programming, &in_out->soc_bb->clk_table);

	// By default, all power management features are not enabled
	in_out->programming->fclk_pstate_supported = false;
	in_out->programming->uclk_pstate_supported = false;

	return result;
}
/*
 * DCN3-style DPM mapping: map the mode's minimum clocks onto DPM levels,
 * then probe each power-management feature in order (vblank-only,
 * vactive+vblank, FAMS) and clamp UCLK/FCLK to their maximums when the
 * corresponding p-state switch remains unsupported.
 */
bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	bool mapped_ok = map_mode_to_soc_dpm(in_out);

	/* Each stage only upgrades flags left false by the previous one. */
	determine_power_management_features_with_vblank_only(in_out);
	determine_power_management_features_with_vactive_and_vblank(in_out);
	determine_power_management_features_with_fams(in_out);

	if (!in_out->programming->uclk_pstate_supported)
		clamp_uclk_to_max(in_out);

	if (!in_out->programming->fclk_pstate_supported)
		clamp_fclk_to_max(in_out);

	return mapped_ok;
}
/*
 * DCN4-style DPM mapping: map minimum clocks onto DPM levels, then derive
 * UCLK/FCLK p-state support (UCLK from the stage-3 optimization result,
 * FCLK from vactive margin and vblank idle time), clamp unsupported
 * clocks to max, and finally evaluate stutter and Z8 support from the
 * smallest vblank idle time.
 */
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	bool result;
	int displays_without_vactive_margin_mask = 0x0;
	int min_idle_us = 0;

	result = map_mode_to_soc_dpm(in_out);

	/* Stage-3 success already proved UCLK p-state is achievable. */
	if (in_out->display_cfg->stage3.success)
		in_out->programming->uclk_pstate_supported = true;

	/* FCLK p-state: supported when every plane has vactive margin, or
	 * when the margin-less planes are trivially synchronizable and their
	 * vblank idle time covers the FCLK change blackout.
	 */
	displays_without_vactive_margin_mask =
		get_displays_without_vactive_margin_mask(in_out, (int)(in_out->soc_bb->power_management_parameters.fclk_change_blackout_us));

	if (displays_without_vactive_margin_mask == 0) {
		in_out->programming->fclk_pstate_supported = true;
	} else {
		if (are_timings_trivially_synchronizable(&in_out->programming->display_config, displays_without_vactive_margin_mask)) {
			min_idle_us = find_smallest_idle_time_in_vblank_us(in_out, displays_without_vactive_margin_mask);

			if (min_idle_us >= in_out->soc_bb->power_management_parameters.fclk_change_blackout_us)
				in_out->programming->fclk_pstate_supported = true;
		}
	}

	if (in_out->programming->uclk_pstate_supported == false)
		clamp_uclk_to_max(in_out);

	if (in_out->programming->fclk_pstate_supported == false)
		clamp_fclk_to_max(in_out);

	/* Smallest idle time across all streams (0xFF masks 8 streams;
	 * NOTE(review): the vblank-only path above uses 0xF — confirm which
	 * width is intended).
	 */
	min_idle_us = find_smallest_idle_time_in_vblank_us(in_out, 0xFF);

	/* Stutter in blank: idle time must cover enter+exit latency. */
	if (in_out->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
		min_idle_us >= in_out->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us)
		in_out->programming->stutter.supported_in_blank = true;
	else
		in_out->programming->stutter.supported_in_blank = false;

	/* TODO(review): Z8 "meets eco" below compares the informative stutter
	 * period against z8_min_idle_time (original marker: "Fix me Sam") —
	 * confirm this is the intended data source.
	 */
	if (in_out->soc_bb->power_management_parameters.z8_min_idle_time > 0 &&
		in_out->programming->informative.power_management.z8.stutter_period >= in_out->soc_bb->power_management_parameters.z8_min_idle_time)
		in_out->programming->z8_stutter.meets_eco = true;
	else
		in_out->programming->z8_stutter.meets_eco = false;

	/* Z8 stutter in blank: idle time must cover the Z8 exit latency. */
	if (in_out->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
		min_idle_us >= in_out->soc_bb->power_management_parameters.z8_stutter_exit_latency_us)
		in_out->programming->z8_stutter.supported_in_blank = true;
	else
		in_out->programming->z8_stutter.supported_in_blank = false;

	return result;
}
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out)
{
const struct dml2_display_cfg *display_cfg = &in_out->display_cfg->display_config;
const struct dml2_core_internal_display_mode_lib *mode_lib = &in_out->core->clean_me_up.mode_lib;
struct dml2_dchub_global_register_set *dchubbub_regs = &in_out->programming->global_regs;
double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
/* set A */
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_nom = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidth * 1000);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_mall = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthMALL * 1000);
/* set B */
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].frac_urg_bw_nom = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidth * 1000);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].frac_urg_bw_mall = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthMALL * 1000);
dchubbub_regs->num_watermark_sets = 2;
return true;
}

View file

@ -0,0 +1,17 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_DPMM_DCN4_H__
#define __DML2_DPMM_DCN4_H__

#include "dml2_internal_shared_types.h"

// DCN3-style mapping of minimum clocks onto SoC DPM levels.
bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
// DCN4-style mapping, including stutter/Z8 support evaluation.
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
// Translate computed DML watermarks into DCHUBBUB register values.
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
bool dpmm_dcn4_unit_test(void);

#endif

View file

@ -0,0 +1,50 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_dpmm_factory.h"
#include "dml2_dpmm_dcn4.h"
#include "dml2_external_lib_deps.h"
/* No-op DPM mapping used by projects that do not need one (e.g. stage 1). */
static bool dummy_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
	return true;
}
/* No-op watermark mapping used by projects that do not need one. */
static bool dummy_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out)
{
	return true;
}
bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance *out)
{
bool result = false;
if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
switch (project_id) {
case dml2_project_dcn4x_stage1:
out->map_mode_to_soc_dpm = &dummy_map_mode_to_soc_dpm;
out->map_watermarks = &dummy_map_watermarks;
result = true;
break;
case dml2_project_dcn4x_stage2:
out->map_mode_to_soc_dpm = &dpmm_dcn3_map_mode_to_soc_dpm;
out->map_watermarks = &dummy_map_watermarks;
result = true;
break;
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->map_mode_to_soc_dpm = &dpmm_dcn4_map_mode_to_soc_dpm;
result = true;
break;
case dml2_project_invalid:
default:
break;
}
return result;
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_DPMM_FACTORY_H__
#define __DML2_DPMM_FACTORY_H__

#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"

// Fill a DPMM instance's function pointers for the given project id;
// returns false for NULL output or unknown projects.
bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance *out);

#endif

View file

@ -0,0 +1,156 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_mcg_dcn4.h"
#include "dml_top_soc_parameter_types.h"
/* Internal worker shared by the public entry point below. */
static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table);

/* Public MCG entry point: derive the minimum-clock table from the SoC bounding box. */
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
{
	return build_min_clock_table(in_out->soc_bb, in_out->min_clk_table);
}
/*
 * Convert a UCLK frequency (kHz) into raw DRAM bandwidth (kB/s) from the
 * DRAM topology: channels * bytes-per-channel * transactions per clock.
 */
static unsigned long long uclk_to_dram_bw_kbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config)
{
	return (unsigned long long)uclk_khz * dram_config->channel_count *
		dram_config->channel_width_bytes * dram_config->transactions_per_clock;
}
/*
 * Round @value up to the nearest entry of @quantized_values (assumed
 * ascending).  Returns 0 when the list is missing or when @value exceeds
 * every entry, so callers must treat 0 as "no valid level".
 */
static unsigned long round_up_to_quantized_values(unsigned long value, const unsigned long *quantized_values, int num_quantized_values)
{
	int i;

	if (!quantized_values)
		return 0;

	for (i = 0; i < num_quantized_values; i++) {
		/*
		 * Fix: use >= so a value already sitting exactly on a
		 * quantized level maps to itself instead of being bumped to
		 * the next level (or to 0 when it equals the last entry).
		 */
		if (quantized_values[i] >= value)
			return quantized_values[i];
	}

	return 0;
}
/*
 * Build the minimum-clock table: for each UCLK DPM level, compute the
 * DRAM bandwidth it provides and the minimum FCLK/DCFCLK needed to keep
 * up with it, then clamp, quantize, and prune the resulting entries.
 * Returns false for NULL inputs, clock tables with fewer than two
 * DCFCLK/FCLK values, or a UCLK table larger than the bandwidth table.
 */
static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table)
{
	int i;
	unsigned int j;
	bool dcfclk_fine_grained = false, fclk_fine_grained = false;
	unsigned long min_dcfclk_khz = 0, max_dcfclk_khz = 0;
	unsigned long min_fclk_khz = 0, max_fclk_khz = 0;
	unsigned long prev_100, cur_50;

	if (!soc_bb || !min_table)
		return false;

	if (soc_bb->clk_table.dcfclk.num_clk_values < 2 || soc_bb->clk_table.fclk.num_clk_values < 2)
		return false;

	if (soc_bb->clk_table.uclk.num_clk_values > DML_MCG_MAX_CLK_TABLE_SIZE)
		return false;

	/* Fixed (non-DPM) clocks come straight from the bounding box (MHz -> kHz). */
	min_table->fixed_clocks_khz.amclk = 0;
	min_table->fixed_clocks_khz.dprefclk = soc_bb->dprefclk_mhz * 1000;
	min_table->fixed_clocks_khz.pcierefclk = soc_bb->pcie_refclk_mhz * 1000;
	min_table->fixed_clocks_khz.dchubrefclk = soc_bb->dchub_refclk_mhz * 1000;
	min_table->fixed_clocks_khz.xtalclk = soc_bb->xtalclk_mhz * 1000;

	/* Exactly two entries are treated as a fine-grained min..max range
	 * (no quantization later); more entries are discrete DPM levels.
	 */
	if (soc_bb->clk_table.dcfclk.num_clk_values == 2) {
		dcfclk_fine_grained = true;
	}

	max_dcfclk_khz = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
	min_dcfclk_khz = soc_bb->clk_table.dcfclk.clk_values_khz[0];

	if (soc_bb->clk_table.fclk.num_clk_values == 2) {
		fclk_fine_grained = true;
	}

	max_fclk_khz = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
	min_fclk_khz = soc_bb->clk_table.fclk.clk_values_khz[0];

	/* Maximums are simply the last (highest) entry of each clock table. */
	min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1];
	min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1];
	min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1];
	min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
	min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];

	min_table->max_clocks_khz.dcfclk = max_dcfclk_khz;
	min_table->max_clocks_khz.fclk = max_fclk_khz;

	// First calculate the table for "balanced" bandwidths across UCLK/FCLK
	for (i = 0; i < soc_bb->clk_table.uclk.num_clk_values; i++) {
		min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps = uclk_to_dram_bw_kbps(soc_bb->clk_table.uclk.clk_values_khz[i], &soc_bb->clk_table.dram_config);

		/* FCLK able to carry the derated DRAM bandwidth through the fabric. */
		min_table->dram_bw_table.entries[i].min_fclk_khz = (unsigned long)((((double)min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps * soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100) / ((double)soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100)) / soc_bb->fabric_datapath_to_dcn_data_return_bytes);
	}
	min_table->dram_bw_table.num_entries = soc_bb->clk_table.uclk.num_clk_values;

	// To create the minium table, effectively shift "up" all the dcfclk/fclk entries by 1, and then replace the lowest entry with min fclk/dcfclk
	for (i = min_table->dram_bw_table.num_entries - 1; i > 0; i--) {
		/* Take the larger of the previous level's full FCLK and half of this level's. */
		prev_100 = min_table->dram_bw_table.entries[i - 1].min_fclk_khz;
		cur_50 = min_table->dram_bw_table.entries[i].min_fclk_khz / 2;
		min_table->dram_bw_table.entries[i].min_fclk_khz = prev_100 > cur_50 ? prev_100 : cur_50;

		if (!fclk_fine_grained) {
			/* NOTE(review): round_up_to_quantized_values() returns 0
			 * when the requirement exceeds the highest FCLK level —
			 * that 0 is then raised to min_fclk_khz below, not max.
			 */
			min_table->dram_bw_table.entries[i].min_fclk_khz = round_up_to_quantized_values(min_table->dram_bw_table.entries[i].min_fclk_khz, soc_bb->clk_table.fclk.clk_values_khz, soc_bb->clk_table.fclk.num_clk_values);
		}
	}
	min_table->dram_bw_table.entries[0].min_fclk_khz /= 2;

	// Clamp to minimums and maximums
	for (i = 0; i < (int)min_table->dram_bw_table.num_entries; i++) {
		if (min_table->dram_bw_table.entries[i].min_dcfclk_khz < min_dcfclk_khz)
			min_table->dram_bw_table.entries[i].min_dcfclk_khz = min_dcfclk_khz;

		if (min_table->dram_bw_table.entries[i].min_fclk_khz < min_fclk_khz)
			min_table->dram_bw_table.entries[i].min_fclk_khz = min_fclk_khz;

		/* Optional cap on FCLK while UCLK DPM is active. */
		if (soc_bb->max_fclk_for_uclk_dpm_khz > 0 &&
			min_table->dram_bw_table.entries[i].min_fclk_khz > soc_bb->max_fclk_for_uclk_dpm_khz)
			min_table->dram_bw_table.entries[i].min_fclk_khz = soc_bb->max_fclk_for_uclk_dpm_khz;

		/* Derive DCFCLK from FCLK via the derate ratio and bus-width ratio. */
		min_table->dram_bw_table.entries[i].min_dcfclk_khz =
			min_table->dram_bw_table.entries[i].min_fclk_khz *
			soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / soc_bb->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent;

		min_table->dram_bw_table.entries[i].min_dcfclk_khz =
			min_table->dram_bw_table.entries[i].min_dcfclk_khz * soc_bb->fabric_datapath_to_dcn_data_return_bytes / soc_bb->return_bus_width_bytes;

		if (!dcfclk_fine_grained) {
			min_table->dram_bw_table.entries[i].min_dcfclk_khz = round_up_to_quantized_values(min_table->dram_bw_table.entries[i].min_dcfclk_khz, soc_bb->clk_table.dcfclk.clk_values_khz, soc_bb->clk_table.dcfclk.num_clk_values);
		}
	}

	// Prune states which are invalid (some clocks exceed maximum)
	for (i = 0; i < (int)min_table->dram_bw_table.num_entries; i++) {
		if (min_table->dram_bw_table.entries[i].min_dcfclk_khz > min_table->max_clocks_khz.dcfclk ||
			min_table->dram_bw_table.entries[i].min_fclk_khz > min_table->max_clocks_khz.fclk) {
			min_table->dram_bw_table.num_entries = i;
			break;
		}
	}

	// Prune duplicate states
	for (i = 0; i < (int)min_table->dram_bw_table.num_entries - 1; i++) {
		if (min_table->dram_bw_table.entries[i].min_dcfclk_khz == min_table->dram_bw_table.entries[i + 1].min_dcfclk_khz &&
			min_table->dram_bw_table.entries[i].min_fclk_khz == min_table->dram_bw_table.entries[i + 1].min_fclk_khz &&
			min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps == min_table->dram_bw_table.entries[i + 1].pre_derate_dram_bw_kbps) {

			// i + 1 is the same state as i, so shift everything
			/* NOTE(review): the last iteration reads entries[num_entries],
			 * one past the populated range — safe only while
			 * num_entries is below the array capacity; confirm.
			 */
			for (j = i + 1; j < min_table->dram_bw_table.num_entries; j++) {
				min_table->dram_bw_table.entries[j].min_dcfclk_khz = min_table->dram_bw_table.entries[j + 1].min_dcfclk_khz;
				min_table->dram_bw_table.entries[j].min_fclk_khz = min_table->dram_bw_table.entries[j + 1].min_fclk_khz;
				min_table->dram_bw_table.entries[j].pre_derate_dram_bw_kbps = min_table->dram_bw_table.entries[j + 1].pre_derate_dram_bw_kbps;
			}
			min_table->dram_bw_table.num_entries--;
		}
	}

	return true;
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_MCG_DCN4_H__
#define __DML2_MCG_DCN4_H__

#include "dml2_internal_shared_types.h"

/* Build the DCN4 minimum clock table; installed as the
 * dml2_mcg_instance::build_min_clock_table callback by dml2_mcg_create(). */
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);

/* Self-test entry point for the MCG DCN4 component. */
bool mcg_dcn4_unit_test(void);

#endif

View file

@ -0,0 +1,40 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_mcg_factory.h"
#include "dml2_mcg_dcn4.h"
#include "dml2_external_lib_deps.h"
/* Stub build_min_clock_table callback for projects that do not construct a
 * real minimum clock table; always reports success without touching in_out. */
static bool dummy_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
{
	return true;
}
/*
 * Populate a Minimum Clock Generator (MCG) instance with the callbacks for
 * the given project. Returns false for a NULL output pointer or an
 * unrecognized project id; the instance is zeroed before population.
 */
bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *out)
{
	if (!out)
		return false;

	memset(out, 0, sizeof(*out));

	switch (project_id) {
	case dml2_project_dcn4x_stage1:
		/* Stage 1 bring-up: min clock table construction is stubbed. */
		out->build_min_clock_table = dummy_build_min_clock_table;
		return true;
	case dml2_project_dcn4x_stage2:
	case dml2_project_dcn4x_stage2_auto_drr_svp:
		out->build_min_clock_table = mcg_dcn4_build_min_clock_table;
		return true;
	case dml2_project_invalid:
	default:
		return false;
	}
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_MCG_FACTORY_H__
#define __DML2_MCG_FACTORY_H__

#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"

/* Fill *out with the MCG callbacks for project_id; returns false when out is
 * NULL or the project id is unknown. */
bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *out);

#endif

View file

@ -0,0 +1,688 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn3.h"
/*
 * In-place ascending bubble sort of list_a[0..list_a_size-1].
 *
 * The previous implementation started the inner adjacent-compare pass at
 * index i instead of 0, so earlier elements were never revisited once i
 * advanced — e.g. {2, 3, 1} "sorted" to {2, 1, 3}. The inner pass must
 * always restart from the front (shrinking by the i elements already
 * bubbled to the tail).
 */
static void sort(double *list_a, int list_a_size)
{
	double temp;

	for (int i = 0; i < list_a_size - 1; i++) {
		// One full adjacent-swap pass; the largest remaining value
		// bubbles to position list_a_size - 1 - i.
		for (int j = 0; j < list_a_size - 1 - i; j++) {
			if (list_a[j] > list_a[j + 1]) {
				temp = list_a[j];
				list_a[j] = list_a[j + 1];
				list_a[j + 1] = temp;
			}
		}
	}
}
/*
 * Apply the given reserved vblank time (converted from microseconds to
 * nanoseconds) to every plane attached to the stream at stream_index.
 */
static void set_reserved_time_on_all_planes_with_stream_index(struct display_configuation_with_meta *config, unsigned int stream_index, double reserved_time_us)
{
	unsigned int plane_idx;

	for (plane_idx = 0; plane_idx < config->display_config.num_planes; plane_idx++) {
		struct dml2_plane_parameters *cur = &config->display_config.plane_descriptors[plane_idx];

		if (cur->stream_index != stream_index)
			continue;

		cur->overrides.reserved_vblank_time_ns = (long int)(reserved_time_us * 1000);
	}
}
/*
 * Collapse runs of equal adjacent values in a (sorted) list, shrinking
 * *list_a_size accordingly. Elements after a removed duplicate are shifted
 * left by one position.
 */
static void remove_duplicates(double *list_a, int *list_a_size)
{
	int idx = 0;

	while (idx + 1 < *list_a_size) {
		if (list_a[idx] != list_a[idx + 1]) {
			idx++;
			continue;
		}

		/* Drop the duplicate at idx + 1 by shifting the tail left. */
		for (int k = idx + 1; k + 1 < *list_a_size; k++)
			list_a[k] = list_a[k + 1];

		(*list_a_size)--;
	}
}
/*
 * Bump the MPC combine factor by one unless it has already reached limit.
 * Returns true when the factor was incremented.
 */
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
{
	if (*mpc_combine_factor >= limit)
		return false;

	*mpc_combine_factor += 1;
	return true;
}
/*
 * Try to fix dcc mcache admissibility failures by granting failing planes an
 * extra DPP (MPC combine), without changing ODM. Only planes whose stream is
 * at ODM factor 1 can be optimized this way. Returns false when a failing
 * plane cannot receive another pipe (per-plane limit reached, no free pipes,
 * or the plane's stream uses ODM combine).
 */
static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
	int free_pipes)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i;
	bool result = true;

	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		// For pipes that failed dcc mcache check, we want to increase the pipe count.
		// The logic for doing this depends on how many pipes is already being used,
		// and whether it's mpcc or odm combine.
		if (!in_out->dcc_mcache_supported[i]) {
			// For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
			if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
				// Seed the override with the pipe count the core already chose for this plane.
				in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
					in_out->cfg_support_info->plane_support_info[i].dpps_used;

				// For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
				if (free_pipes > 0) {
					if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
						pmo->mpc_combine_limit)) {
						// We've reached max pipes allocatable to a single plane, so we fail.
						result = false;
						break;
					} else {
						// Successfully added another pipe to this failing plane.
						free_pipes--;
					}
				} else {
					// No free pipes to add.
					result = false;
					break;
				}
			} else {
				// If the stream of this plane needs ODM combine, no further optimization can be done.
				result = false;
				break;
			}
		}
	}

	return result;
}
/*
 * Step the per-stream candidate cursors like a multi-digit odometer counting
 * down: decrement stream 0's index; on underflow, borrow from the first
 * higher stream whose index is still nonzero and reset all lower digits to
 * their maximum (candidate count - 1). Returns false once every combination
 * has been exhausted. (Spelling of "candidiate" kept to match call sites.)
 */
static bool iterate_to_next_candidiate(struct dml2_pmo_instance *pmo, int size)
{
	int borrow_from, i;
	bool success = false;

	if (pmo->scratch.pmo_dcn3.current_candidate[0] > 0) {
		pmo->scratch.pmo_dcn3.current_candidate[0]--;
		success = true;
	} else {
		// Find the first higher "digit" that can be decremented.
		for (borrow_from = 1; borrow_from < size && pmo->scratch.pmo_dcn3.current_candidate[borrow_from] == 0; borrow_from++)
			;

		if (borrow_from < size) {
			pmo->scratch.pmo_dcn3.current_candidate[borrow_from]--;
			// Reset every lower digit to its maximum value.
			for (i = 0; i < borrow_from; i++) {
				pmo->scratch.pmo_dcn3.current_candidate[i] = pmo->scratch.pmo_dcn3.reserved_time_candidates_count[i] - 1;
			}

			success = true;
		}
	}

	return success;
}
/*
 * Advance *odm_mode one combine level (bypass -> 2:1 -> 3:1 -> 4:1).
 * An "auto" mode is first resolved to the concrete mode implied by
 * odms_calculated, and is then still advanced one step. Returns false when
 * the mode cannot be advanced (already at 4:1, or odms_calculated out of
 * range while resolving "auto").
 */
static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
{
	bool result = true;

	if (*odm_mode == dml2_odm_mode_auto) {
		/* Resolve "auto" from the ODM count the core calculated. */
		switch (odms_calculated) {
		case 1:
			*odm_mode = dml2_odm_mode_bypass;
			break;
		case 2:
			*odm_mode = dml2_odm_mode_combine_2to1;
			break;
		case 3:
			*odm_mode = dml2_odm_mode_combine_3to1;
			break;
		case 4:
			*odm_mode = dml2_odm_mode_combine_4to1;
			break;
		default:
			result = false;
			break;
		}
	}

	if (!result)
		return false;

	/* Step up to the next combine level. */
	switch (*odm_mode) {
	case dml2_odm_mode_bypass:
		*odm_mode = dml2_odm_mode_combine_2to1;
		break;
	case dml2_odm_mode_combine_2to1:
		*odm_mode = dml2_odm_mode_combine_3to1;
		break;
	case dml2_odm_mode_combine_3to1:
		*odm_mode = dml2_odm_mode_combine_4to1;
		break;
	default:
		result = false;
		break;
	}

	return result;
}
/* Return how many planes in display_cfg are attached to the given stream. */
static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
{
	int total = 0;
	unsigned int p;

	for (p = 0; p < display_cfg->num_planes; p++) {
		if (display_cfg->plane_descriptors[p].stream_index == stream_index)
			total++;
	}

	return total;
}
/*
 * Check whether the masked set of streams can trivially run in lockstep:
 * every selected stream's timing struct must be byte-identical and none may
 * have DRR (variable refresh) enabled. @mask selects stream indices
 * (bit i == stream i). Sets of 0 or 1 streams are trivially synchronizable.
 */
static bool are_timings_trivially_synchronizable(struct display_configuation_with_meta *display_config, int mask)
{
	unsigned int i;
	bool identical = true;
	bool contains_drr = false;
	unsigned int remap_array[DML2_MAX_PLANES];
	unsigned int remap_array_size = 0;

	// Create a remap array to enable simple iteration through only masked stream indices
	for (i = 0; i < display_config->display_config.num_streams; i++) {
		if (mask & (0x1 << i)) {
			remap_array[remap_array_size++] = i;
		}
	}

	// 0 or 1 display is always trivially synchronizable
	if (remap_array_size <= 1)
		return true;

	// Any byte difference between adjacent timing structs disqualifies the set.
	for (i = 1; i < remap_array_size; i++) {
		if (memcmp(&display_config->display_config.stream_descriptors[remap_array[i - 1]].timing,
			&display_config->display_config.stream_descriptors[remap_array[i]].timing,
			sizeof(struct dml2_timing_cfg))) {
			identical = false;
			break;
		}
	}

	// DRR makes the effective frame duration variable, so lockstep cannot be assumed.
	for (i = 0; i < remap_array_size; i++) {
		if (display_config->display_config.stream_descriptors[remap_array[i]].timing.drr_config.enabled) {
			contains_drr = true;
			break;
		}
	}

	return !contains_drr && identical;
}
/*
 * Initialize a DCN3-class PMO instance: cache the caller-provided SoC
 * bounding box, IP caps, table size and options, and set the fixed combine
 * limits (MPC combine up to 2 pipes, ODM combine up to 4).
 */
bool pmo_dcn3_initialize(struct dml2_pmo_initialize_in_out *in_out)
{
	struct dml2_pmo_instance *inst = in_out->instance;

	inst->soc_bb = in_out->soc_bb;
	inst->ip_caps = in_out->ip_caps;
	inst->min_clock_table_size = in_out->min_clock_table_size;
	inst->options = in_out->options;

	inst->mpc_combine_limit = 2;
	inst->odm_combine_limit = 4;

	return true;
}
/*
 * Htotal, Hblank start/end, and Hsync start/end must all divide evenly by
 * denominator for the horizontal timing to be considered divisible.
 * Hsync start is always 0 and therefore always divisible.
 */
static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
{
	unsigned long hblank_start = timing->h_total - timing->h_front_porch;

	if (timing->h_total % denominator != 0)
		return false;
	if (hblank_start % denominator != 0)
		return false;
	if (timing->h_blank_end % denominator != 0)
		return false;
	if (timing->h_sync_width % denominator != 0)
		return false;

	return true;
}
/*
 * DP, eDP, DP2.0 and "none" are treated as DP-class encoders; HDMI variants
 * (and anything unrecognized) are not.
 */
static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
{
	return encoder_type == dml2_dp ||
		encoder_type == dml2_edp ||
		encoder_type == dml2_dp2p0 ||
		encoder_type == dml2_none;
}
/*
 * Prepare for vmin (dynamic ODM) optimization by flagging streams that must
 * not be optimized: streams whose base config needs MPC combine, streams
 * with dynamic ODM disabled per override or SubVP policy, streams whose
 * horizontal timing is not divisible by 2, and non-DP encoders. Returns
 * false when dynamic ODM is disabled outright by the PMO options.
 */
bool pmo_dcn3_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
{
	unsigned int i;
	const struct dml2_display_cfg *display_config =
		&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
		&in_out->base_display_config->mode_support_result;

	if (in_out->instance->options->disable_dyn_odm ||
		(in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
		return false;

	for (i = 0; i < display_config->num_planes; i++)
		/*
		 * vmin optimization is required to be seamlessly switched off
		 * at any time when the new configuration is no longer
		 * supported. However switching from ODM combine to MPC combine
		 * is not always seamless. When there not enough free pipes, we
		 * will have to use the same secondary OPP heads as secondary
		 * DPP pipes in MPC combine in new state. This transition is
		 * expected to cause glitches. To avoid the transition, we only
		 * allow vmin optimization if the stream's base configuration
		 * doesn't require MPC combine. This condition checks if MPC
		 * combine is enabled. If so do not optimize the stream.
		 */
		if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
			mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
			in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;

	for (i = 0; i < display_config->num_streams; i++) {
		if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
			in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
		else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
			in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
			in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
		/*
		 * ODM Combine requires horizontal timing divisible by 2 so each
		 * ODM segment has the same size.
		 */
		else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
			in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
		/*
		 * Our hardware support seamless ODM transitions for DP encoders
		 * only.
		 */
		else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
			in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
	}

	return true;
}
/*
 * The configuration is considered "at vmin" unless a nonzero dispclk limit
 * is set and the config's global dispclk exceeds it.
 */
bool pmo_dcn3_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
{
	if (in_out->vmin_limits->dispclk_khz > 0 &&
		in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
		return false;

	return true;
}
/*
 * "ODM load" is a stream's pixel rate divided by its ODM segment count,
 * i.e. the pixel rate each ODM slice must sustain. Return the index of the
 * stream with the highest load, or -1 when there are no streams.
 */
static int find_highest_odm_load_stream_index(
	const struct dml2_display_cfg *display_config,
	const struct dml2_core_mode_support_result *mode_support_result)
{
	int best_index = -1;
	int best_load = -1;
	unsigned int s;

	for (s = 0; s < display_config->num_streams; s++) {
		int load = display_config->stream_descriptors[s].timing.pixel_clock_khz
			/ mode_support_result->cfg_support_info.stream_support_info[s].odms_used;

		if (load > best_load) {
			best_load = load;
			best_index = (int)s;
		}
	}

	return best_index;
}
/*
 * Lower vmin by raising the ODM combine factor on the stream with the
 * highest per-ODM-segment pixel load (dispclk is bounded by that stream).
 * The optimized config is a copy of the base config with that stream's
 * odm_mode override advanced to the first legal higher combine level;
 * 3:1 and 4:1 additionally require timing/DSC-slice divisibility. Returns
 * false when the limiting stream cannot be optimized.
 */
bool pmo_dcn3_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
{
	int stream_index;
	const struct dml2_display_cfg *display_config =
		&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
		&in_out->base_display_config->mode_support_result;
	unsigned int odms_used;
	struct dml2_stream_parameters *stream_descriptor;
	bool optimizable = false;

	/*
	 * highest odm load stream must be optimizable to continue as dispclk is
	 * bounded by it.
	 */
	stream_index = find_highest_odm_load_stream_index(display_config,
		mode_support_result);

	if (stream_index < 0 ||
		in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
		return false;

	odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
	if ((int)odms_used >= in_out->instance->odm_combine_limit)
		return false;

	memcpy(in_out->optimized_display_config,
		in_out->base_display_config,
		sizeof(struct display_configuation_with_meta));

	stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
	// Keep advancing the combine level until one passes the divisibility checks
	// or increase_odm_combine_factor() runs out of levels.
	while (!optimizable && increase_odm_combine_factor(
		&stream_descriptor->overrides.odm_mode,
		odms_used)) {
		switch (stream_descriptor->overrides.odm_mode) {
		case dml2_odm_mode_combine_2to1:
			optimizable = true;
			break;
		case dml2_odm_mode_combine_3to1:
			/*
			 * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 3.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_combine_4to1:
			/*
			 * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 4.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_auto:
		case dml2_odm_mode_bypass:
		case dml2_odm_mode_split_1to2:
		case dml2_odm_mode_mso_1to2:
		case dml2_odm_mode_mso_1to4:
		default:
			break;
		}
	}

	return optimizable;
}
/*
 * Resolve dcc mcache admissibility failures by giving failing planes more
 * pipes: MPC combine in the multi-stream / non-ODM cases, ODM combine
 * escalation in the single-stream ODM case. in_out->optimized_display_cfg
 * receives a copy of the input config with the combine-factor overrides
 * applied; returns false when no further pipes can be granted.
 */
bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i, used_pipes, free_pipes, planes_on_stream;
	bool result;

	if (in_out->display_config != in_out->optimized_display_cfg) {
		memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
	}

	//Count number of free pipes, and check if any odm combine is in use.
	used_pipes = 0;
	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
	}
	free_pipes = pmo->ip_caps->pipe_count - used_pipes;

	// Optimization loop
	// The goal here is to add more pipes to any planes
	// which are failing mcache admissibility
	result = true;

	// The optimization logic depends on whether ODM combine is enabled, and the stream count.
	if (in_out->optimized_display_cfg->num_streams > 1) {
		// If there are multiple streams, we are limited to only be able to optimize mcache failures on planes
		// which are not ODM combined.

		result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
	} else if (in_out->optimized_display_cfg->num_streams == 1) {
		// In single stream cases, we still optimize mcache failures when there's ODM combine with some
		// additional logic.

		if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
			// If ODM combine is enabled, then the logic is to increase ODM combine factor.

			// Optimization for streams with > 1 ODM combine factor is only supported for single display.
			planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);

			for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
				// For pipes that failed dcc mcache check, we want to increase the pipe count.
				// The logic for doing this depends on how many pipes is already being used,
				// and whether it's mpcc or odm combine.
				if (!in_out->dcc_mcache_supported[i]) {
					// Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream.
					if (free_pipes >= planes_on_stream) {
						// NOTE(review): stream_descriptors is indexed by the plane
						// loop counter here; with num_streams == 1 only index 0 is a
						// real stream — presumably safe because the success path
						// breaks out, but verify for i > 0.
						if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
							in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
							result = false;
						} else {
							free_pipes -= planes_on_stream;
							break;
						}
					} else {
						result = false;
						break;
					}
				}
			}
		} else {
			// If ODM combine is not enabled, then we can actually use the same logic as before.

			result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
		}
	} else {
		// No streams: nothing to optimize.
		result = true;
	}

	return result;
}
/*
 * Build, per stream, a sorted & deduplicated list of candidate reserved
 * vblank times (twait budgeting for fclk/uclk p-state and stutter), and
 * initialize the scratch iteration state consumed by the test/optimize
 * p-state callbacks. Streams with a legacy SVP override are excluded via
 * stream_mask. Iteration starts at the largest candidate for each stream.
 */
bool pmo_dcn3_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;
	struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
	const struct dml2_stream_parameters *stream_descriptor;
	const struct dml2_plane_parameters *plane_descriptor;
	unsigned int stream_index, plane_index, candidate_count;
	double min_reserved_vblank_time = 0;
	int fclk_twait_needed_mask = 0x0;
	int uclk_twait_needed_mask = 0x0;

	state->performed = true;
	state->min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
	pmo->scratch.pmo_dcn3.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
	pmo->scratch.pmo_dcn3.max_latency_index = pmo->min_clock_table_size;
	pmo->scratch.pmo_dcn3.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;

	pmo->scratch.pmo_dcn3.stream_mask = 0xF;

	// Mark streams needing fclk/uclk twait budgeting: either the plane can't
	// actively hide the blackout ("if_needed") or the stream asks to "try".
	for (plane_index = 0; plane_index < in_out->base_display_config->display_config.num_planes; plane_index++) {
		plane_descriptor = &in_out->base_display_config->display_config.plane_descriptors[plane_index];
		stream_descriptor = &in_out->base_display_config->display_config.stream_descriptors[plane_descriptor->stream_index];

		if (in_out->base_display_config->mode_support_result.cfg_support_info.plane_support_info[plane_index].active_latency_hiding_us <
			in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us &&
			stream_descriptor->overrides.hw.twait_budgeting.uclk_pstate == dml2_twait_budgeting_setting_if_needed)
			uclk_twait_needed_mask |= (0x1 << plane_descriptor->stream_index);

		if (stream_descriptor->overrides.hw.twait_budgeting.uclk_pstate == dml2_twait_budgeting_setting_try)
			uclk_twait_needed_mask |= (0x1 << plane_descriptor->stream_index);

		if (in_out->base_display_config->mode_support_result.cfg_support_info.plane_support_info[plane_index].active_latency_hiding_us <
			in_out->instance->soc_bb->power_management_parameters.fclk_change_blackout_us &&
			stream_descriptor->overrides.hw.twait_budgeting.fclk_pstate == dml2_twait_budgeting_setting_if_needed)
			fclk_twait_needed_mask |= (0x1 << plane_descriptor->stream_index);

		if (stream_descriptor->overrides.hw.twait_budgeting.fclk_pstate == dml2_twait_budgeting_setting_try)
			fclk_twait_needed_mask |= (0x1 << plane_descriptor->stream_index);

		if (plane_descriptor->overrides.legacy_svp_config != dml2_svp_mode_override_auto) {
			// Legacy SVP streams are excluded from twait budgeting.
			pmo->scratch.pmo_dcn3.stream_mask &= ~(0x1 << plane_descriptor->stream_index);
		}
	}

	for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
		stream_descriptor = &in_out->base_display_config->display_config.stream_descriptors[stream_index];

		// The absolute minimum required time is the minimum of all the required budgets
		/*
		if (stream_descriptor->overrides.hw.twait_budgeting.fclk_pstate
			== dml2_twait_budgeting_setting_require)

			if (are_timings_trivially_synchronizable(in_out->base_display_config, pmo->scratch.pmo_dcn3.stream_mask)) {
				min_reserved_vblank_time = max_double2(min_reserved_vblank_time,
					in_out->instance->soc_bb->power_management_parameters.fclk_change_blackout_us);
			}

		if (stream_descriptor->overrides.hw.twait_budgeting.uclk_pstate
			== dml2_twait_budgeting_setting_require) {

			if (are_timings_trivially_synchronizable(in_out->base_display_config, pmo->scratch.pmo_dcn3.stream_mask)) {
				min_reserved_vblank_time = max_double2(min_reserved_vblank_time,
					in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us);
			}
		}

		if (stream_descriptor->overrides.hw.twait_budgeting.stutter_enter_exit
			== dml2_twait_budgeting_setting_require)
			min_reserved_vblank_time = max_double2(min_reserved_vblank_time,
				in_out->instance->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us);
		*/

		// Insert the absolute minimum into the array
		candidate_count = 1;
		pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index][0] = min_reserved_vblank_time;
		pmo->scratch.pmo_dcn3.reserved_time_candidates_count[stream_index] = candidate_count;

		if (!(pmo->scratch.pmo_dcn3.stream_mask & (0x1 << stream_index)))
			continue;

		// For every optional feature, we create a candidate for it only if it's larger minimum.
		if ((fclk_twait_needed_mask & (0x1 << stream_index)) &&
			in_out->instance->soc_bb->power_management_parameters.fclk_change_blackout_us > min_reserved_vblank_time) {

			if (are_timings_trivially_synchronizable(in_out->base_display_config, pmo->scratch.pmo_dcn3.stream_mask)) {
				pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index][candidate_count++] =
					in_out->instance->soc_bb->power_management_parameters.fclk_change_blackout_us;
			}
		}

		if ((uclk_twait_needed_mask & (0x1 << stream_index)) &&
			in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us > min_reserved_vblank_time) {

			if (are_timings_trivially_synchronizable(in_out->base_display_config, pmo->scratch.pmo_dcn3.stream_mask)) {
				pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index][candidate_count++] =
					in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
			}
		}

		if ((stream_descriptor->overrides.hw.twait_budgeting.stutter_enter_exit == dml2_twait_budgeting_setting_try ||
			stream_descriptor->overrides.hw.twait_budgeting.stutter_enter_exit == dml2_twait_budgeting_setting_if_needed) &&
			in_out->instance->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > min_reserved_vblank_time) {

			pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index][candidate_count++] =
				in_out->instance->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
		}

		pmo->scratch.pmo_dcn3.reserved_time_candidates_count[stream_index] = candidate_count;

		// Finally sort the array of candidates
		sort(pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index],
			pmo->scratch.pmo_dcn3.reserved_time_candidates_count[stream_index]);

		remove_duplicates(pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index],
			&pmo->scratch.pmo_dcn3.reserved_time_candidates_count[stream_index]);

		// Start iteration at the largest (most conservative) candidate.
		pmo->scratch.pmo_dcn3.current_candidate[stream_index] =
			pmo->scratch.pmo_dcn3.reserved_time_candidates_count[stream_index] - 1;
	}

	return true;
}
/*
 * A configuration supports the current p-state candidate when every plane
 * reserves at least its stream's candidate vblank time (candidates are in
 * microseconds; the plane override is in nanoseconds).
 */
bool pmo_dcn3_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;
	unsigned int plane_idx;

	for (plane_idx = 0; plane_idx < in_out->base_display_config->display_config.num_planes; plane_idx++) {
		unsigned int sidx = in_out->base_display_config->display_config.plane_descriptors[plane_idx].stream_index;
		double required_ns =
			pmo->scratch.pmo_dcn3.reserved_time_candidates[sidx][pmo->scratch.pmo_dcn3.current_candidate[sidx]] * 1000;

		if (in_out->base_display_config->display_config.plane_descriptors[plane_idx].overrides.reserved_vblank_time_ns < required_ns)
			return false;
	}

	return true;
}
/*
 * Produce the next p-state candidate configuration. After a failure, first
 * try a higher min-clock (latency) index; once those are exhausted, step the
 * per-stream candidate odometer down and restart from the minimum latency
 * index. On success the optimized config carries the chosen latency index
 * and the selected reserved vblank time applied to every plane.
 */
bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;
	unsigned int stream_index;
	bool success = false;
	bool reached_end = true;

	memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));

	if (in_out->last_candidate_failed) {
		if (pmo->scratch.pmo_dcn3.cur_latency_index < pmo->scratch.pmo_dcn3.max_latency_index) {
			// If we haven't tried all the clock bounds to support this state, try a higher one
			pmo->scratch.pmo_dcn3.cur_latency_index++;

			success = true;
		} else {
			// If there's nothing higher to try, then we have to have a smaller candidate
			reached_end = !iterate_to_next_candidiate(pmo, in_out->optimized_display_config->display_config.num_streams);

			if (!reached_end) {
				pmo->scratch.pmo_dcn3.cur_latency_index = pmo->scratch.pmo_dcn3.min_latency_index;

				success = true;
			}
		}
	} else {
		success = true;
	}

	if (success) {
		in_out->optimized_display_config->stage3.min_clk_index_for_latency = pmo->scratch.pmo_dcn3.cur_latency_index;

		for (stream_index = 0; stream_index < in_out->optimized_display_config->display_config.num_streams; stream_index++) {
			set_reserved_time_on_all_planes_with_stream_index(in_out->optimized_display_config, stream_index,
				pmo->scratch.pmo_dcn3.reserved_time_candidates[stream_index][pmo->scratch.pmo_dcn3.current_candidate[stream_index]]);
		}
	}

	return success;
}

View file

@ -0,0 +1,23 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_PMO_DCN3_H__
#define __DML2_PMO_DCN3_H__

#include "dml2_internal_shared_types.h"

/* DCN3-class power-management-optimization (PMO) entry points; installed as
 * dml2_pmo_instance callbacks for the dcn4x_stage2 project. */
bool pmo_dcn3_initialize(struct dml2_pmo_initialize_in_out *in_out);

bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);

bool pmo_dcn3_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out);
bool pmo_dcn3_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out);
bool pmo_dcn3_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out);

bool pmo_dcn3_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
bool pmo_dcn3_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_PMO_DCN4_H__
#define __DML2_PMO_DCN4_H__

#include "dml2_internal_shared_types.h"

/* DCN4 PMO entry points; only initialize/optimize_dcc_mcache are wired up
 * for the dcn4x_stage1 project by dml2_pmo_create(). */
bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out);

bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);

bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out);
bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out);
bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out);

bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);

/* Self-test entry point for the PMO DCN4 component. */
bool pmo_dcn4_unit_test(void);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,27 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef __DML2_PMO_FAMS2_DCN4_H__
#define __DML2_PMO_FAMS2_DCN4_H__

#include "dml2_internal_shared_types.h"

/* FAMS2-capable DCN4 PMO entry points; installed as dml2_pmo_instance
 * callbacks for the dcn4x_stage2_auto_drr_svp project, including the
 * stutter-optimization stages absent from the plain DCN3/DCN4 PMOs. */
bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out);

bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);

bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out);
bool pmo_dcn4_fams2_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out);

bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);

bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out);
bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out);

#endif

View file

@ -0,0 +1,86 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn4_fams2.h"
#include "dml2_pmo_dcn4.h"
#include "dml2_pmo_dcn3.h"
#include "dml2_external_lib_deps.h"
/* Stutter-stage stub for projects without stutter optimization: always
 * reports false so no stutter setup is performed. */
static bool dummy_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
{
	return false;
}
/* Stutter-stage stub: always reports true (the stutter test trivially
 * passes when stutter optimization is unavailable). */
static bool dummy_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out)
{
	return true;
}
/* Stutter-stage stub: always reports false (never produces a new stutter
 * candidate configuration). */
static bool dummy_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out)
{
	return false;
}
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out)
{
bool result = false;
if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
switch (project_id) {
case dml2_project_dcn4x_stage1:
out->initialize = pmo_dcn4_initialize;
out->optimize_dcc_mcache = pmo_dcn4_optimize_dcc_mcache;
result = true;
break;
case dml2_project_dcn4x_stage2:
out->initialize = pmo_dcn3_initialize;
out->optimize_dcc_mcache = pmo_dcn3_optimize_dcc_mcache;
out->init_for_vmin = pmo_dcn3_init_for_vmin;
out->test_for_vmin = pmo_dcn3_test_for_vmin;
out->optimize_for_vmin = pmo_dcn3_optimize_for_vmin;
out->init_for_uclk_pstate = pmo_dcn3_init_for_pstate_support;
out->test_for_uclk_pstate = pmo_dcn3_test_for_pstate_support;
out->optimize_for_uclk_pstate = pmo_dcn3_optimize_for_pstate_support;
out->init_for_stutter = dummy_init_for_stutter;
out->test_for_stutter = dummy_test_for_stutter;
out->optimize_for_stutter = dummy_optimize_for_stutter;
result = true;
break;
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->initialize = pmo_dcn4_fams2_initialize;
out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache;
out->init_for_vmin = pmo_dcn4_fams2_init_for_vmin;
out->test_for_vmin = pmo_dcn4_fams2_test_for_vmin;
out->optimize_for_vmin = pmo_dcn4_fams2_optimize_for_vmin;
out->init_for_uclk_pstate = pmo_dcn4_fams2_init_for_pstate_support;
out->test_for_uclk_pstate = pmo_dcn4_fams2_test_for_pstate_support;
out->optimize_for_uclk_pstate = pmo_dcn4_fams2_optimize_for_pstate_support;
out->init_for_stutter = pmo_dcn4_fams2_init_for_stutter;
out->test_for_stutter = pmo_dcn4_fams2_test_for_stutter;
out->optimize_for_stutter = pmo_dcn4_fams2_optimize_for_stutter;
result = true;
break;
case dml2_project_invalid:
default:
break;
}
return result;
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML2_PMO_FACTORY_H__
#define __DML2_PMO_FACTORY_H__
#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
#endif

View file

@ -0,0 +1,140 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "lib_float_math.h"
#ifndef ASSERT
#define ASSERT(condition)
#endif
#define isNaN(number) ((number) != (number))
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
double math_mod(const double arg1, const double arg2)
{
	/*
	 * Floating-point modulo: arg1 - arg2 * trunc(arg1 / arg2).
	 * A NaN operand returns the other operand unchanged; the self-comparison
	 * (x != x) is the standard NaN test and matches the file's isNaN macro.
	 */
	if (arg1 != arg1)
		return arg2;
	if (arg2 != arg2)
		return arg1;
	/*
	 * FIX(review): the original multiplied the truncated quotient by arg1
	 * instead of arg2 (arg1 - arg1 * (int)(arg1 / arg2)), which is not a
	 * modulo at all -- e.g. math_mod(7, 3) evaluated to -7 instead of 1.
	 */
	return arg1 - arg2 * ((int)(arg1 / arg2));
}
double math_min2(const double arg1, const double arg2)
{
	/*
	 * Minimum of two doubles; a NaN operand yields the other operand.
	 * (x != x) is true exactly for NaN, matching the file's isNaN macro.
	 */
	if (arg1 != arg1)
		return arg2;
	if (arg2 != arg2)
		return arg1;
	if (arg1 < arg2)
		return arg1;
	return arg2;
}
double math_max2(const double arg1, const double arg2)
{
	/*
	 * Maximum of two doubles; a NaN operand yields the other operand.
	 * (x != x) is true exactly for NaN, matching the file's isNaN macro.
	 */
	if (arg1 != arg1)
		return arg2;
	if (arg2 != arg2)
		return arg1;
	if (arg1 > arg2)
		return arg1;
	return arg2;
}
/*
 * Round 'arg' down to the nearest multiple of 'significance'.
 * NOTE(review): the (int) cast truncates toward zero, so for negative
 * arguments this rounds toward zero rather than truly down, and the
 * quotient must fit in an int -- presumably fine for DML's value ranges.
 */
double math_floor2(const double arg, const double significance)
{
	ASSERT(significance != 0);
	return ((int)(arg / significance)) * significance;
}
double math_floor(const double arg)
{
	/*
	 * Truncate toward zero via an int cast.  For non-negative inputs this
	 * matches floor(); negative non-integers truncate up (e.g. -1.2 -> -1),
	 * which is the original behavior and is preserved here.
	 */
	return (double)(int)arg;
}
double math_ceil(const double arg)
{
	/*
	 * Approximate ceiling: add just under 1, then truncate.  Exact integers
	 * are preserved (0.99999 < 1); fractional parts below ~1e-5 round down.
	 */
	return (double)(int)(arg + 0.99999);
}
/*
 * Round 'arg' up to the nearest multiple of 'significance' using the same
 * +0.99999-then-truncate idiom as math_ceil.  Exact multiples are preserved
 * since 0.99999 < 1; the quotient must fit in an int.
 */
double math_ceil2(const double arg, const double significance)
{
	ASSERT(significance != 0);
	return ((int)(arg / significance + 0.99999)) * significance;
}
double math_max3(double v1, double v2, double v3)
{
	/* Maximum of three values, built on the NaN-aware pairwise max. */
	const double max12 = math_max2(v1, v2);

	return (v3 > max12) ? v3 : max12;
}
double math_max4(double v1, double v2, double v3, double v4)
{
	/* Maximum of four values via the three-way helper. */
	const double max123 = math_max3(v1, v2, v3);

	return (v4 > max123) ? v4 : max123;
}
double math_max5(double v1, double v2, double v3, double v4, double v5)
{
	/* Maximum of five values: three-way max compared against two-way max. */
	const double max123 = math_max3(v1, v2, v3);
	const double max45 = math_max2(v4, v5);

	return (max123 > max45) ? max123 : max45;
}
/*
 * Integer-exponent power by recursive squaring.  Only trunc(exp) is used:
 * any fractional part of the exponent is discarded, so exp in (-1, 1)
 * yields 1.  Negative truncated exponents divide by 'a' on odd steps.
 */
float math_pow(float a, float exp)
{
	double temp;
	// Base case: truncated exponent of 0 (covers all |exp| < 1).
	if ((int)exp == 0)
		return 1;
	// Recurse on half of the truncated exponent (squaring step).
	temp = math_pow(a, (float)((int)(exp / 2)));
	if (((int)exp % 2) == 0) {
		return (float)(temp * temp);
	} else {
		// Odd exponent: one extra multiply (or divide when negative) by 'a'.
		if ((int)exp > 0)
			return (float)(a * temp * temp);
		else
			return (float)((temp * temp) / a);
	}
}
double math_fabs(double a)
{
	/*
	 * Absolute value.  Note -0.0 maps to +0.0 because (a > 0) is false for
	 * it, so the negation branch is taken -- same as the original.
	 */
	return (a > 0) ? a : -a;
}
/*
 * Fast approximate logarithm of 'a' in base 'b'.
 *
 * Splits the IEEE-754 float into exponent and mantissa (renormalized into
 * [1.0, 2.0)), approximates log2(mantissa) with a quadratic polynomial, and
 * applies the change-of-base rule unless b is (nearly) 2.  Inputs are
 * expected to be positive normal floats -- the bit manipulation does not
 * handle sign or denormals specially (same as the original).
 *
 * FIX(review): the original reinterpreted the float through an int* cast
 * (int *exp_ptr = (int *)&a), which violates C strict-aliasing rules and is
 * undefined behavior.  Type-punning through a union is well-defined and
 * produces the same bit operations.
 */
float math_log(float a, float b)
{
	union {
		float f;
		unsigned int u;
	} bits;
	int x;
	int log_2;

	bits.f = a;
	x = (int)bits.u;
	/* Exponent with a bias of 128 (the extra 1 is folded into the poly). */
	log_2 = ((x >> 23) & 255) - 128;
	/* Clear the exponent field and force it to 127: mantissa in [1.0, 2.0). */
	x &= ~(255 << 23);
	x += 127 << 23;
	bits.u = (unsigned int)x;
	a = bits.f;
	/* Quadratic approximation of log2 over [1, 2). */
	a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
	if (b > 2.00001 || b < 1.99999)
		return (a + log_2) / math_log(b, 2);
	else
		return (a + log_2);
}
float math_log2(float a)
{
	/* Base-2 logarithm via the generic fast-log helper. */
	return math_log(a, 2.0f);
}
double math_round(double a)
{
	/*
	 * Round half up: add 0.5 and truncate toward zero, exactly equivalent
	 * to the original math_floor(a + 0.5).
	 */
	return (double)(int)(a + 0.5);
}

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __LIB_FLOAT_MATH_H__
#define __LIB_FLOAT_MATH_H__
double math_mod(const double arg1, const double arg2);
double math_min2(const double arg1, const double arg2);
double math_max2(const double arg1, const double arg2);
double math_floor2(const double arg, const double significance);
double math_floor(const double arg);
double math_ceil(const double arg);
double math_ceil2(const double arg, const double significance);
double math_max3(double v1, double v2, double v3);
double math_max4(double v1, double v2, double v3, double v4);
double math_max5(double v1, double v2, double v3, double v4, double v5);
float math_pow(float a, float exp);
double math_fabs(double a);
float math_log(float a, float b);
float math_log2(float a);
double math_round(double a);
#endif

View file

@ -0,0 +1,309 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_top_optimization.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_mcache.h"
static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
{
memcpy(dst, src, sizeof(struct display_configuation_with_meta));
}
bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
{
	/* Mark stage 1 (min-clock-for-latency) as having run; always succeeds. */
	params->display_config->stage1.performed = true;

	return true;
}
bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
{
	/* Optimal when the lowest min-clock table index (0) is already in use. */
	return params->display_config->stage1.min_clk_index_for_latency == 0;
}
bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
{
	/*
	 * Step one state down the min-clock table.  Returns false once index 0
	 * is reached, which terminates the surrounding optimization loop.
	 */
	if (params->display_config->stage1.min_clk_index_for_latency <= 0)
		return false;

	copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
	params->optimized_display_config->stage1.min_clk_index_for_latency--;

	return true;
}
/*
 * mcache "test" step: ask the core how many mcaches each plane needs and
 * where the boundaries fall, assign global mcache IDs across planes, then
 * validate that the allocation is admissible for the current pipe layout.
 * Per-plane pass/fail is stashed in stage2 so the optimize step can react.
 * Returns true only when admissibility validation passes.
 */
bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
{
	struct dml2_optimization_test_function_locals *l = params->locals;
	bool mcache_success = false;
	bool result = false;
	memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
	// Step 1: core computes per-plane mcache counts and offsets.
	l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
	l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
	l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
	result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
	if (result) {
		// Step 2: number the allocations globally across all planes.
		l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
		l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
		dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
		// Step 3: validate against the pipe configuration.
		l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
		l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
		l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
		l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
		mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift to make mcache allocation works
		// Record per-plane admissibility for the optimize step.
		memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
	}
	return mcache_success;
}
/*
 * mcache "optimize" step: hand the per-plane admissibility results to the
 * PMO so it can rewrite the display config (DCC/mcache settings) into
 * params->optimized_display_config.  Bails out immediately if the previous
 * candidate already failed validation.
 */
bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
{
	struct dml2_optimization_optimize_function_locals *l = params->locals;
	bool optimize_success = false;
	if (params->last_candidate_supported == false)
		return false;
	// Start the optimized config as a copy; the PMO then mutates it.
	copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
	l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
	l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
	l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
	l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
	l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
	optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
	return optimize_success;
}
bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
{
	/* Delegate vmin-phase setup to the PMO for the current base config. */
	struct dml2_optimization_init_function_locals *l = params->locals;

	l->vmin.init_params.base_display_config = params->display_config;
	l->vmin.init_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
}
bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
{
	/* Ask the PMO whether the config already meets the SOC vmin limits. */
	struct dml2_optimization_test_function_locals *l = params->locals;

	l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
	l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
	l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
}
bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
{
	/*
	 * Ask the PMO for a lower-vmin candidate.  Once a candidate has failed
	 * validation there is nothing further to relax, so give up.
	 */
	struct dml2_optimization_optimize_function_locals *l = params->locals;

	if (!params->last_candidate_supported)
		return false;

	l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
	l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
	l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
}
/*
 * Generic optimization driver.  Starting from params->display_config, run
 * the phase's optional init_function, then loop "optimize -> re-validate ->
 * test" until either the test passes or optimize can produce no further
 * candidate.  The last candidate that passed both validation and test is
 * written to params->optimized_display_config; with all_or_nothing == false,
 * partial progress is stored there even if the test never passes.
 * Returns true only when the test function ultimately passed.
 */
bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
{
	bool test_passed = false;
	bool optimize_succeeded = true;
	bool candidate_validation_passed = true;
	struct optimization_init_function_params init_params = { 0 };
	struct optimization_test_function_params test_params = { 0 };
	struct optimization_optimize_function_params optimize_params = { 0 };
	if (!params->dml ||
		!params->optimize_function ||
		!params->test_function ||
		!params->display_config ||
		!params->optimized_display_config)
		return false;
	copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
	init_params.locals = &l->init_function_locals;
	init_params.dml = params->dml;
	init_params.display_config = &l->cur_candidate_display_cfg;
	// init_function is optional; a failed init aborts the whole phase.
	if (params->init_function && !params->init_function(&init_params))
		return false;
	test_params.locals = &l->test_function_locals;
	test_params.dml = params->dml;
	test_params.display_config = &l->cur_candidate_display_cfg;
	test_passed = params->test_function(&test_params);
	while (!test_passed && optimize_succeeded) {
		memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
		optimize_params.locals = &l->optimize_function_locals;
		optimize_params.dml = params->dml;
		optimize_params.display_config = &l->cur_candidate_display_cfg;
		optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
		optimize_params.last_candidate_supported = candidate_validation_passed;
		optimize_succeeded = params->optimize_function(&optimize_params);
		if (optimize_succeeded) {
			// Re-run core mode support on the new candidate before testing it.
			l->mode_support_params.instance = &params->dml->core_instance;
			l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
			l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
			// Prefer the stage-3 (p-state) min-clock index once stage 3 has run.
			if (l->next_candidate_display_cfg.stage3.performed)
				l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
			else
				l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
			candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
			l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
		}
		if (optimize_succeeded && candidate_validation_passed) {
			memset(&test_params, 0, sizeof(struct optimization_test_function_params));
			test_params.locals = &l->test_function_locals;
			test_params.dml = params->dml;
			test_params.display_config = &l->next_candidate_display_cfg;
			test_passed = params->test_function(&test_params);
			copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
			// If optimization is not all or nothing, then store partial progress in output
			if (!params->all_or_nothing)
				copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
		}
	}
	if (test_passed)
		copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
	return test_passed;
}
/*
 * Phase-1 special case: binary-search the minimum clock table for the
 * lowest index at which the mode is still supported.  Relies on support
 * being monotonic in the table index (if index i is supported, all higher
 * indices are too).  stage1.min_clk_index_for_latency enters as a
 * known-supported upper bound and leaves holding the lowest supported
 * index.  Returns true unless the required parameters are missing.
 */
bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
{
	int highest_state, lowest_state, cur_state;
	bool supported = false;
	if (!params->dml ||
		!params->optimize_function ||
		!params->test_function ||
		!params->display_config ||
		!params->optimized_display_config)
		return false;
	copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
	highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
	lowest_state = 0;
	cur_state = 0;
	// Invariant: the answer lies within [lowest_state, highest_state].
	while (highest_state > lowest_state) {
		cur_state = (highest_state + lowest_state) / 2;
		l->mode_support_params.instance = &params->dml->core_instance;
		l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
		l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
		l->mode_support_params.min_clk_index = cur_state;
		supported = params->dml->core_instance.mode_support(&l->mode_support_params);
		if (supported) {
			// cur_state works: keep its support result and search lower.
			l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
			highest_state = cur_state;
		} else {
			lowest_state = cur_state + 1;
		}
	}
	l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
	copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
	return true;
}
bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
{
	/* Hand the base config to the PMO to set up UCLK p-state optimization. */
	struct dml2_optimization_init_function_locals *l = params->locals;

	l->uclk_pstate.init_params.base_display_config = params->display_config;
	l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
}
bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
{
	/* Delegate the UCLK p-state support check to the PMO. */
	struct dml2_optimization_test_function_locals *l = params->locals;

	l->uclk_pstate.test_params.base_display_config = params->display_config;
	l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
}
bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
{
	/* Ask the PMO for the next UCLK p-state candidate configuration. */
	struct dml2_optimization_optimize_function_locals *l = params->locals;

	l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
	l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
	l->uclk_pstate.optimize_params.base_display_config = params->display_config;
	l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
}
bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
{
	/*
	 * Set up the PMO stutter-optimization phase for the current base
	 * display configuration.
	 *
	 * FIX(review): the original populated l->uclk_pstate.init_params but
	 * then passed l->stutter.stutter_params to init_for_stutter(), so the
	 * callee received fields that were never written (this could only have
	 * worked if the two locals alias in a union with matching layout --
	 * confirm against dml2_internal_shared_types.h).  Populate the stutter
	 * params that are actually passed.
	 */
	struct dml2_optimization_init_function_locals *l = params->locals;

	l->stutter.stutter_params.instance = &params->dml->pmo_instance;
	l->stutter.stutter_params.base_display_config = params->display_config;

	return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
}
bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
{
	/* Ask the PMO whether the current config already meets stutter goals. */
	struct dml2_optimization_test_function_locals *l = params->locals;

	l->stutter.stutter_params.base_display_config = params->display_config;
	l->stutter.stutter_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
}
bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
{
	/* Have the PMO emit a stutter-improved candidate configuration. */
	struct dml2_optimization_optimize_function_locals *l = params->locals;

	l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
	l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
	l->stutter.stutter_params.base_display_config = params->display_config;
	l->stutter.stutter_params.instance = &params->dml->pmo_instance;

	return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
}

View file

@ -0,0 +1,34 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML2_TOP_OPTIMIZATION_H__
#define __DML2_TOP_OPTIMIZATION_H__
#include "dml2_external_lib_deps.h"
#include "dml2_internal_shared_types.h"
bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params);
bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params);
bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params);
bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params);
bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params);
bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params);
bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params);
bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params);
bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params);
bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params);
bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params);
bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params);
bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params);
bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params);
#endif

View file

@ -0,0 +1,329 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_internal_shared_types.h"
#include "dml_top.h"
#include "dml2_mcg_factory.h"
#include "dml2_core_factory.h"
#include "dml2_dpmm_factory.h"
#include "dml2_pmo_factory.h"
#include "dml_top_mcache.h"
#include "dml2_top_optimization.h"
#include "dml2_external_lib_deps.h"
unsigned int dml2_get_instance_size_bytes(void)
{
	/* Bytes a caller must allocate for an opaque dml2_instance. */
	return (unsigned int)sizeof(struct dml2_instance);
}
/*
 * One-time construction of a dml2_instance: copies the caller's IP caps and
 * SOC bounding box, creates the MCG/DPMM/core/PMO sub-components for the
 * requested project, builds the minimum clock table, then initializes the
 * core and PMO.  Returns false as soon as any creation/initialization step
 * fails (later steps are skipped).
 */
bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
{
	struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
	struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
	struct dml2_core_initialize_in_out core_init_params = { 0 };
	struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
	struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
	bool result = false;
	memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
	memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
	memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
	dml->project_id = in_out->options.project_id;
	dml->pmo_options = in_out->options.pmo_options;
	// Initialize All Components
	result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
	if (result)
		result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
	if (result)
		result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
	if (result) {
		// The minimum clock table is derived from the SOC BB and is
		// consumed by both core init below and later mode evaluation.
		mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
		mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
		result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
	}
	if (result) {
		core_init_params.project_id = in_out->options.project_id;
		core_init_params.instance = &dml->core_instance;
		core_init_params.minimum_clock_table = &dml->min_clk_table;
		core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
		core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
		core_init_params.ip_caps = &in_out->ip_caps;
		core_init_params.soc_bb = &in_out->soc_bb;
		result = dml->core_instance.initialize(&core_init_params);
		// NOTE(review): this re-copies the caller's ip_caps verbatim; if
		// the intent is to pick up caps patched by core initialize when an
		// explicit IP BB is supplied, the copy source looks wrong -- confirm.
		if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
			memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
		}
	}
	if (result)
		result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
	if (result) {
		pmo_init_params.instance = &dml->pmo_instance;
		pmo_init_params.soc_bb = &dml->soc_bbox;
		pmo_init_params.ip_caps = &dml->ip_caps;
		pmo_init_params.min_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
		pmo_init_params.options = &dml->pmo_options;
		// NOTE(review): the PMO initialize return value is ignored here.
		dml->pmo_instance.initialize(&pmo_init_params);
	}
	return result;
}
static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
{
memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1; //dml->min_clk_table.clean_me_up.soc_bb.num_states - 1;
}
static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
{
memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
out->stage1.min_clk_index_for_latency = 0;
}
/*
 * Fast mode-support query: run core mode support at the highest min-clock
 * state, then (only if that passes) check that an admissible mcache
 * configuration exists, letting the mcache optimization loop adjust the
 * config when the first attempt is inadmissible.
 * in_out->is_supported reflects the combined result.
 * NOTE(review): the return value is only the bare core mode-support result,
 * not the combined one -- callers should read is_supported.
 */
bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
{
	struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
	struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
	bool result = false;
	bool mcache_success = false;
	// Evaluate at the most permissive (highest) min-clock state.
	setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
	l->mode_support_params.instance = &dml->core_instance;
	l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
	l->mode_support_params.min_clk_table = &dml->min_clk_table;
	l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
	result = dml->core_instance.mode_support(&l->mode_support_params);
	l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
	if (result) {
		// Mode fits; confirm mcache admissibility (partial progress allowed).
		struct optimization_phase_params mcache_phase = {
			.dml = dml,
			.display_config = &l->base_display_config_with_meta,
			.test_function = dml2_top_optimization_test_function_mcache,
			.optimize_function = dml2_top_optimization_optimize_function_mcache,
			.optimized_display_config = &l->optimized_display_config_with_meta,
			.all_or_nothing = false,
		};
		mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
	}
	in_out->is_supported = mcache_success;
	return result;
}
/*
 * Build the full register-level programming for a display configuration.
 *
 * Pipeline: validate the mode (speculatively at the lowest min-clock state,
 * falling back to a binary search from the highest state), then run the
 * optimization phases in order -- mcache (mandatory), UCLK p-state, vmin,
 * stutter (best-effort) -- and finally map the winning configuration through
 * DPMM, core mode programming, and watermark mapping into in_out->programming.
 * Returns false (with informative populated) on any mandatory failure.
 *
 * FIX(review): the Phase 5 memset below originally cleared l->vmin_phase
 * (copy/paste from Phase 4) instead of l->stutter_phase.  Latent rather than
 * live -- 'l' is zeroed at entry -- but the wrong struct was cleared.
 */
bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
{
	struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
	struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;

	bool result = false;
	bool mcache_success = false;
	bool uclk_pstate_success = false;
	bool vmin_success = false;
	bool stutter_success = false;
	unsigned int i;

	memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
	memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));

	memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));

	/* Speculatively try the lowest-latency min-clock state first. */
	setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
	l->mode_support_params.instance = &dml->core_instance;
	l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
	l->mode_support_params.min_clk_table = &dml->min_clk_table;
	l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
	result = dml->core_instance.mode_support(&l->mode_support_params);
	l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;

	if (!result) {
		/* Speculation failed: retry at the highest min-clock state. */
		setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
		l->mode_support_params.instance = &dml->core_instance;
		l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
		l->mode_support_params.min_clk_table = &dml->min_clk_table;
		l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
		result = dml->core_instance.mode_support(&l->mode_support_params);
		l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
		if (!result) {
			/* Mode is simply unsupported; report and bail. */
			l->informative_params.instance = &dml->core_instance;
			l->informative_params.programming = in_out->programming;
			l->informative_params.mode_is_supported = false;
			dml->core_instance.populate_informative(&l->informative_params);
			return false;
		}
		/*
		 * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
		 */
		memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
		l->min_clock_for_latency_phase.dml = dml;
		l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
		l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
		l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
		l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
		l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
		l->min_clock_for_latency_phase.all_or_nothing = false;
		dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
		memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
	}
	/*
	 * Phase 2: Satisfy DCC mcache requirements
	 */
	memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
	l->mcache_phase.dml = dml;
	l->mcache_phase.display_config = &l->base_display_config_with_meta;
	l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
	l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
	l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
	l->mcache_phase.all_or_nothing = true;
	mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
	if (!mcache_success) {
		/* mcache admissibility is mandatory for programming. */
		l->informative_params.instance = &dml->core_instance;
		l->informative_params.programming = in_out->programming;
		l->informative_params.mode_is_supported = false;
		dml->core_instance.populate_informative(&l->informative_params);
		in_out->programming->informative.failed_mcache_validation = true;
		return false;
	}
	memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
	/*
	 * Phase 3: Optimize for Pstate
	 */
	memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
	l->uclk_pstate_phase.dml = dml;
	l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
	l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
	l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
	l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
	l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
	l->uclk_pstate_phase.all_or_nothing = true;
	uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
	if (uclk_pstate_success) {
		memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
		l->base_display_config_with_meta.stage3.success = true;
	}
	/*
	 * Phase 4: Optimize for Vmin
	 */
	memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
	l->vmin_phase.dml = dml;
	l->vmin_phase.display_config = &l->base_display_config_with_meta;
	l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
	l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
	l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
	l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
	l->vmin_phase.all_or_nothing = false;
	vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
	if (vmin_success) {
		memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
		l->base_display_config_with_meta.stage4.success = true;
	}
	/*
	 * Phase 5: Optimize for Stutter
	 */
	memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
	l->stutter_phase.dml = dml;
	l->stutter_phase.display_config = &l->base_display_config_with_meta;
	l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
	l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
	l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
	l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
	l->stutter_phase.all_or_nothing = true;
	stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
	if (stutter_success) {
		memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
		/*
		 * NOTE(review): stutter success is recorded in stage4 -- the same
		 * flag vmin sets above.  If the meta struct has a stage5, this
		 * likely should be stage5.success; confirm before changing.
		 */
		l->base_display_config_with_meta.stage4.success = true;
	}
	/*
	 * Populate mcache programming
	 */
	for (i = 0; i < in_out->display_config->num_planes; i++) {
		in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
	}
	/*
	 * Call DPMM to map all requirements to minimum clock state
	 */
	if (result) {
		l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
		l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
		l->dppm_map_mode_params.programming = in_out->programming;
		l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
		l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
		result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
		if (!result)
			in_out->programming->informative.failed_dpmm = true;
	}
	if (result) {
		l->mode_programming_params.instance = &dml->core_instance;
		l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
		l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
		l->mode_programming_params.programming = in_out->programming;
		result = dml->core_instance.mode_programming(&l->mode_programming_params);
		if (!result)
			in_out->programming->informative.failed_mode_programming = true;
	}
	if (result) {
		l->dppm_map_watermarks_params.core = &dml->core_instance;
		l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
		l->dppm_map_watermarks_params.programming = in_out->programming;
		result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
	}
	/* Always emit informative state, success or failure. */
	l->informative_params.instance = &dml->core_instance;
	l->informative_params.programming = in_out->programming;
	l->informative_params.mode_is_supported = result;
	dml->core_instance.populate_informative(&l->informative_params);
	return result;
}
/*
 * Thin public wrapper: mcache programming is built entirely by the
 * dml_top_mcache helper.
 */
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
{
	return dml2_top_mcache_build_mcache_programming(in_out);
}

View file

@ -0,0 +1,545 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_debug.h"
#include "dml_top_mcache.h"
#include "lib_float_math.h"
#include "dml2_internal_shared_types.h"
/*
* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
* that the horizontal viewport does not span more than 2 cache slices.
*
* It optionally also can apply a constant shift to all the cache boundaries.
*/
/* Sentinel mcache ID written to register fields that have no cache assigned. */
static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
/* Sentinel split coordinate meaning "no split programmed" for a pipe. */
static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
/*
 * Maps a pipe's horizontal viewport [pipe_h_vp_start, pipe_h_vp_end] onto the
 * mcache slices delimited by mcache_boundaries (optionally shifted left by
 * "shift").  The mapping is valid when the viewport covers at most two
 * adjacent slices (adjacency wraps around the last boundary).  On success the
 * covered slice indices are written to *first_offset/*second_offset
 * (*second_offset == -1 when only one slice is touched).  Both output
 * pointers may be NULL when only the validity check is wanted.
 */
static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
	int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
{
	const int MAX_VP = 0xFFFFFF;
	int start_slice;
	int end_slice;
	int lo;
	int hi;

	/* Zero or one boundary means a single slice covers everything. */
	if (num_boundaries <= 1) {
		if (first_offset && second_offset) {
			*first_offset = 0;
			*second_offset = -1;
		}
		return true;
	}

	/* Walk slices left-to-right to find the one containing the viewport start. */
	lo = 0;
	for (start_slice = 0; start_slice < num_boundaries; start_slice++) {
		hi = mcache_boundaries[start_slice] - shift - 1;
		if (lo <= pipe_h_vp_start && pipe_h_vp_start <= hi)
			break;
		lo = hi + 1;
	}

	/* Walk right-to-left (down to the implicit slice before boundary 0)
	 * to find the slice containing the viewport end. */
	hi = MAX_VP;
	for (end_slice = num_boundaries - 1; end_slice >= -1; end_slice--) {
		lo = (end_slice >= 0) ? (mcache_boundaries[end_slice] - shift) : 0;
		if (lo <= pipe_h_vp_end && pipe_h_vp_end <= hi)
			break;
		hi = lo - 1;
	}
	/* Normalize the -1 "before first boundary" result with a wraparound. */
	end_slice = (end_slice + 1) % num_boundaries;

	if (end_slice == start_slice) {
		/* Viewport fits inside one slice. */
		if (first_offset && second_offset) {
			*first_offset = start_slice;
			*second_offset = -1;
		}
		return true;
	}
	if (end_slice == (start_slice + 1) % num_boundaries) {
		/* Viewport spans exactly two adjacent slices. */
		if (first_offset && second_offset) {
			*first_offset = start_slice;
			*second_offset = end_slice;
		}
		return true;
	}

	/* Spans more than two slices: not programmable. */
	return false;
}
/*
* For a given set of pipe start/end x positions, checks to see it can support the input mcache splitting.
* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
*/
/*
 * Searches for a constant boundary shift such that every pipe's viewport
 * [pipe_vp_startx[i], pipe_vp_endx[i]] admits a valid first/second mcache
 * split.  Candidate shifts start at 0 and grow in steps of shift_granularity,
 * bounded by the narrowest slice width (shifting further can never help).
 * On return *shift holds the last candidate tried (the working shift on
 * success).  Returns true when a working shift was found.
 */
static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
	int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
{
	unsigned int i;
	unsigned int pipe;
	int candidate;
	int narrowest_slice = 0xFFFF;
	bool all_pipes_ok = false;

	/* Upper bound for the search: the width of the narrowest slice. */
	for (i = 0; i < num_boundaries; i++) {
		unsigned int width;

		width = (i == 0) ? mcache_boundaries[0] : (mcache_boundaries[i] - mcache_boundaries[i - 1]);
		if (narrowest_slice > (int)width)
			narrowest_slice = width;
	}

	for (candidate = 0; candidate <= narrowest_slice; candidate += shift_granularity) {
		all_pipes_ok = true;
		for (pipe = 0; pipe < pipe_count; pipe++) {
			if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, candidate,
					pipe_vp_startx[pipe], pipe_vp_endx[pipe], 0, 0)) {
				all_pipes_ok = false;
				break;
			}
		}
		if (all_pipes_ok)
			break;
	}

	*shift = candidate;
	return all_pipes_ok;
}
/*
* Counts the number of elements inside input array within the given span length.
* Formally, what is the size of the largest subset of the array where the largest and smallest element
* differ no more than the span.
*/
/*
 * Sliding-window count over a sorted array: for each window start, counts how
 * many consecutive elements are needed before the values exceed the span, and
 * returns the largest such count.  Note the quirks preserved from the
 * original: an empty array reports 1, and if no element ever exceeds the span
 * the result is 0 (callers compare the result against <= 1 to mean "fits").
 */
static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
{
	unsigned int window_start;
	unsigned int window_floor; /* smallest value admissible in the current window */
	unsigned int best;
	unsigned int idx;

	if (array_size == 0)
		return 1;

	if (span == 0)
		return array_size > 0 ? 1 : 0;

	window_floor = 0;
	best = 0;
	for (window_start = 0; window_start < array_size; window_start++) {
		for (idx = window_start; idx < array_size; idx++) {
			if (array[idx] - window_floor > span) {
				if (idx - window_start + 1 > best)
					best = idx - window_start + 1;
				break;
			}
		}
		/* Next window's floor is one past the element we just stepped over. */
		if (window_start + 1 < array_size)
			window_floor = array[window_start] + 1;
	}

	return best;
}
/*
 * Derives per-pipe horizontal viewport start/end x positions for the given
 * scaling transform: the full viewport is split evenly across num_pipes, and
 * each slice is widened by up to MAX_SCL_VP_OVERLAP pixels on either side,
 * clamped to [0, full_vp_width - 1].  Explicit scaling transforms are not
 * supported.  Returns true when the split was produced.
 * NOTE(review): h_active is currently unused — confirm it is intentionally
 * kept for interface symmetry.
 */
static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
	enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
{
	int i, slice_width;
	const char MAX_SCL_VP_OVERLAP = 3;
	bool success = false;
	switch (scaling_transform) {
	case dml2_scaling_transform_centered:
	case dml2_scaling_transform_aspect_ratio:
	case dml2_scaling_transform_fullscreen:
		slice_width = full_vp_width / num_pipes;
		for (i = 0; i < num_pipes; i++) {
			pipe_vp_x_start[i] = i * slice_width;
			pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
			/* Extend each slice by the overlap, clamped to the plane edges. */
			if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
				pipe_vp_x_start[i] = 0;
			else
				pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
			if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
				pipe_vp_x_end[i] = full_vp_width - 1;
			else
				pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
		}
		/* Fix: report success for the supported transforms; previously this
		 * branch fell through with success still false, so the function
		 * unconditionally returned false even after computing the split. */
		success = true;
		break;
	case dml2_scaling_transform_explicit:
	default:
		success = false;
		break;
	}
	return success;
}
/*
 * Validates that each DCC-enabled plane's mcache allocation is admissible for
 * the current pipe topology (ODM/MPC splits): no pipe's horizontal viewport
 * may span more than two mcache slices.  When the unshifted boundaries fail,
 * a constant boundary shift is searched for (stationary viewports only) and,
 * if found, applied to the allocation.  per_plane_status[] receives a
 * pass/fail flag for every plane; returns true only when all planes pass.
 */
bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
{
	struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
	struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
	const int MAX_PIXEL_OVERLAP = 6;
	int max_per_pipe_vp_p0 = 0;
	int max_per_pipe_vp_p1 = 0;
	int temp, p0shift, p1shift;
	unsigned int plane_index = 0;
	unsigned int i;
	char odm_combine_factor = 1;
	char mpc_combine_factor = 1;
	char num_dpps;
	unsigned int num_boundaries;
	enum dml2_scaling_transform scaling_transform;
	const struct dml2_plane_parameters *plane;
	const struct dml2_stream_parameters *stream;
	bool p0pass = false;
	bool p1pass = false;
	bool all_pass = true;
	for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
		if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable) {
			/* Fix: planes without DCC have no mcache constraints; mark them
			 * admissible explicitly so per_plane_status is never left
			 * uninitialized for the caller. */
			params->per_plane_status[plane_index] = true;
			continue;
		}
		/* Fix: reset the per-plane pass flags on every iteration;
		 * previously a pass from an earlier plane leaked into later
		 * iterations and could skip the shift-search fallback. */
		p0pass = false;
		p1pass = false;
		plane = &params->display_cfg->plane_descriptors[plane_index];
		stream = &params->display_cfg->stream_descriptors[plane->stream_index];
		odm_combine_factor = (char)params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
		if (odm_combine_factor == 1)
			mpc_combine_factor = (char)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
		else
			mpc_combine_factor = 1;
		if (odm_combine_factor > 1) {
			/* ODM: each pipe sees at most the scaled slice of h_active,
			 * capped by the plane width. */
			max_per_pipe_vp_p0 = plane->surface.plane0.width;
			temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
			if (temp < max_per_pipe_vp_p0)
				max_per_pipe_vp_p0 = temp;
			max_per_pipe_vp_p1 = plane->surface.plane1.width;
			temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
			if (temp < max_per_pipe_vp_p1)
				max_per_pipe_vp_p1 = temp;
		} else {
			/* MPC: the plane is split evenly across the DPPs. */
			max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
			max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
		}
		/* NOTE(review): p0 is padded on both sides but p1 only once —
		 * confirm the asymmetry is intentional. */
		max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
		max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
		p0shift = 0;
		p1shift = 0;
		// The last element in the unshifted boundary array will always be the first pixel outside the
		// plane, which means there's no mcache associated with it, so -1
		num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
		if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
			num_boundaries, max_per_pipe_vp_p0) <= 1) {
			p0pass = true;
		}
		num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
		if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
			num_boundaries, max_per_pipe_vp_p1) <= 1) {
			p1pass = true;
		}
		if (!p0pass || !p1pass) {
			if (odm_combine_factor > 1) {
				num_dpps = odm_combine_factor;
				scaling_transform = plane->composition.scaling_transform;
			} else {
				num_dpps = mpc_combine_factor;
				scaling_transform = dml2_scaling_transform_fullscreen;
			}
			if (!p0pass) {
				if (plane->composition.viewport.stationary) {
					calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
						stream->timing.h_active, num_dpps, scaling_transform,
						&l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
					p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
						params->mcache_allocations[plane_index].num_mcaches_plane0,
						&l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
						params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
				}
			}
			if (!p1pass) {
				if (plane->composition.viewport.stationary) {
					/* Fix: compute the plane1 split into the plane1 scratch
					 * arrays; previously this wrote into plane0 scratch while
					 * the shift search below read the (stale) plane1 arrays. */
					calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
						stream->timing.h_active, num_dpps, scaling_transform,
						&l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index]);
					p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
						params->mcache_allocations[plane_index].num_mcaches_plane1,
						&l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
						params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
				}
			}
		}
		/* Apply the discovered shift only when both planes pass. */
		if (p0pass && p1pass) {
			for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
				params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
			}
			for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
				params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
			}
		}
		params->per_plane_status[plane_index] = p0pass && p1pass;
		all_pass &= p0pass && p1pass;
	}
	return all_pass;
}
bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
{
bool success = true;
int config_index, pipe_index;
int first_offset, second_offset;
int free_per_plane_reg_index = 0;
memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
for (config_index = 0; config_index < params->num_configurations; config_index++) {
for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
// Allocate storage for the mcache regs
params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
// First initialize all entries to special valid MCache ID and special valid split coordinate
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
// P0 always enabled
if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
0,
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
&first_offset, &second_offset)) {
success = false;
break;
}
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
if (second_offset >= 0) {
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
}
// Populate P1 if enabled
if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
0,
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
&first_offset, &second_offset)) {
success = false;
break;
}
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
if (second_offset >= 0) {
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
}
}
}
}
}
return success;
}
/*
 * Assigns globally-unique, ascending mcache IDs to every valid surface
 * allocation.  Handles the wrapped "pseudo-last" slice, optional dedicated
 * MALL mcaches, and the last-slice sharing modes (plane0/plane1 sharing and
 * MALL/regular sharing).  Assignment order matters: sharing fix-ups below
 * rely on IDs having been handed out in ascending order above.
 */
void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
{
	int i;
	unsigned int j;
	int next_unused_cache_id = 0;
	for (i = 0; i < params->num_allocations; i++) {
		if (!params->allocations[i].valid)
			continue;
		for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
			params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
		}
		for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
			params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
		}
		// The "pseudo-last" slice is always wrapped around
		params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
			params->allocations[i].global_mcache_ids_plane0[0];
		params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
			params->allocations[i].global_mcache_ids_plane1[0];
		// If we need dedicated caches for mall requesting, then we assign them here.
		if (params->allocations[i].requires_dedicated_mall_mcache) {
			for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
				params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
			}
			for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
				params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
			}
			// The "pseudo-last" slice is always wrapped around
			params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
				params->allocations[i].global_mcache_ids_mall_plane0[0];
			params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
				params->allocations[i].global_mcache_ids_mall_plane1[0];
		}
		// If P0 and P1 are sharing caches, then it means the largest mcache IDs for p0 and p1 can be the same
		// since mcache IDs are always ascending, then it means the largest mcacheID of p1 should be the
		// largest mcacheID of P0
		if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
			params->allocations[i].last_slice_sharing.plane0_plane1) {
			params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
				params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
		}
		// If we need dedicated caches handle last slice sharing
		if (params->allocations[i].requires_dedicated_mall_mcache) {
			if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
				params->allocations[i].last_slice_sharing.plane0_plane1) {
				params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
					params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
			}
			// If mall_comb_mcache_l is set then it means that largest mcache ID for MALL p0 can be same as regular read p0
			if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
				params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
					params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
			}
			// If mall_comb_mcache_c is set then it means that largest mcache ID for MALL p1 can be same as regular
			// read p1 (which can be same as regular read p0 if plane0_plane1 is also set)
			if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
				params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
					params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
			}
		}
		// If you don't need dedicated mall mcaches, the mall mcache assignments are identical to the normal requesting
		if (!params->allocations[i].requires_dedicated_mall_mcache) {
			memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
				sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
			memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
				sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
		}
	}
}
/*
 * Computes the mcache allocation (count and x-offsets) for every DCC-enabled
 * plane via the core instance, then verifies that the total number of
 * mcaches required fits within the SoC budget.  Returns true when all
 * allocations succeed and fit; trivially true on SoCs with no DCC mcaches.
 */
bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
{
	struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
	struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
	unsigned int total_mcaches_required = 0;
	unsigned int i;

	/* No mcaches on this SoC: nothing to allocate or verify. */
	if (dml->soc_bbox.num_dcc_mcaches == 0)
		return true;

	l->calc_mcache_params.instance = &dml->core_instance;
	for (i = 0; i < params->display_config->num_planes; i++) {
		if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
			/* Planes without DCC consume no mcaches. */
			memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
			continue;
		}
		l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
		l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
		l->calc_mcache_params.plane_index = i;
		/* Fix: fail immediately on an allocation error.  Previously the
		 * loop broke with result = false, but the budget comparison below
		 * overwrote result and could report success on a partial total. */
		if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params))
			return false;
		if (params->mcache_allocations[i].valid) {
			total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
			/* A last slice shared between plane0 and plane1 saves one mcache. */
			if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
				total_mcaches_required--;
		}
	}
	/* NOTE(review): i == num_planes here, so "plane_%d" reports the plane
	 * count rather than an index — confirm the intended log format. */
	dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);

	return total_mcaches_required <= dml->soc_bbox.num_dcc_mcaches;
}

View file

@ -0,0 +1,24 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML_TOP_MCACHE_H__
#define __DML_TOP_MCACHE_H__
#include "dml2_external_lib_deps.h"
#include "dml_top_display_cfg_types.h"
#include "dml_top_types.h"
#include "dml2_internal_shared_types.h"
bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params);
void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params);
bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params);
bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
bool dml2_top_mcache_unit_test(void);
#endif

View file

@ -0,0 +1,32 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_debug.h"
/*
 * Debug print helper.  Forwards to vprintf() only when both _DEBUG and
 * _DEBUG_PRINTS are defined at build time and returns the number of
 * characters written; otherwise it compiles to a no-op returning 0.
 */
int dml2_printf(const char *format, ...)
{
#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
	va_list args;
	int written;

	va_start(args, format);
	written = vprintf(format, args);
	va_end(args);

	return written;
#else
	(void)format;
	return 0;
#endif
}
/*
 * Runtime assertion hook used by the DML2_ASSERT() macro in debug builds.
 * Currently a no-op: the ASSERT() call is commented out, so the condition
 * is accepted but never checked at runtime.
 */
void dml2_assert(int condition)
{
	//ASSERT(condition);
}

View file

@ -0,0 +1,18 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
/* DML2_ASSERT() invokes dml2_assert() only in _DEBUG builds; in release
 * builds it expands to nothing and the condition is not evaluated, so it
 * must never carry side effects. */
#ifdef _DEBUG
#define DML2_ASSERT(condition) dml2_assert(condition)
#else
#define DML2_ASSERT(condition)
#endif
/* printf-style debug logger; returns the character count written, or 0
 * when debug printing is compiled out. */
int dml2_printf(const char *format, ...);
/* Runtime assertion hook backing DML2_ASSERT() in debug builds. */
void dml2_assert(int condition);
#endif

View file

@ -0,0 +1,981 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DML2_INTERNAL_SHARED_TYPES_H__
#define __DML2_INTERNAL_SHARED_TYPES_H__
#include "dml2_external_lib_deps.h"
#include "dml_top_types.h"
#include "dml2_core_shared_types.h"
/*
* DML2 MCG Types and Interfaces
*/
/* Capacity of the DRAM-bandwidth-to-minimum-clock lookup table. */
#define DML_MCG_MAX_CLK_TABLE_SIZE 20
/* One table row: a pre-derate DRAM bandwidth mapped to the minimum FCLK and
 * DCFCLK needed to sustain it. */
struct dram_bw_to_min_clk_table_entry {
	unsigned long long pre_derate_dram_bw_kbps;
	unsigned long min_fclk_khz;
	unsigned long min_dcfclk_khz;
};
/* Fixed-capacity table of the entries above; only num_entries are valid. */
struct dml2_mcg_dram_bw_to_min_clk_table {
	struct dram_bw_to_min_clk_table_entry entries[DML_MCG_MAX_CLK_TABLE_SIZE];
	unsigned int num_entries;
};
/* Minimum clock table produced by MCG: maximum achievable clocks, fixed
 * (non-DPM-scaling) clocks, and a DRAM-bandwidth-to-minimum-clock mapping.
 * All frequencies are in kHz. */
struct dml2_mcg_min_clock_table {
	/* Upper bounds for the DPM-scaling clocks. */
	struct {
		unsigned int dispclk;
		unsigned int dppclk;
		unsigned int dscclk;
		unsigned int dtbclk;
		unsigned int phyclk;
		unsigned int fclk;
		unsigned int dcfclk;
	} max_clocks_khz;
	/* Clocks that do not change with DPM state. */
	struct {
		unsigned int dprefclk;
		unsigned int xtalclk;
		unsigned int pcierefclk;
		unsigned int dchubrefclk;
		unsigned int amclk;
	} fixed_clocks_khz;
	struct dml2_mcg_dram_bw_to_min_clk_table dram_bw_table;
};
/* Parameter block for building the minimum clock table from the SoC
 * bounding box. */
struct dml2_mcg_build_min_clock_table_params_in_out {
	/*
	 * Input
	 */
	struct dml2_soc_bb *soc_bb;
	struct {
		bool perform_pseudo_build;
	} clean_me_up;
	/*
	 * Output
	 */
	struct dml2_mcg_min_clock_table *min_clk_table;
};
/* MCG (minimum clock generator) component interface. */
struct dml2_mcg_instance {
	bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
	bool (*unit_test)(void);
};
/*
* DML2 DPMM Types and Interfaces
*/
/* Inputs/outputs for mapping a validated display configuration onto the
 * SoC's DPM clock states. */
struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out {
	/*
	 * Input
	 */
	struct dml2_core_ip_params *ip;
	struct dml2_soc_bb *soc_bb;
	struct dml2_mcg_min_clock_table *min_clk_table;
	const struct display_configuation_with_meta *display_cfg;
	struct {
		bool perform_pseudo_map;
		struct dml2_core_internal_soc_bb *soc_bb;
	} clean_me_up;
	/*
	 * Output
	 */
	struct dml2_display_cfg_programming *programming;
};
/* Inputs/outputs for translating core results into watermark programming. */
struct dml2_dpmm_map_watermarks_params_in_out {
	/*
	 * Input
	 */
	const struct display_configuation_with_meta *display_cfg;
	const struct dml2_core_instance *core;
	/*
	 * Output
	 */
	struct dml2_display_cfg_programming *programming;
};
/* DPMM (DPM manager) component interface. */
struct dml2_dpmm_instance {
	bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
	bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
	bool (*unit_test)(void);
};
/*
* DML2 Core Types and Interfaces
*/
/* One-time initialization parameters for a core instance. */
struct dml2_core_initialize_in_out {
	enum dml2_project_id project_id;
	struct dml2_core_instance *instance;
	struct dml2_soc_bb *soc_bb;
	struct dml2_ip_capabilities *ip_caps;
	struct dml2_mcg_min_clock_table *minimum_clock_table;
	void *explicit_ip_bb;
	unsigned int explicit_ip_bb_size;
	// FIXME_STAGE2 can remove but dcn3 version still need this
	struct {
		struct soc_bounding_box_st *soc_bb;
		struct soc_states_st *soc_states;
	} legacy;
};
/* Urgent and average bandwidth demand, in kilobytes per second. */
struct core_bandwidth_requirements {
	int urgent_bandwidth_kbytes_per_sec;
	int average_bandwidth_kbytes_per_sec;
};
/* Per-plane results from core mode support. */
struct core_plane_support_info {
	int dpps_used;
	int dram_change_latency_hiding_margin_in_active;
	int active_latency_hiding_us;
	int mall_svp_size_requirement_ways;
	int nominal_vblank_pstate_latency_hiding_us;
};
/* Per-stream results from core mode support. */
struct core_stream_support_info {
	unsigned int odms_used;
	/* FAMS2 SubVP support info */
	unsigned int phantom_min_v_active;
	unsigned int phantom_v_startup;
	unsigned int phantom_v_active;
	unsigned int phantom_v_total;
	int vblank_reserved_time_us;
	int num_dsc_slices;
	bool dsc_enable;
};
/* Aggregated support verdict for a full display configuration, with the
 * per-stream and per-plane breakdown. */
struct core_display_cfg_support_info {
	bool is_supported;
	struct core_stream_support_info stream_support_info[DML2_MAX_PLANES];
	struct core_plane_support_info plane_support_info[DML2_MAX_PLANES];
	struct {
		struct dml2_core_internal_mode_support_info support_info;
	} clean_me_up;
};
/* Output of core mode support: required bandwidths and clocks (globally and
 * per stream/plane) plus the detailed support verdict. */
struct dml2_core_mode_support_result {
	struct {
		/* Requirements while an SVP phantom stream is prefetching. */
		struct {
			unsigned long urgent_bw_sdp_kbps;
			unsigned long average_bw_sdp_kbps;
			unsigned long urgent_bw_dram_kbps;
			unsigned long average_bw_dram_kbps;
			unsigned long dcfclk_khz;
			unsigned long fclk_khz;
		} svp_prefetch;
		/* Requirements during normal active display. */
		struct {
			unsigned long urgent_bw_sdp_kbps;
			unsigned long average_bw_sdp_kbps;
			unsigned long urgent_bw_dram_kbps;
			unsigned long average_bw_dram_kbps;
			unsigned long dcfclk_khz;
			unsigned long fclk_khz;
		} active;
		unsigned int dispclk_khz;
		unsigned int dcfclk_deepsleep_khz;
		unsigned int socclk_khz;
		unsigned int uclk_pstate_supported;
		unsigned int fclk_pstate_supported;
	} global;
	struct {
		unsigned int dscclk_khz;
		unsigned int dtbclk_khz;
		unsigned int phyclk_khz;
	} per_stream[DML2_MAX_PLANES];
	struct {
		unsigned int dppclk_khz;
		unsigned int mall_svp_allocation_mblks;
		unsigned int mall_full_frame_allocation_mblks;
	} per_plane[DML2_MAX_PLANES];
	struct core_display_cfg_support_info cfg_support_info;
};
/* Optimization stage 1 state: minimum clocks for latency. */
struct dml2_optimization_stage1_state {
	bool performed;
	bool success;
	int min_clk_index_for_latency;
};
/* Optimization stage 2 state: mcache admissibility and allocations. */
struct dml2_optimization_stage2_state {
	bool performed;
	bool success;
	// Whether or not each plane supports mcache
	// The number of valid elements == display_cfg.num_planes
	// The indexing of pstate_switch_modes matches plane_descriptors[]
	bool per_plane_mcache_support[DML2_MAX_PLANES];
	struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
};
/* PMO candidate-list sizing limits. */
#define DML2_PMO_LEGACY_PREFETCH_MAX_TWAIT_OPTIONS 8
#define DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE 10
#define DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE 3
/* Phantom-stream timing metadata for implicitly generated SubVP. */
struct dml2_implicit_svp_meta {
	bool valid;
	unsigned long v_active;
	unsigned long v_total;
	unsigned long v_front_porch;
};
/* Scheduling window shared by every FAMS2 pstate method. */
struct dml2_fams2_per_method_common_meta {
	/* generic params */
	unsigned int allow_start_otg_vline;
	unsigned int allow_end_otg_vline;
	/* scheduling params */
	double allow_time_us;
	double disallow_time_us;
	double period_us;
};
/* Per-stream FAMS2 metadata: nominal/stretched timing, delay budgets, and
 * the scheduling windows for each supported pstate method. */
struct dml2_fams2_meta {
	bool valid;
	double otg_vline_time_us;
	unsigned int scheduling_delay_otg_vlines;
	unsigned int vertical_interrupt_ack_delay_otg_vlines;
	unsigned int allow_to_target_delay_otg_vlines;
	unsigned int contention_delay_otg_vlines;
	unsigned int min_allow_width_otg_vlines;
	unsigned int nom_vtotal;
	double nom_refresh_rate_hz;
	double nom_frame_time_us;
	unsigned int max_vtotal;
	double min_refresh_rate_hz;
	double max_frame_time_us;
	unsigned int dram_clk_change_blackout_otg_vlines;
	/* Method: switch during vertical active (DET fill latency hiding). */
	struct {
		unsigned int max_vactive_det_fill_delay_otg_vlines;
		struct dml2_fams2_per_method_common_meta common;
	} method_vactive;
	/* Method: switch during vertical blank. */
	struct {
		struct dml2_fams2_per_method_common_meta common;
	} method_vblank;
	/* Method: SubVP with a phantom stream. */
	struct {
		unsigned int programming_delay_otg_vlines;
		unsigned int df_throttle_delay_otg_vlines;
		unsigned int prefetch_to_mall_delay_otg_vlines;
		unsigned long phantom_vactive;
		unsigned long phantom_vfp;
		unsigned long phantom_vtotal;
		struct dml2_fams2_per_method_common_meta common;
	} method_subvp;
	/* Method: DRR vtotal stretching. */
	struct {
		unsigned int programming_delay_otg_vlines;
		unsigned int stretched_vtotal;
		struct dml2_fams2_per_method_common_meta common;
	} method_drr;
};
/* Optimization stage 3 state: UCLK pstate support method selection. */
struct dml2_optimization_stage3_state {
	bool performed;
	bool success;
	// The pstate support mode for each plane
	// The number of valid elements == display_cfg.num_planes
	// The indexing of pstate_switch_modes matches plane_descriptors[]
	enum dml2_uclk_pstate_support_method pstate_switch_modes[DML2_MAX_PLANES];
	// Meta-data for implicit SVP generation, indexed by stream index
	struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
	// Meta-data for FAMS2
	bool fams2_required;
	struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
	int min_clk_index_for_latency;
};
/* Optimization stage 4 state: Vmin. */
struct dml2_optimization_stage4_state {
	bool performed;
	bool success;
	bool unoptimizable_streams[DML2_MAX_DCN_PIPES];
};
/* Optimization stage 5 state: stutter. */
struct dml2_optimization_stage5_state {
	bool performed;
	bool success;
	// NOTE(review): declared bool despite the "_us" suffix — confirm the
	// intended type.
	bool optimal_reserved_time_in_vblank_us;
	bool vblank_includes_z8_optimization;
};
/* A display configuration together with mode-support results and the state
 * of each optimization stage.  (Tag spelling "configuation" is the
 * established identifier.) */
struct display_configuation_with_meta {
	struct dml2_display_cfg display_config;
	struct dml2_core_mode_support_result mode_support_result;
	// Stage 1 = Min Clocks for Latency
	struct dml2_optimization_stage1_state stage1;
	// Stage 2 = MCache
	struct dml2_optimization_stage2_state stage2;
	// Stage 3 = UCLK PState
	struct dml2_optimization_stage3_state stage3;
	// Stage 4 = Vmin
	struct dml2_optimization_stage4_state stage4;
	// Stage 5 = Stutter
	struct dml2_optimization_stage5_state stage5;
};
/* Inputs/outputs for a core mode-support check at a given minimum-clock
 * table index. */
struct dml2_core_mode_support_in_out {
	/*
	 * Inputs
	 */
	struct dml2_core_instance *instance;
	const struct display_configuation_with_meta *display_cfg;
	struct dml2_mcg_min_clock_table *min_clk_table;
	int min_clk_index;
	/*
	 * Outputs
	 */
	struct dml2_core_mode_support_result mode_support_result;
	/* Legacy (DCN3-era) in/out fields kept for the older core backend. */
	struct {
		// Inputs
		struct dml_display_cfg_st *display_cfg;
		// Outputs
		struct dml_mode_support_info_st *support_info;
		unsigned int out_lowest_state_idx;
		unsigned int min_fclk_khz;
		unsigned int min_dcfclk_khz;
		unsigned int min_dram_speed_mts;
		unsigned int min_socclk_khz;
		unsigned int min_dscclk_khz;
		unsigned int min_dtbclk_khz;
		unsigned int min_phyclk_khz;
	} legacy;
};
/* Inputs/outputs for core mode programming of a supported configuration. */
struct dml2_core_mode_programming_in_out {
	/*
	 * Inputs
	 */
	struct dml2_core_instance *instance;
	const struct display_configuation_with_meta *display_cfg;
	const struct core_display_cfg_support_info *cfg_support_info;
	/*
	 * Outputs (the clock frequencies in the programming struct are also
	 * read as inputs)
	 */
	struct dml2_display_cfg_programming *programming;
};
struct dml2_core_populate_informative_in_out {
/*
* Inputs
*/
struct dml2_core_instance *instance;
// If this is set, then the mode was supported, and mode programming
// was successfully run.
// Otherwise, mode programming was not run, because mode support failed.
bool mode_is_supported;
/*
* Outputs
*/
struct dml2_display_cfg_programming *programming;
};
struct dml2_calculate_mcache_allocation_in_out {
/*
* Inputs
*/
struct dml2_core_instance *instance;
const struct dml2_plane_parameters *plane_descriptor;
unsigned int plane_index;
/*
* Outputs
*/
struct dml2_mcache_surface_allocation *mcache_allocation;
};
/* Placeholder for core-internal input state (currently unused). */
struct dml2_core_internal_state_inputs {
	unsigned int dummy;
};

/* Placeholder for core-internal intermediate state (currently unused). */
struct dml2_core_internal_state_intermediates {
	unsigned int dummy;
};

/* Scratch space for a single mode-support evaluation. */
struct dml2_core_mode_support_locals {
	struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
	/* Display config with phantom (SVP) planes expanded in. */
	struct dml2_display_cfg svp_expanded_display_cfg;
};

/* Scratch space for a single mode-programming evaluation. */
struct dml2_core_mode_programming_locals {
	struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
	struct dml2_display_cfg svp_expanded_display_cfg;
};

/* Core scratch, including the mappings between main streams/planes and
 * their SVP phantom counterparts. */
struct dml2_core_scratch {
	struct dml2_core_mode_support_locals mode_support_locals;
	struct dml2_core_mode_programming_locals mode_programming_locals;
	int main_stream_index_from_svp_stream_index[DML2_MAX_PLANES];
	int svp_stream_index_from_main_stream_index[DML2_MAX_PLANES];
	int main_plane_index_to_phantom_plane_index[DML2_MAX_PLANES];
	int phantom_plane_index_to_main_plane_index[DML2_MAX_PLANES];
};

/* A core implementation instance: state plus its virtual interface. */
struct dml2_core_instance {
	struct dml2_mcg_min_clock_table *minimum_clock_table;
	struct dml2_core_internal_state_inputs inputs;
	struct dml2_core_internal_state_intermediates intermediates;
	struct dml2_core_scratch scratch;
	bool (*initialize)(struct dml2_core_initialize_in_out *in_out);
	bool (*mode_support)(struct dml2_core_mode_support_in_out *in_out);
	bool (*mode_programming)(struct dml2_core_mode_programming_in_out *in_out);
	bool (*populate_informative)(struct dml2_core_populate_informative_in_out *in_out);
	bool (*calculate_mcache_allocation)(struct dml2_calculate_mcache_allocation_in_out *in_out);
	bool (*unit_test)(void);
	/* Legacy mode-lib state; slated for removal per the member name. */
	struct {
		struct dml2_core_internal_display_mode_lib mode_lib;
	} clean_me_up;
};
/*
 * DML2 PMO Types and Interfaces
 */
struct dml2_pmo_initialize_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct dml2_soc_bb *soc_bb;
	struct dml2_ip_capabilities *ip_caps;
	struct dml2_pmo_options *options;
	int min_clock_table_size;
};

struct dml2_pmo_optimize_dcc_mcache_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	const struct dml2_display_cfg *display_config;
	/* Per-plane admissibility flags from the mcache validation pass. */
	bool *dcc_mcache_supported;
	struct core_display_cfg_support_info *cfg_support_info;
	/*
	 * Output
	 */
	struct dml2_display_cfg *optimized_display_cfg;
};

struct dml2_pmo_init_for_vmin_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
};

struct dml2_pmo_test_for_vmin_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	const struct display_configuation_with_meta *display_config;
	const struct dml2_soc_vmin_clock_limits *vmin_limits;
};

struct dml2_pmo_optimize_for_vmin_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
	/*
	 * Output
	 */
	struct display_configuation_with_meta *optimized_display_config;
};

struct dml2_pmo_init_for_pstate_support_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
};

struct dml2_pmo_test_for_pstate_support_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
};

struct dml2_pmo_optimize_for_pstate_support_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
	/* True when the previously proposed candidate failed validation. */
	bool last_candidate_failed;
	/*
	 * Output
	 */
	struct display_configuation_with_meta *optimized_display_config;
};

struct dml2_pmo_init_for_stutter_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
};

struct dml2_pmo_test_for_stutter_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
};

struct dml2_pmo_optimize_for_stutter_in_out {
	/*
	 * Input
	 */
	struct dml2_pmo_instance *instance;
	struct display_configuation_with_meta *base_display_config;
	/* True when the previously proposed candidate failed validation. */
	bool last_candidate_failed;
	/*
	 * Output
	 */
	struct display_configuation_with_meta *optimized_display_config;
};
/* P-state handling strategies, grouped into contiguous numeric ranges so
 * the bitmask macros below can cover each category with a single span. */
enum dml2_pmo_pstate_strategy {
	dml2_pmo_pstate_strategy_na = 0,
	/* hw exclusive modes */
	dml2_pmo_pstate_strategy_vactive = 1,
	dml2_pmo_pstate_strategy_vblank = 2,
	dml2_pmo_pstate_strategy_reserved_hw = 5,
	/* fw assisted exclusive modes */
	dml2_pmo_pstate_strategy_fw_svp = 6,
	dml2_pmo_pstate_strategy_reserved_fw = 10,
	/* fw assisted modes requiring drr modulation */
	dml2_pmo_pstate_strategy_fw_vactive_drr = 11,
	dml2_pmo_pstate_strategy_fw_vblank_drr = 12,
	dml2_pmo_pstate_strategy_fw_svp_drr = 13,
	dml2_pmo_pstate_strategy_reserved_fw_drr_fixed = 20,
	dml2_pmo_pstate_strategy_fw_drr = 21,
	dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
};

/* Each mask sets one bit per enum value in the inclusive range named by its
 * bounds, e.g. PMO_NO_DRR_STRATEGY_MASK covers bits 0..10 (na..reserved_fw). */
#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
#define PMO_DRR_FIXED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_fw_drr - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_drr)
#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_svp + 1)) - 1) << dml2_pmo_pstate_strategy_fw_svp)

#define PMO_DCN4_MAX_DISPLAYS 4
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
/* Per-evaluation PMO scratch; only one union member is live, selected by
 * the project (DCN3 legacy path vs DCN4 path). */
struct dml2_pmo_scratch {
	union {
		struct {
			double reserved_time_candidates[DML2_MAX_PLANES][DML2_PMO_LEGACY_PREFETCH_MAX_TWAIT_OPTIONS];
			int reserved_time_candidates_count[DML2_MAX_PLANES];
			int current_candidate[DML2_MAX_PLANES];
			int min_latency_index;
			int max_latency_index;
			int cur_latency_index;
			int stream_mask;
		} pmo_dcn3;
		struct {
			enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[DML2_MAX_PLANES][DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
			bool allow_state_increase_for_strategy[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
			int num_pstate_candidates;
			int cur_pstate_candidate;
			unsigned int stream_plane_mask[DML2_MAX_PLANES];
			unsigned int stream_vactive_capability_mask;
			int min_latency_index;
			int max_latency_index;
			int cur_latency_index;
			// Stores all the implicit SVP meta information indexed by stream index of the display
			// configuration under inspection, built at optimization stage init
			struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
			struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
			unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
			unsigned int num_stutter_candidates;
			unsigned int cur_stutter_candidate;
			bool z8_vblank_optimizable;
			/* mask of synchronized timings by stream index */
			unsigned int num_timing_groups;
			unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
			bool group_is_drr_enabled[DML2_MAX_PLANES];
			double group_line_time_us[DML2_MAX_PLANES];
			/* scheduling check locals */
			struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
			unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
			unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
			double group_phase_offset[DML2_MAX_PLANES];
		} pmo_dcn4;
	};
};

/* One-time PMO initialization data: pre-expanded per-display-count strategy
 * lists (row counts scale with the display-count permutations). */
struct dml2_pmo_init_data {
	union {
		struct {
			/* populated once during initialization */
			enum dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 1][PMO_DCN4_MAX_DISPLAYS];
			enum dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2 * 2][PMO_DCN4_MAX_DISPLAYS];
			enum dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 2][PMO_DCN4_MAX_DISPLAYS];
			enum dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 24 * 2][PMO_DCN4_MAX_DISPLAYS];
			unsigned int num_expanded_strategies_per_list[PMO_DCN4_MAX_DISPLAYS];
		} pmo_dcn4;
	};
};
/* A PMO (power-management optimization) implementation instance: state,
 * tuning limits, and its virtual interface. */
struct dml2_pmo_instance {
	struct dml2_soc_bb *soc_bb;
	struct dml2_ip_capabilities *ip_caps;
	struct dml2_pmo_options *options;
	int disp_clk_vmin_threshold;
	int mpc_combine_limit;
	int odm_combine_limit;
	int min_clock_table_size;
	/* FAMS tuning parameters; v1 vs v2 layout depends on the project. */
	union {
		struct {
			struct {
				int prefetch_end_to_mall_start_us;
				int fw_processing_delay_us;
				int refresh_rate_limit_min;
				int refresh_rate_limit_max;
			} subvp;
		} v1;
		struct {
			struct {
				int refresh_rate_limit_min;
				int refresh_rate_limit_max;
			} subvp;
			struct {
				int refresh_rate_limit_min;
				int refresh_rate_limit_max;
			} drr;
		} v2;
	} fams_params;
	bool (*initialize)(struct dml2_pmo_initialize_in_out *in_out);
	bool (*optimize_dcc_mcache)(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);
	bool (*init_for_vmin)(struct dml2_pmo_init_for_vmin_in_out *in_out);
	bool (*test_for_vmin)(struct dml2_pmo_test_for_vmin_in_out *in_out);
	bool (*optimize_for_vmin)(struct dml2_pmo_optimize_for_vmin_in_out *in_out);
	bool (*init_for_uclk_pstate)(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
	bool (*test_for_uclk_pstate)(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
	bool (*optimize_for_uclk_pstate)(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);
	bool (*init_for_stutter)(struct dml2_pmo_init_for_stutter_in_out *in_out);
	bool (*test_for_stutter)(struct dml2_pmo_test_for_stutter_in_out *in_out);
	bool (*optimize_for_stutter)(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
	bool (*unit_test)(void);
	struct dml2_pmo_init_data init_data;
	struct dml2_pmo_scratch scratch;
};
/*
 * DML2 MCache Types
 */
struct top_mcache_validate_admissability_in_out {
	struct dml2_instance *dml2_instance;
	const struct dml2_display_cfg *display_cfg;
	const struct core_display_cfg_support_info *cfg_support_info;
	struct dml2_mcache_surface_allocation *mcache_allocations;
	/* Output: admissibility verdict per plane. */
	bool per_plane_status[DML2_MAX_PLANES];
	struct {
		const struct dml_mode_support_info_st *mode_support_info;
	} legacy;
};

struct top_mcache_assign_ids_in_out {
	/*
	 * Input
	 */
	const struct dml2_mcache_surface_allocation *mcache_allocations;
	int plane_count;
	int per_pipe_viewport_x_start[DML2_MAX_PLANES][DML2_MAX_DCN_PIPES];
	int per_pipe_viewport_x_end[DML2_MAX_PLANES][DML2_MAX_DCN_PIPES];
	int pipe_count_per_plane[DML2_MAX_PLANES];
	struct dml2_display_mcache_regs *current_mcache_regs[DML2_MAX_PLANES][DML2_MAX_DCN_PIPES]; //One set per pipe/hubp
	/*
	 * Output
	 */
	struct dml2_display_mcache_regs mcache_regs[DML2_MAX_PLANES][DML2_MAX_DCN_PIPES]; //One set per pipe/hubp
	struct dml2_build_mcache_programming_in_out *mcache_programming;
};

struct top_mcache_calc_mcache_count_and_offsets_in_out {
	/*
	 * Inputs
	 */
	struct dml2_instance *dml2_instance;
	const struct dml2_display_cfg *display_config;
	/*
	 * Outputs
	 */
	struct dml2_mcache_surface_allocation *mcache_allocations;
};

struct top_mcache_assign_global_mcache_ids_in_out {
	/*
	 * Inputs/Outputs
	 */
	struct dml2_mcache_surface_allocation *allocations;
	int num_allocations;
};
/*
 * DML2 Top Types
 */
struct dml2_initialize_instance_locals {
	int dummy;
};

/* Scratch for the per-phase optimization init callback; one union member
 * per optimization kind. */
struct dml2_optimization_init_function_locals {
	union {
		struct {
			struct dml2_pmo_init_for_pstate_support_in_out init_params;
		} uclk_pstate;
		struct {
			struct dml2_pmo_init_for_stutter_in_out stutter_params;
		} stutter;
		struct {
			struct dml2_pmo_init_for_vmin_in_out init_params;
		} vmin;
	};
};

/* Scratch for the per-phase optimization test callback. */
struct dml2_optimization_test_function_locals {
	union {
		struct {
			struct top_mcache_calc_mcache_count_and_offsets_in_out calc_mcache_count_params;
			struct top_mcache_assign_global_mcache_ids_in_out assign_global_mcache_ids_params;
			struct top_mcache_validate_admissability_in_out validate_admissibility_params;
		} test_mcache;
		struct {
			struct dml2_pmo_test_for_vmin_in_out pmo_test_vmin_params;
		} test_vmin;
		struct {
			struct dml2_pmo_test_for_pstate_support_in_out test_params;
		} uclk_pstate;
		struct {
			struct dml2_pmo_test_for_stutter_in_out stutter_params;
		} stutter;
	};
};

/* Scratch for the per-phase optimization optimize callback. */
struct dml2_optimization_optimize_function_locals {
	union {
		struct {
			struct dml2_pmo_optimize_dcc_mcache_in_out optimize_mcache_params;
		} optimize_mcache;
		struct {
			struct dml2_pmo_optimize_for_vmin_in_out pmo_optimize_vmin_params;
		} optimize_vmin;
		struct {
			struct dml2_pmo_optimize_for_pstate_support_in_out optimize_params;
		} uclk_pstate;
		struct {
			struct dml2_pmo_optimize_for_stutter_in_out stutter_params;
		} stutter;
	};
};

struct dml2_optimization_phase_locals {
	struct display_configuation_with_meta cur_candidate_display_cfg;
	struct display_configuation_with_meta next_candidate_display_cfg;
	struct dml2_core_mode_support_in_out mode_support_params;
	struct dml2_optimization_init_function_locals init_function_locals;
	struct dml2_optimization_test_function_locals test_function_locals;
	struct dml2_optimization_optimize_function_locals optimize_function_locals;
};

struct dml2_check_mode_supported_locals {
	struct dml2_display_cfg display_cfg_working_copy;
	struct dml2_core_mode_support_in_out mode_support_params;
	struct dml2_optimization_phase_locals optimization_phase_locals;
	struct display_configuation_with_meta base_display_config_with_meta;
	struct display_configuation_with_meta optimized_display_config_with_meta;
};

struct optimization_init_function_params {
	struct dml2_optimization_init_function_locals *locals;
	struct dml2_instance *dml;
	struct display_configuation_with_meta *display_config;
};

struct optimization_test_function_params {
	struct dml2_optimization_test_function_locals *locals;
	struct dml2_instance *dml;
	struct display_configuation_with_meta *display_config;
};

struct optimization_optimize_function_params {
	bool last_candidate_supported;
	struct dml2_optimization_optimize_function_locals *locals;
	struct dml2_instance *dml;
	struct display_configuation_with_meta *display_config;
	struct display_configuation_with_meta *optimized_display_config;
};

/* Describes one optimization phase: its callbacks and where the optimized
 * result is written. */
struct optimization_phase_params {
	struct dml2_instance *dml;
	const struct display_configuation_with_meta *display_config; // Initial Display Configuration
	bool (*init_function)(const struct optimization_init_function_params *params); // Test function to determine optimization is complete
	bool (*test_function)(const struct optimization_test_function_params *params); // Test function to determine optimization is complete
	bool (*optimize_function)(const struct optimization_optimize_function_params *params); // Function which produces a more optimized display configuration
	struct display_configuation_with_meta *optimized_display_config; // The optimized display configuration
	/* If set, the phase's result is only accepted when fully successful. */
	bool all_or_nothing;
};
/* Scratch for the full build-mode-programming flow, including one
 * optimization_phase_params per optimization phase. */
struct dml2_build_mode_programming_locals {
	struct dml2_core_mode_support_in_out mode_support_params;
	struct dml2_core_mode_programming_in_out mode_programming_params;
	struct dml2_core_populate_informative_in_out informative_params;
	struct dml2_pmo_optimize_dcc_mcache_in_out optimize_mcache_params;
	struct display_configuation_with_meta base_display_config_with_meta;
	struct display_configuation_with_meta optimized_display_config_with_meta;
	struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out dppm_map_mode_params;
	struct dml2_dpmm_map_watermarks_params_in_out dppm_map_watermarks_params;
	struct dml2_optimization_phase_locals optimization_phase_locals;
	struct optimization_phase_params min_clock_for_latency_phase;
	struct optimization_phase_params mcache_phase;
	struct optimization_phase_params uclk_pstate_phase;
	struct optimization_phase_params vmin_phase;
	struct optimization_phase_params stutter_phase;
};

/* Scratch for the legacy core build-mode-programming wrapper path. */
struct dml2_legacy_core_build_mode_programming_wrapper_locals {
	struct dml2_core_mode_support_in_out mode_support_params;
	struct dml2_core_mode_programming_in_out mode_programming_params;
	struct dml2_core_populate_informative_in_out informative_params;
	struct top_mcache_calc_mcache_count_and_offsets_in_out calc_mcache_count_params;
	struct top_mcache_validate_admissability_in_out validate_admissibility_params;
	struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
	struct top_mcache_assign_global_mcache_ids_in_out assign_global_mcache_ids_params;
	struct dml2_pmo_optimize_dcc_mcache_in_out optimize_mcache_params;
	struct dml2_display_cfg optimized_display_cfg;
	struct core_display_cfg_support_info core_support_info;
};

struct dml2_top_mcache_verify_mcache_size_locals {
	struct dml2_calculate_mcache_allocation_in_out calc_mcache_params;
};

/* Per-plane viewport extents used while checking mcache admissibility. */
struct dml2_top_mcache_validate_admissability_locals {
	struct {
		int pipe_vp_startx[DML2_MAX_DCN_PIPES];
		int pipe_vp_endx[DML2_MAX_DCN_PIPES];
	} plane0;
	struct {
		int pipe_vp_startx[DML2_MAX_DCN_PIPES];
		int pipe_vp_endx[DML2_MAX_DCN_PIPES];
	} plane1;
};

struct dml2_top_display_cfg_support_info {
	const struct dml2_display_cfg *display_config;
	struct core_display_cfg_support_info core_info;
	enum dml2_pstate_support_method per_plane_pstate_method[DML2_MAX_PLANES];
};

/* Top-level DML2 instance: the component instances (core, MCG, DPMM, PMO),
 * SoC/IP capability copies, and all scratch storage. */
struct dml2_instance {
	enum dml2_project_id project_id;
	struct dml2_core_instance core_instance;
	struct dml2_mcg_instance mcg_instance;
	struct dml2_dpmm_instance dpmm_instance;
	struct dml2_pmo_instance pmo_instance;
	struct dml2_soc_bb soc_bbox;
	struct dml2_ip_capabilities ip_caps;
	struct dml2_mcg_min_clock_table min_clk_table;
	struct dml2_pmo_options pmo_options;
	struct {
		struct dml2_initialize_instance_locals initialize_instance_locals;
		struct dml2_top_mcache_verify_mcache_size_locals mcache_verify_mcache_size_locals;
		struct dml2_top_mcache_validate_admissability_locals mcache_validate_admissability_locals;
		struct dml2_check_mode_supported_locals check_mode_supported_locals;
		struct dml2_build_mode_programming_locals build_mode_programming_locals;
	} scratch;
	struct {
		struct {
			struct dml2_legacy_core_build_mode_programming_wrapper_locals legacy_core_build_mode_programming_wrapper_locals;
		} scratch;
	} legacy;
};
#endif

View file

@ -0,0 +1,432 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn401/dcn401_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_dpp.h"
/* Register-access helpers: resolve register offsets and field shift/mask
 * values through the per-instance tables attached to the local `dpp`. */
#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name
/*
 * dpp401_read_state() - Read back DPP hardware state for diagnostics.
 * @dpp_base: generic DPP handle (embedded in a struct dcn401_dpp here)
 * @s: state snapshot to fill; only is_enabled is populated so far
 */
void dpp401_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
	/* Use the DCN401 container type: dpp_base is embedded in a
	 * struct dcn401_dpp in this file (see dpp401_construct), so going
	 * through the DCN30 container would read tf_regs at the dcn3_dpp
	 * layout offset, which is only correct by accident if the layouts
	 * happen to match. */
	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);

	REG_GET(DPP_CONTROL,
			DPP_CLOCK_ENABLE, &s->is_enabled);

	// TODO: Implement for DCN4
}
/*
 * dpp401_dpp_setup() - Program the DPP format-conversion (CNVC) stage.
 * @dpp_base: DPP handle
 * @format: surface pixel format of the incoming plane
 * @mode: pixel expansion mode for narrow formats
 * @input_csc_color_matrix: optional input CSC adjustment (by value)
 * @input_color_space: caller-specified color space; 0 means "use the
 *	default derived from @format"
 * @alpha_2bit_lut: expansion LUT for formats with 2-bit alpha, may be NULL
 *
 * Maps the surface format to the hardware CNVC pixel-format code, programs
 * the format control/crossbar registers, the 2-bit alpha LUT when needed,
 * pre-dealpha/re-alpha, and finally the input CSC.
 */
void dpp401_dpp_setup(
	struct dpp *dpp_base,
	enum surface_pixel_format format,
	enum expansion_mode mode,
	struct dc_csc_transform input_csc_color_matrix,
	enum dc_color_space input_color_space,
	struct cnv_alpha_2bit_lut *alpha_2bit_lut)
{
	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
	uint32_t pixel_format = 0;	/* hardware CNVC format code */
	uint32_t alpha_en = 1;
	enum dc_color_space color_space = COLOR_SPACE_SRGB;
	enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
	uint32_t is_2bit = 0;		/* format carries 2-bit alpha */
	uint32_t alpha_plane_enable = 0;
	uint32_t dealpha_en = 0, dealpha_ablnd_en = 0;
	uint32_t realpha_en = 0, realpha_ablnd_en = 0;
	uint32_t program_prealpha_dealpha = 0;	/* never set below; keeps dealpha/realpha disabled */
	struct out_csc_color_matrix tbl_entry;
	int i;

	REG_SET_2(FORMAT_CONTROL, 0,
		CNVC_BYPASS, 0,
		FORMAT_EXPANSION_MODE, mode);

	REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
	REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
	REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
	REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);

	/* Identity channel crossbar: R->0, G->1, B->2. */
	REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0);
	REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1);
	REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2);

	/* Translate the surface format to the CNVC format code and derive
	 * alpha handling and the default color space / CSC selection. */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
		pixel_format = 1;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		pixel_format = 3;
		alpha_en = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
		pixel_format = 8;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		pixel_format = 10;
		is_2bit = 1;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
		pixel_format = 65;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		pixel_format = 64;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
		pixel_format = 67;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		pixel_format = 66;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
		pixel_format = 26; /* ARGB16161616_UNORM */
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
		pixel_format = 24;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		pixel_format = 25;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
		pixel_format = 12;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
		pixel_format = 112;
		alpha_en = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
		pixel_format = 113;
		alpha_en = 0;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
		pixel_format = 114;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		is_2bit = 1;
		break;
	case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
		pixel_format = 115;
		color_space = COLOR_SPACE_YCBCR709;
		select = INPUT_CSC_SELECT_ICSC;
		is_2bit = 1;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
		pixel_format = 116;
		alpha_plane_enable = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
		pixel_format = 116;
		alpha_plane_enable = 1;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
		pixel_format = 118;
		alpha_en = 0;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
		pixel_format = 119;
		alpha_en = 0;
		break;
	default:
		break;
	}

	/* Set default color space based on format if none is given. */
	/* NOTE(review): assumes color-space value 0 means "unspecified" —
	 * confirm against the dc_color_space enum definition. */
	color_space = input_color_space ? input_color_space : color_space;

	if (is_2bit == 1 && alpha_2bit_lut != NULL) {
		REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
		REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
		REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
		REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
	}

	REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, pixel_format,
			CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable);
	REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);

	if (program_prealpha_dealpha) {
		dealpha_en = 1;
		realpha_en = 1;
	}
	REG_SET_2(PRE_DEALPHA, 0,
			PRE_DEALPHA_EN, dealpha_en,
			PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en);
	REG_SET_2(PRE_REALPHA, 0,
			PRE_REALPHA_EN, realpha_en,
			PRE_REALPHA_ABLND_EN, realpha_ablnd_en);

	/* If input adjustment exists, program the ICSC with those values. */
	if (input_csc_color_matrix.enable_adjustment == true) {
		for (i = 0; i < 12; i++)
			tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];

		tbl_entry.color_space = input_color_space;

		if (color_space >= COLOR_SPACE_YCBCR601)
			select = INPUT_CSC_SELECT_ICSC;
		else
			select = INPUT_CSC_SELECT_BYPASS;

		dpp3_program_post_csc(dpp_base, color_space, select,
				&tbl_entry);
	} else {
		dpp3_program_post_csc(dpp_base, color_space, select, NULL);
	}
}
/* DCN401 DPP virtual function table. NULL entries are features that are
 * either unimplemented on this generation or, per the inline notes, moved
 * out of the DPP into other blocks (e.g. MPCC). */
static struct dpp_funcs dcn401_dpp_funcs = {
	.dpp_program_gamcor_lut		= dpp3_program_gamcor_lut,
	.dpp_read_state			= dpp401_read_state,
	.dpp_reset			= dpp_reset,
	.dpp_set_scaler			= dpp401_dscl_set_scaler_manual_scale,
	.dpp_get_optimal_number_of_taps	= dpp3_get_optimal_number_of_taps,
	.dpp_set_gamut_remap		= NULL,
	.dpp_set_csc_adjustment		= NULL,
	.dpp_set_csc_default		= NULL,
	.dpp_program_regamma_pwl	= NULL,
	.dpp_set_pre_degam		= dpp3_set_pre_degam,
	.dpp_program_input_lut		= NULL,
	.dpp_full_bypass		= dpp401_full_bypass,
	.dpp_setup			= dpp401_dpp_setup,
	.dpp_program_degamma_pwl	= NULL,
	.dpp_program_cm_dealpha		= dpp3_program_cm_dealpha,
	.dpp_program_cm_bias		= dpp3_program_cm_bias,

	.dpp_program_blnd_lut		= NULL, // BLNDGAM is removed completely in DCN3.2 DPP
	.dpp_program_shaper_lut		= NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
	.dpp_program_3dlut		= NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)

	.dpp_program_bias_and_scale	= NULL,
	.dpp_cnv_set_alpha_keyer	= dpp2_cnv_set_alpha_keyer,
	.set_cursor_attributes		= dpp401_set_cursor_attributes,
	.set_cursor_position		= dpp401_set_cursor_position,
	.set_optional_cursor_attributes	= dpp401_set_optional_cursor_attributes,
	.dpp_dppclk_control		= dpp1_dppclk_control,
	.dpp_set_hdr_multiplier		= dpp3_set_hdr_multiplier,
	.set_cursor_matrix		= dpp401_set_cursor_matrix,
};
/* DCN401 DPP capabilities: floating-point DSCL data path.
 * NOTE(review): max_lb_partitions is 63 while the calc functions below
 * clamp to 64 — confirm which limit is authoritative. */
static struct dpp_caps dcn401_dpp_cap = {
	.dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
	.max_lb_partitions = 63,
	.dscl_calc_lb_num_partitions = dscl401_calc_lb_num_partitions,
};
/*
 * dpp401_construct() - Wire up one DCN401 DPP instance.
 * @dpp: storage for the instance being constructed
 * @ctx: owning dc context
 * @inst: hardware instance index
 * @tf_regs: register offset table for this instance
 * @tf_shift: field shift table
 * @tf_mask: field mask table
 *
 * Pure pointer wiring; cannot fail. Return: always true (kept for API
 * symmetry with other generations' constructors).
 */
bool dpp401_construct(
	struct dcn401_dpp *dpp,
	struct dc_context *ctx,
	uint32_t inst,
	const struct dcn401_dpp_registers *tf_regs,
	const struct dcn401_dpp_shift *tf_shift,
	const struct dcn401_dpp_mask *tf_mask)
{
	/* Per-instance register/shift/mask tables. */
	dpp->tf_regs = tf_regs;
	dpp->tf_shift = tf_shift;
	dpp->tf_mask = tf_mask;

	/* Generic base: context, instance id, and the DCN401 vtables. */
	dpp->base.ctx = ctx;
	dpp->base.inst = inst;
	dpp->base.funcs = &dcn401_dpp_funcs;
	dpp->base.caps = &dcn401_dpp_cap;

	return true;
}
/* Compute the maximum number of lines that we can fit in the line buffer */
void dscl401_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
int *num_part_c)
{
int memory_line_size_y, memory_line_size_c, memory_line_size_a,
lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
if (line_size == 0)
line_size = 1;
if (line_size_c == 0)
line_size_c = 1;
memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */
memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */
memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 970;
lb_memory_size_c = 970;
lb_memory_size_a = 970;
} else if (lb_config == LB_MEMORY_CONFIG_2) {
lb_memory_size = 1290;
lb_memory_size_c = 1290;
lb_memory_size_a = 1290;
} else if (lb_config == LB_MEMORY_CONFIG_3) {
if (scl_data->viewport.width == scl_data->h_active &&
scl_data->viewport.height == scl_data->v_active) {
/* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
/* use increased LB size for calculation only if Scaler not enabled */
lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170;
lb_memory_size_c = 970 + 1290;
lb_memory_size_a = 970 + 1290 + 1170;
} else {
/* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
lb_memory_size = 970 + 1290 + 484 + 484 + 484;
lb_memory_size_c = 970 + 1290;
lb_memory_size_a = 970 + 1290 + 484;
}
} else {
if (scl_data->viewport.width == scl_data->h_active &&
scl_data->viewport.height == scl_data->v_active) {
/* use increased LB size for calculation only if Scaler not enabled */
lb_memory_size = 970 + 1290 + 1170;
lb_memory_size_c = 970 + 1290 + 1170;
lb_memory_size_a = 970 + 1290 + 1170;
} else {
lb_memory_size = 970 + 1290 + 484;
lb_memory_size_c = 970 + 1290 + 484;
lb_memory_size_a = 970 + 1290 + 484;
}
}
*num_part_y = lb_memory_size / memory_line_size_y;
*num_part_c = lb_memory_size_c / memory_line_size_c;
num_partitions_a = lb_memory_size_a / memory_line_size_a;
if (scl_data->lb_params.alpha_en
&& (num_partitions_a < *num_part_y))
*num_part_y = num_partitions_a;
if (*num_part_y > 64)
*num_part_y = 64;
if (*num_part_c > 64)
*num_part_c = 64;
}
/*
 * dscl401_spl_calc_lb_num_partitions() - SPL variant of the line-buffer
 * partition calculation.
 * @alpha_en: whether per-pixel alpha is enabled (passed explicitly instead
 *	of being read from lb_params as in the non-SPL variant)
 * @scl_data: SPL scaler configuration
 * @lb_config: selected line-buffer memory configuration
 * @num_part_y: out, luma partition (line) count
 * @num_part_c: out, chroma partition (line) count
 *
 * Same arithmetic as dscl401_calc_lb_num_partitions, operating on the
 * struct spl_scaler_data layout.
 */
void dscl401_spl_calc_lb_num_partitions(
	bool alpha_en,
	const struct spl_scaler_data *scl_data,
	enum lb_memory_config lb_config,
	int *num_part_y,
	int *num_part_c)
{
	int words_per_line_y, words_per_line_c, words_per_line_a;
	int mem_size_y, mem_size_c, mem_size_a, parts_a;
	int line_len_y, line_len_c;
	bool full_screen;

	/* A line occupies min(viewport width, recout width) pixels. */
	line_len_y = scl_data->viewport.width;
	if (scl_data->recout.width < line_len_y)
		line_len_y = scl_data->recout.width;

	line_len_c = scl_data->viewport_c.width;
	if (scl_data->recout.width < line_len_c)
		line_len_c = scl_data->recout.width;

	/* Guard against division by zero below. */
	if (line_len_y == 0)
		line_len_y = 1;
	if (line_len_c == 0)
		line_len_c = 1;

	/* 6 pixels per memory word, rounded up. */
	words_per_line_y = (line_len_y + 5) / 6;
	words_per_line_c = (line_len_c + 5) / 6;
	words_per_line_a = (line_len_y + 5) / 6;

	/* Scaler bypassed when viewport matches the active timing exactly;
	 * the larger LB budgets only apply in that case. */
	full_screen = scl_data->viewport.width == scl_data->h_active &&
		      scl_data->viewport.height == scl_data->v_active;

	switch (lb_config) {
	case LB_MEMORY_CONFIG_1:
		mem_size_y = 970;
		mem_size_c = 970;
		mem_size_a = 970;
		break;
	case LB_MEMORY_CONFIG_2:
		mem_size_y = 1290;
		mem_size_c = 1290;
		mem_size_a = 1290;
		break;
	case LB_MEMORY_CONFIG_3:
		/* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
		if (full_screen) {
			/* use increased LB size for calculation only if Scaler not enabled */
			mem_size_y = 970 + 1290 + 1170 + 1170 + 1170;
			mem_size_c = 970 + 1290;
			mem_size_a = 970 + 1290 + 1170;
		} else {
			mem_size_y = 970 + 1290 + 484 + 484 + 484;
			mem_size_c = 970 + 1290;
			mem_size_a = 970 + 1290 + 484;
		}
		break;
	default:
		if (full_screen) {
			/* use increased LB size for calculation only if Scaler not enabled */
			mem_size_y = 970 + 1290 + 1170;
			mem_size_c = 970 + 1290 + 1170;
			mem_size_a = 970 + 1290 + 1170;
		} else {
			mem_size_y = 970 + 1290 + 484;
			mem_size_c = 970 + 1290 + 484;
			mem_size_a = 970 + 1290 + 484;
		}
		break;
	}

	*num_part_y = mem_size_y / words_per_line_y;
	*num_part_c = mem_size_c / words_per_line_c;
	parts_a = mem_size_a / words_per_line_a;

	/* With alpha, luma cannot exceed the alpha channel's line count. */
	if (alpha_en && parts_a < *num_part_y)
		*num_part_y = parts_a;

	/* Hardware cap. */
	if (*num_part_y > 64)
		*num_part_y = 64;
	if (*num_part_c > 64)
		*num_part_c = 64;
}

View file

@ -0,0 +1,725 @@
/* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN401_DPP_H__
#define __DCN401_DPP_H__
#include "dcn20/dcn20_dpp.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn32/dcn32_dpp.h"
/* Downcast a generic 'struct dpp *' to its enclosing 'struct dcn401_dpp'
 * instance ('base' is the embedded generic member). */
#define TO_DCN401_DPP(dpp)\
container_of(dpp, struct dcn401_dpp, base)
/*
 * Shift/mask initializer entries common to every DCN4.01 DPP instance.
 * Each TF_SF/TF2_SF line names one register field, covering:
 *   - CM (color management): GAMCOR LUT/ramp, de-alpha, bias, pre/post CSC,
 *     HDR multiplier and bypass controls
 *   - CNVC: surface pixel format, crossbar, FP bias/scale, color/luma keyer
 *   - CM_CUR0/CURSOR0: cursor mode, colors, FP scale/bias and 3x4 matrix
 *   - DSCL: overscan, line-buffer config, taps, coefficient RAM, ratios/inits
 *   - EASF H/V: blending-factor PWL segments, ringing-estimator gains
 *   - ISHARP: delta LUT, LBA PWL, noise detection and NL-delta soft clip
 * 'mask_sh' is expanded by TF_SF into the per-field __SHIFT or _MASK
 * suffix — NOTE(review): follows the usual DC SF() convention; confirm
 * against the TF_SF definition in the dpp headers (not visible here).
 */
#define DPP_REG_LIST_SH_MASK_DCN401_COMMON(mask_sh)\
TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\
TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\
TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\
TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\
TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\
TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\
TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\
TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\
TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\
TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\
TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\
TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\
TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\
TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\
TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_DBG, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\
TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \
TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \
TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \
TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \
TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_MODE, CUR0_MATRIX_MODE, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_MODE, CUR0_MATRIX_MODE_CURRENT, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_MODE, CUR0_MATRIX_COEF_FORMAT, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C11_C12_A, CUR0_MATRIX_C11_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C11_C12_A, CUR0_MATRIX_C12_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C13_C14_A, CUR0_MATRIX_C13_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C13_C14_A, CUR0_MATRIX_C14_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C21_C22_A, CUR0_MATRIX_C21_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C21_C22_A, CUR0_MATRIX_C22_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C23_C24_A, CUR0_MATRIX_C23_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C23_C24_A, CUR0_MATRIX_C24_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C31_C32_A, CUR0_MATRIX_C31_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C31_C32_A, CUR0_MATRIX_C32_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C33_C34_A, CUR0_MATRIX_C33_A, mask_sh), \
TF_SF(CM_CUR0_CUR0_MATRIX_C33_C34_A, CUR0_MATRIX_C34_A, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \
TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \
TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \
TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \
TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \
TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, LUMA_KEYER_EN, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \
TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
TF_SF(CM_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MODE, SCL_SC_MATRIX_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MODE, SCL_SC_LTONL_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_MODE, SCL_EASF_H_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_MODE, SCL_EASF_H_RINGEST_FORCE_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_MODE, SCL_EASF_H_2TAP_SHARP_FACTOR, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF1_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF2_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF3_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF2_FLAT1_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF2_FLAT2_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_CNTL, SCL_EASF_H_BF2_ROC_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_RINGEST_EVENTAP_GAIN, SCL_EASF_H_RINGEST_EVENTAP_GAIN1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_RINGEST_EVENTAP_GAIN, SCL_EASF_H_RINGEST_EVENTAP_GAIN2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_FINAL_MAX_MIN, SCL_EASF_H_BF_MAXA, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_FINAL_MAX_MIN, SCL_EASF_H_BF_MAXB, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_FINAL_MAX_MIN, SCL_EASF_H_BF_MINA, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF_FINAL_MAX_MIN, SCL_EASF_H_BF_MINB, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG0, SCL_EASF_H_BF1_PWL_IN_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG0, SCL_EASF_H_BF1_PWL_BASE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG0, SCL_EASF_H_BF1_PWL_SLOPE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG1, SCL_EASF_H_BF1_PWL_IN_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG1, SCL_EASF_H_BF1_PWL_BASE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG1, SCL_EASF_H_BF1_PWL_SLOPE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG2, SCL_EASF_H_BF1_PWL_IN_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG2, SCL_EASF_H_BF1_PWL_BASE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG2, SCL_EASF_H_BF1_PWL_SLOPE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG3, SCL_EASF_H_BF1_PWL_IN_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG3, SCL_EASF_H_BF1_PWL_BASE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG3, SCL_EASF_H_BF1_PWL_SLOPE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG4, SCL_EASF_H_BF1_PWL_IN_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG4, SCL_EASF_H_BF1_PWL_BASE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG4, SCL_EASF_H_BF1_PWL_SLOPE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG5, SCL_EASF_H_BF1_PWL_IN_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG5, SCL_EASF_H_BF1_PWL_BASE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG5, SCL_EASF_H_BF1_PWL_SLOPE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG6, SCL_EASF_H_BF1_PWL_IN_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG6, SCL_EASF_H_BF1_PWL_BASE_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG6, SCL_EASF_H_BF1_PWL_SLOPE_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG7, SCL_EASF_H_BF1_PWL_IN_SEG7, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF1_PWL_SEG7, SCL_EASF_H_BF1_PWL_BASE_SEG7, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG0, SCL_EASF_H_BF3_PWL_IN_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG0, SCL_EASF_H_BF3_PWL_BASE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG0, SCL_EASF_H_BF3_PWL_SLOPE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG1, SCL_EASF_H_BF3_PWL_IN_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG1, SCL_EASF_H_BF3_PWL_BASE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG1, SCL_EASF_H_BF3_PWL_SLOPE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG2, SCL_EASF_H_BF3_PWL_IN_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG2, SCL_EASF_H_BF3_PWL_BASE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG2, SCL_EASF_H_BF3_PWL_SLOPE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG3, SCL_EASF_H_BF3_PWL_IN_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG3, SCL_EASF_H_BF3_PWL_BASE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG3, SCL_EASF_H_BF3_PWL_SLOPE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG4, SCL_EASF_H_BF3_PWL_IN_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG4, SCL_EASF_H_BF3_PWL_BASE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG4, SCL_EASF_H_BF3_PWL_SLOPE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG5, SCL_EASF_H_BF3_PWL_IN_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_H_BF3_PWL_SEG5, SCL_EASF_H_BF3_PWL_BASE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_MODE, SCL_EASF_V_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_MODE, SCL_EASF_V_RINGEST_FORCE_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_MODE, SCL_EASF_V_2TAP_SHARP_FACTOR, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF1_EN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF2_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF3_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF2_FLAT1_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF2_FLAT2_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_CNTL, SCL_EASF_V_BF2_ROC_GAIN, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL1, SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL1, SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL2, SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL2, SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL3, SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_3TAP_CNTL3, SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_EVENTAP_GAIN, SCL_EASF_V_RINGEST_EVENTAP_GAIN1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_RINGEST_EVENTAP_GAIN, SCL_EASF_V_RINGEST_EVENTAP_GAIN2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_FINAL_MAX_MIN, SCL_EASF_V_BF_MAXA, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_FINAL_MAX_MIN, SCL_EASF_V_BF_MAXB, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_FINAL_MAX_MIN, SCL_EASF_V_BF_MINA, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF_FINAL_MAX_MIN, SCL_EASF_V_BF_MINB, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG0, SCL_EASF_V_BF1_PWL_IN_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG0, SCL_EASF_V_BF1_PWL_BASE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG0, SCL_EASF_V_BF1_PWL_SLOPE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG1, SCL_EASF_V_BF1_PWL_IN_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG1, SCL_EASF_V_BF1_PWL_BASE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG1, SCL_EASF_V_BF1_PWL_SLOPE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG2, SCL_EASF_V_BF1_PWL_IN_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG2, SCL_EASF_V_BF1_PWL_BASE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG2, SCL_EASF_V_BF1_PWL_SLOPE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG3, SCL_EASF_V_BF1_PWL_IN_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG3, SCL_EASF_V_BF1_PWL_BASE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG3, SCL_EASF_V_BF1_PWL_SLOPE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG4, SCL_EASF_V_BF1_PWL_IN_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG4, SCL_EASF_V_BF1_PWL_BASE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG4, SCL_EASF_V_BF1_PWL_SLOPE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG5, SCL_EASF_V_BF1_PWL_IN_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG5, SCL_EASF_V_BF1_PWL_BASE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG5, SCL_EASF_V_BF1_PWL_SLOPE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG6, SCL_EASF_V_BF1_PWL_IN_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG6, SCL_EASF_V_BF1_PWL_BASE_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG6, SCL_EASF_V_BF1_PWL_SLOPE_SEG6, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG7, SCL_EASF_V_BF1_PWL_IN_SEG7, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF1_PWL_SEG7, SCL_EASF_V_BF1_PWL_BASE_SEG7, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG0, SCL_EASF_V_BF3_PWL_IN_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG0, SCL_EASF_V_BF3_PWL_BASE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG0, SCL_EASF_V_BF3_PWL_SLOPE_SEG0, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG1, SCL_EASF_V_BF3_PWL_IN_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG1, SCL_EASF_V_BF3_PWL_BASE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG1, SCL_EASF_V_BF3_PWL_SLOPE_SEG1, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG2, SCL_EASF_V_BF3_PWL_IN_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG2, SCL_EASF_V_BF3_PWL_BASE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG2, SCL_EASF_V_BF3_PWL_SLOPE_SEG2, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG3, SCL_EASF_V_BF3_PWL_IN_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG3, SCL_EASF_V_BF3_PWL_BASE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG3, SCL_EASF_V_BF3_PWL_SLOPE_SEG3, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG4, SCL_EASF_V_BF3_PWL_IN_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG4, SCL_EASF_V_BF3_PWL_BASE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG4, SCL_EASF_V_BF3_PWL_SLOPE_SEG4, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG5, SCL_EASF_V_BF3_PWL_IN_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_EASF_V_BF3_PWL_SEG5, SCL_EASF_V_BF3_PWL_BASE_SEG5, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MATRIX_C0C1, SCL_SC_MATRIX_C0, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MATRIX_C0C1, SCL_SC_MATRIX_C1, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MATRIX_C2C3, SCL_SC_MATRIX_C2, mask_sh),\
TF_SF(DSCL0_DSCL_SC_MATRIX_C2C3, SCL_SC_MATRIX_C3, mask_sh),\
TF_SF(DSCL0_ISHARP_DELTA_CTRL, ISHARP_DELTA_LUT_HOST_SELECT, mask_sh),\
TF_SF(DSCL0_ISHARP_DELTA_DATA, ISHARP_DELTA_DATA, mask_sh),\
TF_SF(DSCL0_ISHARP_DELTA_INDEX, ISHARP_DELTA_INDEX, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_EN, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_NOISEDET_EN, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_NOISEDET_MODE, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_LBA_MODE, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_DELTA_LUT_SELECT, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_FMT_MODE, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_FMT_NORM, mask_sh),\
TF_SF(DSCL0_ISHARP_MODE, ISHARP_DELTA_LUT_SELECT_CURRENT, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG0, ISHARP_LBA_PWL_IN_SEG0, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG0, ISHARP_LBA_PWL_BASE_SEG0, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG0, ISHARP_LBA_PWL_SLOPE_SEG0, mask_sh), \
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG1, ISHARP_LBA_PWL_IN_SEG1, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG1, ISHARP_LBA_PWL_BASE_SEG1, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG1, ISHARP_LBA_PWL_SLOPE_SEG1, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG2, ISHARP_LBA_PWL_IN_SEG2, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG2, ISHARP_LBA_PWL_BASE_SEG2, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG2, ISHARP_LBA_PWL_SLOPE_SEG2, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG3, ISHARP_LBA_PWL_IN_SEG3, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG3, ISHARP_LBA_PWL_BASE_SEG3, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG3, ISHARP_LBA_PWL_SLOPE_SEG3, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG4, ISHARP_LBA_PWL_IN_SEG4, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG4, ISHARP_LBA_PWL_BASE_SEG4, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG4, ISHARP_LBA_PWL_SLOPE_SEG4, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG5, ISHARP_LBA_PWL_IN_SEG5, mask_sh),\
TF_SF(DSCL0_ISHARP_LBA_PWL_SEG5, ISHARP_LBA_PWL_BASE_SEG5, mask_sh),\
TF_SF(DSCL0_ISHARP_NOISEDET_THRESHOLD, ISHARP_NOISEDET_UTHRE, mask_sh),\
TF_SF(DSCL0_ISHARP_NOISEDET_THRESHOLD, ISHARP_NOISEDET_DTHRE, mask_sh), \
TF_SF(DSCL0_ISHARP_NOISE_GAIN_PWL, ISHARP_NOISEDET_PWL_START_IN, mask_sh), \
TF_SF(DSCL0_ISHARP_NOISE_GAIN_PWL, ISHARP_NOISEDET_PWL_END_IN, mask_sh), \
TF_SF(DSCL0_ISHARP_NOISE_GAIN_PWL, ISHARP_NOISEDET_PWL_SLOPE, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_EN_P, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_PIVOT_P, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_SLOPE_P, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_EN_N, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_PIVOT_N, mask_sh), \
TF_SF(DSCL0_ISHARP_NLDELTA_SOFT_CLIP, ISHARP_NLDELTA_SCLIP_SLOPE_N, mask_sh)
/*
 * Declare one member of 'type' per register field the DCN4.01 DPP
 * programs.  Starts from the DCN3 field list and appends the fields
 * new to DCN4.01: cursor FP scale/bias and 3x4 cursor matrix, luma
 * keyer enable, scaler sharpness (SC) matrix, EASF horizontal/vertical
 * PWL and ring-estimator fields, and the ISHARP sharpening block.
 * NOTE(review): presumably instantiated once with a shift type and once
 * with a mask type to build the <asic>_shift/<asic>_mask structs, per
 * the usual DC pattern — confirm at the instantiation sites.
 */
#define DPP_REG_FIELD_LIST_DCN401(type) \
DPP_REG_FIELD_LIST_DCN3(type); \
type CUR0_FP_BIAS_G_Y; \
type CUR0_FP_SCALE_G_Y; \
type CUR0_FP_BIAS_RB_CRCB; \
type CUR0_FP_SCALE_RB_CRCB; \
type CUR0_MATRIX_MODE; \
type CUR0_MATRIX_MODE_CURRENT; \
type CUR0_MATRIX_COEF_FORMAT; \
type CUR0_MATRIX_C11_A; \
type CUR0_MATRIX_C12_A; \
type CUR0_MATRIX_C13_A; \
type CUR0_MATRIX_C14_A; \
type CUR0_MATRIX_C21_A; \
type CUR0_MATRIX_C22_A; \
type CUR0_MATRIX_C23_A; \
type CUR0_MATRIX_C24_A; \
type CUR0_MATRIX_C31_A; \
type CUR0_MATRIX_C32_A; \
type CUR0_MATRIX_C33_A; \
type CUR0_MATRIX_C34_A; \
type LUMA_KEYER_EN; \
type SCL_SC_MATRIX_MODE; \
type SCL_SC_LTONL_EN; \
type SCL_EASF_H_EN; \
type SCL_EASF_H_RINGEST_FORCE_EN; \
type SCL_EASF_H_2TAP_SHARP_FACTOR; \
type SCL_EASF_H_BF1_EN; \
type SCL_EASF_H_BF2_MODE; \
type SCL_EASF_H_BF3_MODE; \
type SCL_EASF_H_BF2_FLAT1_GAIN; \
type SCL_EASF_H_BF2_FLAT2_GAIN; \
type SCL_EASF_H_BF2_ROC_GAIN; \
type SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1; \
type SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2; \
type SCL_EASF_H_RINGEST_EVENTAP_GAIN1; \
type SCL_EASF_H_RINGEST_EVENTAP_GAIN2; \
type SCL_EASF_H_BF_MAXA; \
type SCL_EASF_H_BF_MAXB; \
type SCL_EASF_H_BF_MINA; \
type SCL_EASF_H_BF_MINB; \
type SCL_EASF_H_BF1_PWL_IN_SEG0; \
type SCL_EASF_H_BF1_PWL_BASE_SEG0; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG0; \
type SCL_EASF_H_BF1_PWL_IN_SEG1; \
type SCL_EASF_H_BF1_PWL_BASE_SEG1; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG1; \
type SCL_EASF_H_BF1_PWL_IN_SEG2; \
type SCL_EASF_H_BF1_PWL_BASE_SEG2; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG2; \
type SCL_EASF_H_BF1_PWL_IN_SEG3; \
type SCL_EASF_H_BF1_PWL_BASE_SEG3; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG3; \
type SCL_EASF_H_BF1_PWL_IN_SEG4; \
type SCL_EASF_H_BF1_PWL_BASE_SEG4; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG4; \
type SCL_EASF_H_BF1_PWL_IN_SEG5; \
type SCL_EASF_H_BF1_PWL_BASE_SEG5; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG5; \
type SCL_EASF_H_BF1_PWL_IN_SEG6; \
type SCL_EASF_H_BF1_PWL_BASE_SEG6; \
type SCL_EASF_H_BF1_PWL_SLOPE_SEG6; \
type SCL_EASF_H_BF1_PWL_IN_SEG7; \
type SCL_EASF_H_BF1_PWL_BASE_SEG7; \
type SCL_EASF_H_BF3_PWL_IN_SEG0; \
type SCL_EASF_H_BF3_PWL_BASE_SEG0; \
type SCL_EASF_H_BF3_PWL_SLOPE_SEG0; \
type SCL_EASF_H_BF3_PWL_IN_SEG1; \
type SCL_EASF_H_BF3_PWL_BASE_SEG1; \
type SCL_EASF_H_BF3_PWL_SLOPE_SEG1; \
type SCL_EASF_H_BF3_PWL_IN_SEG2; \
type SCL_EASF_H_BF3_PWL_BASE_SEG2; \
type SCL_EASF_H_BF3_PWL_SLOPE_SEG2; \
type SCL_EASF_H_BF3_PWL_IN_SEG3; \
type SCL_EASF_H_BF3_PWL_BASE_SEG3; \
type SCL_EASF_H_BF3_PWL_SLOPE_SEG3; \
type SCL_EASF_H_BF3_PWL_IN_SEG4; \
type SCL_EASF_H_BF3_PWL_BASE_SEG4; \
type SCL_EASF_H_BF3_PWL_SLOPE_SEG4; \
type SCL_EASF_H_BF3_PWL_IN_SEG5; \
type SCL_EASF_H_BF3_PWL_BASE_SEG5; \
type SCL_EASF_V_EN; \
type SCL_EASF_V_RINGEST_FORCE_EN; \
type SCL_EASF_V_2TAP_SHARP_FACTOR; \
type SCL_EASF_V_BF1_EN; \
type SCL_EASF_V_BF2_MODE; \
type SCL_EASF_V_BF3_MODE; \
type SCL_EASF_V_BF2_FLAT1_GAIN; \
type SCL_EASF_V_BF2_FLAT2_GAIN; \
type SCL_EASF_V_BF2_ROC_GAIN; \
type SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT; \
type SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL; \
type SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE; \
type SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE; \
type SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE; \
type SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET; \
type SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1; \
type SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2; \
type SCL_EASF_V_RINGEST_EVENTAP_GAIN1; \
type SCL_EASF_V_RINGEST_EVENTAP_GAIN2; \
type SCL_EASF_V_BF_MAXA; \
type SCL_EASF_V_BF_MAXB; \
type SCL_EASF_V_BF_MINA; \
type SCL_EASF_V_BF_MINB; \
type SCL_EASF_V_BF1_PWL_IN_SEG0; \
type SCL_EASF_V_BF1_PWL_BASE_SEG0; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG0; \
type SCL_EASF_V_BF1_PWL_IN_SEG1; \
type SCL_EASF_V_BF1_PWL_BASE_SEG1; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG1; \
type SCL_EASF_V_BF1_PWL_IN_SEG2; \
type SCL_EASF_V_BF1_PWL_BASE_SEG2; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG2; \
type SCL_EASF_V_BF1_PWL_IN_SEG3; \
type SCL_EASF_V_BF1_PWL_BASE_SEG3; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG3; \
type SCL_EASF_V_BF1_PWL_IN_SEG4; \
type SCL_EASF_V_BF1_PWL_BASE_SEG4; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG4; \
type SCL_EASF_V_BF1_PWL_IN_SEG5; \
type SCL_EASF_V_BF1_PWL_BASE_SEG5; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG5; \
type SCL_EASF_V_BF1_PWL_IN_SEG6; \
type SCL_EASF_V_BF1_PWL_BASE_SEG6; \
type SCL_EASF_V_BF1_PWL_SLOPE_SEG6; \
type SCL_EASF_V_BF1_PWL_IN_SEG7; \
type SCL_EASF_V_BF1_PWL_BASE_SEG7; \
type SCL_EASF_V_BF3_PWL_IN_SEG0; \
type SCL_EASF_V_BF3_PWL_BASE_SEG0; \
type SCL_EASF_V_BF3_PWL_SLOPE_SEG0; \
type SCL_EASF_V_BF3_PWL_IN_SEG1; \
type SCL_EASF_V_BF3_PWL_BASE_SEG1; \
type SCL_EASF_V_BF3_PWL_SLOPE_SEG1; \
type SCL_EASF_V_BF3_PWL_IN_SEG2; \
type SCL_EASF_V_BF3_PWL_BASE_SEG2; \
type SCL_EASF_V_BF3_PWL_SLOPE_SEG2; \
type SCL_EASF_V_BF3_PWL_IN_SEG3; \
type SCL_EASF_V_BF3_PWL_BASE_SEG3; \
type SCL_EASF_V_BF3_PWL_SLOPE_SEG3; \
type SCL_EASF_V_BF3_PWL_IN_SEG4; \
type SCL_EASF_V_BF3_PWL_BASE_SEG4; \
type SCL_EASF_V_BF3_PWL_SLOPE_SEG4; \
type SCL_EASF_V_BF3_PWL_IN_SEG5; \
type SCL_EASF_V_BF3_PWL_BASE_SEG5; \
type SCL_SC_MATRIX_C0; \
type SCL_SC_MATRIX_C1; \
type SCL_SC_MATRIX_C2; \
type SCL_SC_MATRIX_C3; \
type ISHARP_EN; \
type ISHARP_NOISEDET_EN; \
type ISHARP_NOISEDET_MODE; \
type ISHARP_NOISEDET_UTHRE; \
type ISHARP_NOISEDET_DTHRE; \
type ISHARP_NOISEDET_PWL_START_IN; \
type ISHARP_NOISEDET_PWL_END_IN; \
type ISHARP_NOISEDET_PWL_SLOPE; \
type ISHARP_LBA_MODE; \
type ISHARP_LBA_PWL_IN_SEG0; \
type ISHARP_LBA_PWL_BASE_SEG0; \
type ISHARP_LBA_PWL_SLOPE_SEG0; \
type ISHARP_LBA_PWL_IN_SEG1; \
type ISHARP_LBA_PWL_BASE_SEG1; \
type ISHARP_LBA_PWL_SLOPE_SEG1; \
type ISHARP_LBA_PWL_IN_SEG2; \
type ISHARP_LBA_PWL_BASE_SEG2; \
type ISHARP_LBA_PWL_SLOPE_SEG2; \
type ISHARP_LBA_PWL_IN_SEG3; \
type ISHARP_LBA_PWL_BASE_SEG3; \
type ISHARP_LBA_PWL_SLOPE_SEG3; \
type ISHARP_LBA_PWL_IN_SEG4; \
type ISHARP_LBA_PWL_BASE_SEG4; \
type ISHARP_LBA_PWL_SLOPE_SEG4; \
type ISHARP_LBA_PWL_IN_SEG5; \
type ISHARP_LBA_PWL_BASE_SEG5; \
type ISHARP_FMT_MODE; \
type ISHARP_FMT_NORM; \
type ISHARP_DELTA_LUT_SELECT; \
type ISHARP_DELTA_LUT_SELECT_CURRENT; \
type ISHARP_DELTA_LUT_HOST_SELECT; \
type ISHARP_DELTA_DATA; \
type ISHARP_DELTA_INDEX; \
type ISHARP_NLDELTA_SCLIP_EN_P; \
type ISHARP_NLDELTA_SCLIP_PIVOT_P; \
type ISHARP_NLDELTA_SCLIP_SLOPE_P; \
type ISHARP_NLDELTA_SCLIP_EN_N; \
type ISHARP_NLDELTA_SCLIP_PIVOT_N; \
type ISHARP_NLDELTA_SCLIP_SLOPE_N
/* Raw MMIO register offsets for one DCN4.01 DPP instance.
 * Starts from the common DCN3 DPP register list and appends the
 * DCN4.01-only blocks: cursor fixed-point scale/bias, cursor CSC matrix
 * (double-buffered A/B coefficient sets), DSCL EASF (edge-adaptive
 * scaling filter) horizontal/vertical controls and PWL segments, the
 * sharpness color matrix, and the ISHARP sharpening block.
 */
struct dcn401_dpp_registers {
DPP_DCN3_REG_VARIABLE_LIST_COMMON;
/* cursor fixed-point scale/bias (G/Y and RB/CrCb channel pairs) */
uint32_t CURSOR0_FP_SCALE_BIAS_G_Y;
uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB;
/* cursor CSC matrix: mode select plus double-buffered A/B coefficient sets */
uint32_t CUR0_MATRIX_MODE;
uint32_t CUR0_MATRIX_C11_C12_A;
uint32_t CUR0_MATRIX_C13_C14_A;
uint32_t CUR0_MATRIX_C21_C22_A;
uint32_t CUR0_MATRIX_C23_C24_A;
uint32_t CUR0_MATRIX_C31_C32_A;
uint32_t CUR0_MATRIX_C33_C34_A;
uint32_t CUR0_MATRIX_C11_C12_B;
uint32_t CUR0_MATRIX_C13_C14_B;
uint32_t CUR0_MATRIX_C21_C22_B;
uint32_t CUR0_MATRIX_C23_C24_B;
uint32_t CUR0_MATRIX_C31_C32_B;
uint32_t CUR0_MATRIX_C33_C34_B;
/* sharpness color matrix mode */
uint32_t DSCL_SC_MODE;
/* EASF horizontal path: mode, blending-factor control, ringing
 * estimator, and BF1/BF3 piecewise-linear segment registers */
uint32_t DSCL_EASF_H_MODE;
uint32_t DSCL_EASF_H_BF_CNTL;
uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE;
uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN;
uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN;
uint32_t DSCL_EASF_H_BF1_PWL_SEG0;
uint32_t DSCL_EASF_H_BF1_PWL_SEG1;
uint32_t DSCL_EASF_H_BF1_PWL_SEG2;
uint32_t DSCL_EASF_H_BF1_PWL_SEG3;
uint32_t DSCL_EASF_H_BF1_PWL_SEG4;
uint32_t DSCL_EASF_H_BF1_PWL_SEG5;
uint32_t DSCL_EASF_H_BF1_PWL_SEG6;
uint32_t DSCL_EASF_H_BF1_PWL_SEG7;
uint32_t DSCL_EASF_H_BF3_PWL_SEG0;
uint32_t DSCL_EASF_H_BF3_PWL_SEG1;
uint32_t DSCL_EASF_H_BF3_PWL_SEG2;
uint32_t DSCL_EASF_H_BF3_PWL_SEG3;
uint32_t DSCL_EASF_H_BF3_PWL_SEG4;
uint32_t DSCL_EASF_H_BF3_PWL_SEG5;
/* EASF vertical path: adds 3-tap ringing estimator controls */
uint32_t DSCL_EASF_V_MODE;
uint32_t DSCL_EASF_V_BF_CNTL;
uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1;
uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2;
uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3;
uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE;
uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN;
uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN;
uint32_t DSCL_EASF_V_BF1_PWL_SEG0;
uint32_t DSCL_EASF_V_BF1_PWL_SEG1;
uint32_t DSCL_EASF_V_BF1_PWL_SEG2;
uint32_t DSCL_EASF_V_BF1_PWL_SEG3;
uint32_t DSCL_EASF_V_BF1_PWL_SEG4;
uint32_t DSCL_EASF_V_BF1_PWL_SEG5;
uint32_t DSCL_EASF_V_BF1_PWL_SEG6;
uint32_t DSCL_EASF_V_BF1_PWL_SEG7;
uint32_t DSCL_EASF_V_BF3_PWL_SEG0;
uint32_t DSCL_EASF_V_BF3_PWL_SEG1;
uint32_t DSCL_EASF_V_BF3_PWL_SEG2;
uint32_t DSCL_EASF_V_BF3_PWL_SEG3;
uint32_t DSCL_EASF_V_BF3_PWL_SEG4;
uint32_t DSCL_EASF_V_BF3_PWL_SEG5;
/* sharpness color matrix coefficients (packed two per register) */
uint32_t DSCL_SC_MATRIX_C0C1;
uint32_t DSCL_SC_MATRIX_C2C3;
/* ISHARP: noise detection, local-brightness adjust PWL, delta LUT,
 * and non-linear delta soft clipping */
uint32_t ISHARP_MODE;
uint32_t ISHARP_NOISEDET_THRESHOLD;
uint32_t ISHARP_NOISE_GAIN_PWL;
uint32_t ISHARP_LBA_PWL_SEG0;
uint32_t ISHARP_LBA_PWL_SEG1;
uint32_t ISHARP_LBA_PWL_SEG2;
uint32_t ISHARP_LBA_PWL_SEG3;
uint32_t ISHARP_LBA_PWL_SEG4;
uint32_t ISHARP_LBA_PWL_SEG5;
uint32_t ISHARP_DELTA_CTRL;
uint32_t ISHARP_DELTA_DATA;
uint32_t ISHARP_DELTA_INDEX;
uint32_t ISHARP_NLDELTA_SOFT_CLIP;
};
/* Per-field bit-shift values for every register field in the DCN4.01
 * DPP list; filled from the ASIC headers at resource construction. */
struct dcn401_dpp_shift {
DPP_REG_FIELD_LIST_DCN401(uint8_t);
};
/* Per-field bit masks matching dcn401_dpp_shift; a zero mask is also
 * used at runtime to detect fields absent on a given ASIC revision. */
struct dcn401_dpp_mask {
DPP_REG_FIELD_LIST_DCN401(uint32_t);
};
/* DCN4.01 DPP instance state, wrapping the generic struct dpp. */
struct dcn401_dpp {
struct dpp base;
/* register offset / shift / mask tables bound at construction */
const struct dcn401_dpp_registers *tf_regs;
const struct dcn401_dpp_shift *tf_shift;
const struct dcn401_dpp_mask *tf_mask;
/* last coefficient tables written to the SCL coefficient RAM;
 * compared against new tables to skip redundant RAM writes */
const uint16_t *filter_v;
const uint16_t *filter_h;
const uint16_t *filter_v_c;
const uint16_t *filter_h_c;
/* line-buffer capabilities of this instance */
int lb_pixel_depth_supported;
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
/* cached scaler config and PWL params for this pipe */
struct scaler_data scl_data;
struct pwl_params pwl_data;
};
/* Construct a DCN4.01 DPP instance and bind its register tables. */
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
const struct dcn401_dpp_registers *tf_regs,
const struct dcn401_dpp_shift *tf_shift,
const struct dcn401_dpp_mask *tf_mask);
/* Program the DSCL scaler from a precomputed scaler_data (manual mode). */
void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
/* Put the DPP into full bypass: fixed input format, CM bypassed. */
void dpp401_full_bypass(struct dpp *dpp_base);
/* Configure CNVC input pixel format, expansion mode and input CSC. */
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
/* Program cursor color format, expansion mode and degamma ROM enable. */
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes);
/* Update cursor enable based on position; disables the cursor when the
 * (rotation/mirror-adjusted) rectangle falls outside the recout. */
void dpp401_set_cursor_position(
struct dpp *dpp_base,
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param,
uint32_t width,
uint32_t height);
/* Program optional cursor fixed-point scale/bias attributes. */
void dpp401_set_optional_cursor_attributes(
struct dpp *dpp_base,
struct dpp_cursor_attributes *attr);
/* Compute luma/chroma line-buffer partition counts for a memory config. */
void dscl401_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
int *num_part_c);
/* SPL (scaler programming library) variant of the partition calculation. */
void dscl401_spl_calc_lb_num_partitions(
bool alpha_en,
const struct spl_scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
int *num_part_c);
/* Read back current DPP hardware state for debug/log output. */
void dpp401_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s);
/* Program the cursor CSC matrix for the given color space. */
void dpp401_set_cursor_matrix(
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
#endif

View file

@ -0,0 +1,303 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn401/dcn401_dpp.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
/* Selects which coefficient RAM table a filter write targets
 * (SCL_COEF_RAM_FILTER_TYPE field). */
enum dcn401_coef_filter_type_sel {
SCL_COEF_LUMA_VERT_FILTER = 0,
SCL_COEF_LUMA_HORZ_FILTER = 1,
SCL_COEF_CHROMA_VERT_FILTER = 2,
SCL_COEF_CHROMA_HORZ_FILTER = 3,
SCL_COEF_SC_VERT_FILTER = 4,
SCL_COEF_SC_HORZ_FILTER = 5
};
/* DSCL auto-calibration operating mode. */
enum dscl_autocal_mode {
AUTOCAL_MODE_OFF = 0,
/* Autocal calculate the scaling ratio and initial phase and the
* DSCL_MODE_SEL must be set to 1
*/
AUTOCAL_MODE_AUTOSCALE = 1,
/* Autocal perform auto centering without replication and the
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOCENTER = 2,
/* Autocal perform auto centering and auto replication and the
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOREPLICATE = 3
};
/* DSCL scaler operating mode (SCL_MODE register). */
enum dscl_mode_sel {
DSCL_MODE_SCALING_444_BYPASS = 0,
DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
DSCL_MODE_SCALING_YCBCR_ENABLE = 3,
DSCL_MODE_LUMA_SCALING_BYPASS = 4,
DSCL_MODE_CHROMA_SCALING_BYPASS = 5,
DSCL_MODE_DSCL_BYPASS = 6
};
/* Put the DPP into full bypass: fixed ARGB8888 input, zero expansion,
 * color management (CM) bypassed and the degamma LUT disabled.
 * The CM bypass field name differs across revisions, so the populated
 * mask (CM_BYPASS_EN vs CM_BYPASS) selects which one to write.
 */
void dpp401_full_bypass(struct dpp *dpp_base)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
/* Input pixel format: ARGB8888 */
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, 0x8);
/* Zero expansion */
REG_SET_3(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_CONTROL__ALPHA_EN, 0,
FORMAT_EXPANSION_MODE, 0);
/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
if (dpp->tf_mask->CM_BYPASS_EN)
REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
else
REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
/* Setting degamma bypass for now */
REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}
/* Program cursor color format, expansion mode and degamma ROM enable,
 * then mirror the programmed values into dpp_base->att for readback.
 * The degamma ROM is enabled only for (un)pre-multiplied-alpha color
 * cursors that request cursor degamma via the attribute flags.
 */
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
CUR0_EXPANSION_MODE, 0,
CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
REG_UPDATE(CURSOR0_COLOR0,
CUR0_COLOR0, 0x00000000);
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
/* cache programmed values for later state readback */
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
dpp_base->att.cur0_ctl.bits.mode = color_format;
}
/* Update the cursor enable bit for this DPP based on its position.
 * Translates the cursor position into recout space, compensating the
 * hotspot for 90/180/270 rotation (and for mirror in the 180 case),
 * then disables the cursor when the resulting rectangle lies entirely
 * outside the recout. Only CUR0_ENABLE is written here; the x/y
 * coordinates are presumably programmed elsewhere (e.g. HUBP) --
 * NOTE(review): inferred from this block writing only the enable bit,
 * confirm against the caller.
 */
void dpp401_set_cursor_position(
struct dpp *dpp_base,
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param,
uint32_t width,
uint32_t height)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
int x_pos = pos->x - param->recout.x;
int y_pos = pos->y - param->recout.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
int rec_x_offset = x_pos - pos->x_hotspot;
int rec_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)height;
int cursor_width = (int)width;
uint32_t cur_en = pos->enable ? 1 : 0;
// Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
swap(x_hotspot, y_hotspot);
if (param->rotation == ROTATION_ANGLE_90) {
// hotspot = (-y, x)
rec_x_offset = x_pos - (cursor_width - x_hotspot);
rec_y_offset = y_pos - y_hotspot;
} else if (param->rotation == ROTATION_ANGLE_270) {
// hotspot = (y, -x)
rec_x_offset = x_pos - x_hotspot;
rec_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
// hotspot = (-x, -y); x is not flipped when the surface is mirrored
if (!param->mirror)
rec_x_offset = x_pos - (cursor_width - x_hotspot);
rec_y_offset = y_pos - (cursor_height - y_hotspot);
}
/* cull the cursor when its rectangle is fully outside the recout */
if (rec_x_offset >= (int)param->recout.width)
cur_en = 0; /* not visible beyond right edge*/
if (rec_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (rec_y_offset >= (int)param->recout.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (rec_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
/* Program the optional cursor fixed-point bias/scale for the G/Y and
 * RB/CrCb channel pairs. No-op when attr is NULL. The same bias and
 * scale values are applied to both channel-pair registers.
 */
void dpp401_set_optional_cursor_attributes(
struct dpp *dpp_base,
struct dpp_cursor_attributes *attr)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
}
}
/* Program the cursor CSC matrix block in the DPP CM.
 * RGB color spaces (anything below COLOR_SPACE_YCBCR601) bypass the
 * matrix. Otherwise coefficients come from the caller's tbl_entry, or
 * from the hardcoded dpp_input_csc_matrix table when tbl_entry is NULL.
 * The matrix is double-buffered: the set (A/B) not currently in use is
 * programmed and then selected, so the switch is atomic.
 */
static void dpp401_program_cursor_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
const struct dpp_input_csc_matrix *tbl_entry)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t mode_select = 0;
struct color_matrices_reg cur_matrix_regs;
unsigned int i;
const uint16_t *regval = NULL;
int arr_size = sizeof(dpp_input_csc_matrix) / sizeof(struct dpp_input_csc_matrix);
if (color_space < COLOR_SPACE_YCBCR601) {
REG_SET(CUR0_MATRIX_MODE, 0, CUR0_MATRIX_MODE, CUR_MATRIX_BYPASS);
return;
}
/* If adjustments not provided use hardcoded table for color space conversion */
if (tbl_entry == NULL) {
for (i = 0; i < arr_size; i++)
if (dpp_input_csc_matrix[i].color_space == color_space) {
regval = dpp_input_csc_matrix[i].regval;
break;
}
if (regval == NULL) {
/* no table entry for this color space: bypass rather than
 * program garbage */
BREAK_TO_DEBUGGER();
REG_SET(CUR0_MATRIX_MODE, 0, CUR0_MATRIX_MODE, CUR_MATRIX_BYPASS);
return;
}
} else {
regval = tbl_entry->regval;
}
REG_GET(CUR0_MATRIX_MODE, CUR0_MATRIX_MODE_CURRENT, &mode_select);
//If current set in use not set A, then use set A, otherwise use set B
if (mode_select != CUR_MATRIX_SET_A)
mode_select = CUR_MATRIX_SET_A;
else
mode_select = CUR_MATRIX_SET_B;
/* shifts/masks are identical for sets A and B, so the A-set field
 * definitions are used for both */
cur_matrix_regs.shifts.csc_c11 = dpp->tf_shift->CUR0_MATRIX_C11_A;
cur_matrix_regs.masks.csc_c11 = dpp->tf_mask->CUR0_MATRIX_C11_A;
cur_matrix_regs.shifts.csc_c12 = dpp->tf_shift->CUR0_MATRIX_C12_A;
cur_matrix_regs.masks.csc_c12 = dpp->tf_mask->CUR0_MATRIX_C12_A;
if (mode_select == CUR_MATRIX_SET_A) {
cur_matrix_regs.csc_c11_c12 = REG(CUR0_MATRIX_C11_C12_A);
cur_matrix_regs.csc_c33_c34 = REG(CUR0_MATRIX_C33_C34_A);
} else {
cur_matrix_regs.csc_c11_c12 = REG(CUR0_MATRIX_C11_C12_B);
cur_matrix_regs.csc_c33_c34 = REG(CUR0_MATRIX_C33_C34_B);
}
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&cur_matrix_regs);
//select coefficient set to use
REG_SET(CUR0_MATRIX_MODE, 0, CUR0_MATRIX_MODE, mode_select);
}
/* Program Cursor matrix block in DPP CM */
void dpp401_set_cursor_matrix(
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix)
{
struct dpp_input_csc_matrix cursor_tbl_entry;
unsigned int i;
if (cursor_csc_color_matrix.enable_adjustment == true) {
for (i = 0; i < 12; i++)
cursor_tbl_entry.regval[i] = cursor_csc_color_matrix.matrix[i];
cursor_tbl_entry.color_space = color_space;
dpp401_program_cursor_csc(dpp_base, color_space, &cursor_tbl_entry);
} else {
dpp401_program_cursor_csc(dpp_base, color_space, NULL);
}
}

View file

@ -0,0 +1,968 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn401/dcn401_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define NUM_LEVELS 32
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
/* Selects which coefficient RAM table a filter write targets
 * (SCL_COEF_RAM_FILTER_TYPE field); this DSCL copy names slots 4/5
 * as alpha filters. */
enum dcn401_coef_filter_type_sel {
SCL_COEF_LUMA_VERT_FILTER = 0,
SCL_COEF_LUMA_HORZ_FILTER = 1,
SCL_COEF_CHROMA_VERT_FILTER = 2,
SCL_COEF_CHROMA_HORZ_FILTER = 3,
SCL_COEF_ALPHA_VERT_FILTER = 4,
SCL_COEF_ALPHA_HORZ_FILTER = 5
};
/* DSCL auto-calibration operating mode. */
enum dscl_autocal_mode {
AUTOCAL_MODE_OFF = 0,
/* Autocal calculate the scaling ratio and initial phase and the
* DSCL_MODE_SEL must be set to 1
*/
AUTOCAL_MODE_AUTOSCALE = 1,
/* Autocal perform auto centering without replication and the
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOCENTER = 2,
/* Autocal perform auto centering and auto replication and the
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOREPLICATE = 3
};
/* DSCL scaler operating mode (SCL_MODE register). */
enum dscl_mode_sel {
DSCL_MODE_SCALING_444_BYPASS = 0,
DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
DSCL_MODE_DSCL_BYPASS = 6
};
/* Map a line-buffer pixel depth to the DSCL PIXEL_DEPTH register
 * encoding: 0 = 10 bpc, 1 = 8 bpc, 2 = 6 bpc, 3 = 12 bpc.
 * Returns -1 (and asserts) for unsupported depths.
 */
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
	switch (depth) {
	case LB_PIXEL_DEPTH_30BPP:
		return 0; /* 10 bpc */
	case LB_PIXEL_DEPTH_24BPP:
		return 1; /* 8 bpc */
	case LB_PIXEL_DEPTH_18BPP:
		return 2; /* 6 bpc */
	case LB_PIXEL_DEPTH_36BPP:
		return 3; /* 12 bpc */
	default:
		ASSERT(0);
		return -1; /* Unsupported */
	}
}
/* True when the pixel format lies in the video (YCbCr) format range. */
static bool dpp401_dscl_is_video_format(enum pixel_format format)
{
	return format >= PIXEL_FORMAT_VIDEO_BEGIN &&
	       format <= PIXEL_FORMAT_VIDEO_END;
}
/* True for the 4:2:0 subsampled pixel formats (8- or 10-bit). */
static bool dpp401_dscl_is_420_format(enum pixel_format format)
{
	return format == PIXEL_FORMAT_420BPP8 ||
	       format == PIXEL_FORMAT_420BPP10;
}
/* Choose the DSCL operating mode for the given scaler configuration.
 * FP16 must bypass on hardware that processes fixed-point data only;
 * all-1:1 ratios bypass entirely unless debug forces scaling; non-420
 * formats pick the 444 RGB/YCbCr mode; for 420, the plane (luma or
 * chroma) whose ratio is exactly 1:1 is bypassed individually.
 */
static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
{
const long long one = dc_fixpt_one.value;
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
return DSCL_MODE_DSCL_BYPASS;
}
/* 1:1 scaling on every plane: full bypass unless debug override */
if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
return DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
return DSCL_MODE_SCALING_444_RGB_ENABLE;
}
/* 420: bypass whichever plane has an exact 1:1 ratio */
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
return DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
/* Power the DSCL LUT memory up or down.
 * Power-up forces the memory on and waits for the power state.
 * Power-down is deferred when the dscl low-power debug option is set:
 * the write is flagged in deferred_reg_writes and optimized_required
 * is raised so it happens later; otherwise it is forced immediately.
 * No-op when this instance has no DSCL_MEM_PWR_CTRL register.
 */
static void dpp401_power_on_dscl(
struct dpp *dpp_base,
bool power_on)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
if (power_on) {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
} else {
if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
/* defer the power-down write to the optimize pass */
dpp->base.ctx->dc->optimized_required = true;
dpp->base.deferred_reg_writes.bits.disable_dscl = true;
} else {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
}
}
}
}
/* Configure the scaler line buffer: data format (full pixel-format
 * controls only on fixed-format hardware; float hardware programs just
 * interleave and alpha), memory size configuration, and the maximum
 * partition count (63, or 31 when the caps say so).
 */
static void dpp401_dscl_set_lb(
struct dcn401_dpp *dpp,
const struct line_buffer_params *lb_params,
enum lb_memory_config mem_size_config)
{
uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
/* LB */
if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL caps: pixel data processed in fixed format */
uint32_t pixel_depth = dpp401_dscl_get_pixel_depth_val(lb_params->depth);
uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth;
REG_SET_7(LB_DATA_FORMAT, 0,
PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */
PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */
PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */
DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */
DITHER_EN, 0, /* Dithering enable: Disabled */
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
} else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
if (dpp->base.caps->max_lb_partitions == 31)
max_partitions = 31;
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
LB_MAX_PARTITIONS, max_partitions);
}
/* Return the 64-phase coefficient table for the given tap count and
 * scaling ratio. One tap needs no coefficient RAM, so NULL is returned;
 * any other unsupported tap count is a bug.
 */
static const uint16_t *dpp401_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
{
	switch (taps) {
	case 8:
		return get_filter_8tap_64p(ratio);
	case 7:
		return get_filter_7tap_64p(ratio);
	case 6:
		return get_filter_6tap_64p(ratio);
	case 5:
		return get_filter_5tap_64p(ratio);
	case 4:
		return get_filter_4tap_64p(ratio);
	case 3:
		return get_filter_3tap_64p(ratio);
	case 2:
		return get_filter_2tap_64p();
	case 1:
		return NULL; /* single tap: no RAM coefficients required */
	default:
		/* should never happen, bug */
		BREAK_TO_DEBUGGER();
		return NULL;
	}
}
/* Write one coefficient table into the SCL coefficient RAM.
 * The RAM auto-increments from tap pair 0 / phase 0 after the initial
 * SCL_COEF_RAM_TAP_SELECT write. Coefficients are written as even/odd
 * pairs per phase; only NUM_PHASES/2 + 1 phases are stored (the rest
 * presumably derived by hardware symmetry -- TODO confirm). The odd
 * slot is zero-padded when the tap count is odd.
 */
static void dpp401_dscl_set_scaler_filter(
struct dcn401_dpp *dpp,
uint32_t taps,
enum dcn401_coef_filter_type_sel filter_type,
const uint16_t *filter)
{
const int tap_pairs = (taps + 1) / 2;
int phase;
int pair;
uint16_t odd_coef, even_coef;
REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0,
SCL_COEF_RAM_TAP_PAIR_IDX, 0,
SCL_COEF_RAM_PHASE, 0,
SCL_COEF_RAM_FILTER_TYPE, filter_type);
for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
for (pair = 0; pair < tap_pairs; pair++) {
even_coef = filter[phase * taps + 2 * pair];
if ((pair * 2 + 1) < taps)
odd_coef = filter[phase * taps + 2 * pair + 1];
else
odd_coef = 0;
REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
/* Even tap coefficient (bits 1:0 fixed to 0) */
SCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
/* Write/read control for even coefficient */
SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
/* Odd tap coefficient (bits 1:0 fixed to 0) */
SCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
/* Write/read control for odd coefficient */
SCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
}
}
}
/* Program the luma/chroma scaler filter coefficients.
 * 2-tap configurations use hardcoded coefficients with an optional
 * sharpness factor. Otherwise, coefficient tables come either from SPL
 * precomputed data (config.use_spl) or the 64-phase filter tables; they
 * are written to the inactive coefficient RAM bank only when a table
 * pointer changed since the last call (tables are static, so pointer
 * comparison suffices), after which the banks are swapped atomically
 * via SCL_COEF_RAM_SELECT.
 */
static void dpp401_dscl_set_scl_filter(
struct dcn401_dpp *dpp,
const struct scaler_data *scl_data,
bool chroma_coef_mode)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
bool h_2tap_sharp_en = false;
bool v_2tap_sharp_en = false;
uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz;
uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert;
bool coef_ram_current;
const uint16_t *filter_h = NULL;
const uint16_t *filter_v = NULL;
const uint16_t *filter_h_c = NULL;
const uint16_t *filter_v_c = NULL;
if (dpp->base.ctx->dc->config.use_spl) {
/* SPL path: tables precomputed into dscl_prog_data */
filter_h = scl_data->dscl_prog_data.filter_h;
filter_v = scl_data->dscl_prog_data.filter_v;
filter_h_c = scl_data->dscl_prog_data.filter_h_c;
filter_v_c = scl_data->dscl_prog_data.filter_v_c;
} else {
filter_h = dpp401_dscl_get_filter_coeffs_64p(
scl_data->taps.h_taps, scl_data->ratios.horz);
filter_v = dpp401_dscl_get_filter_coeffs_64p(
scl_data->taps.v_taps, scl_data->ratios.vert);
filter_h_c = dpp401_dscl_get_filter_coeffs_64p(
scl_data->taps.h_taps_c, scl_data->ratios.horz_c);
filter_v_c = dpp401_dscl_get_filter_coeffs_64p(
scl_data->taps.v_taps_c, scl_data->ratios.vert_c);
}
/* hardcoded coefficients only when both planes use exactly 2 taps */
h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3
&& scl_data->taps.h_taps_c < 3
&& (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1);
v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3
&& scl_data->taps.v_taps_c < 3
&& (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1);
h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0;
v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0;
REG_UPDATE_6(DSCL_2TAP_CONTROL,
SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en,
SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en,
SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor,
SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en,
SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en,
SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor);
if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) {
bool filter_updated = false;
/* tables are static: pointer comparison detects a change */
filter_updated = (filter_h && (filter_h != dpp->filter_h))
|| (filter_v && (filter_v != dpp->filter_v));
if (chroma_coef_mode) {
filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c))
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
if (filter_updated) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_LUMA_HORZ_FILTER, filter_h);
}
dpp->filter_h = filter_h;
if (!v_2tap_hardcode_coef_en && filter_v) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.v_taps,
SCL_COEF_LUMA_VERT_FILTER, filter_v);
}
dpp->filter_v = filter_v;
if (chroma_coef_mode) {
if (!h_2tap_hardcode_coef_en && filter_h_c) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps_c,
SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
}
if (!v_2tap_hardcode_coef_en && filter_v_c) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.v_taps_c,
SCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
}
}
dpp->filter_h_c = filter_h_c;
dpp->filter_v_c = filter_v_c;
coef_ram_current = get_reg_field_value_ex(
scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT,
dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT);
/* Swap coefficient RAM and set chroma coefficient mode */
REG_SET_2(SCL_MODE, scl_mode,
SCL_COEF_RAM_SELECT, !coef_ram_current,
SCL_CHROMA_COEF_MODE, chroma_coef_mode);
}
}
}
// TODO: Fix defined but not used error
//static int dpp401_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth)
//{
// if (depth == LB_PIXEL_DEPTH_30BPP)
// return 10;
// else if (depth == LB_PIXEL_DEPTH_24BPP)
// return 8;
// else if (depth == LB_PIXEL_DEPTH_18BPP)
// return 6;
// else if (depth == LB_PIXEL_DEPTH_36BPP)
// return 12;
// else {
// BREAK_TO_DEBUGGER();
// return -1; /* Unsupported */
// }
//}
// TODO: Fix defined but not used error
//void dpp401_dscl_calc_lb_num_partitions(
// const struct scaler_data *scl_data,
// enum lb_memory_config lb_config,
// int *num_part_y,
// int *num_part_c)
//{
// int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
// lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
//
// int line_size = scl_data->viewport.width < scl_data->recout.width ?
// scl_data->viewport.width : scl_data->recout.width;
// int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
// scl_data->viewport_c.width : scl_data->recout.width;
//
// if (line_size == 0)
// line_size = 1;
//
// if (line_size_c == 0)
// line_size_c = 1;
//
//
// lb_bpc = dpp401_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
// memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
// memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
// memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
//
// if (lb_config == LB_MEMORY_CONFIG_1) {
// lb_memory_size = 816;
// lb_memory_size_c = 816;
// lb_memory_size_a = 984;
// } else if (lb_config == LB_MEMORY_CONFIG_2) {
// lb_memory_size = 1088;
// lb_memory_size_c = 1088;
// lb_memory_size_a = 1312;
// } else if (lb_config == LB_MEMORY_CONFIG_3) {
// /* 420 mode: using 3rd mem from Y, Cr and Cb */
// lb_memory_size = 816 + 1088 + 848 + 848 + 848;
// lb_memory_size_c = 816 + 1088;
// lb_memory_size_a = 984 + 1312 + 456;
// } else {
// lb_memory_size = 816 + 1088 + 848;
// lb_memory_size_c = 816 + 1088 + 848;
// lb_memory_size_a = 984 + 1312 + 456;
// }
// *num_part_y = lb_memory_size / memory_line_size_y;
// *num_part_c = lb_memory_size_c / memory_line_size_c;
// num_partitions_a = lb_memory_size_a / memory_line_size_a;
//
// if (scl_data->lb_params.alpha_en
// && (num_partitions_a < *num_part_y))
// *num_part_y = num_partitions_a;
//
// if (*num_part_y > 64)
// *num_part_y = 64;
// if (*num_part_c > 64)
// *num_part_c = 64;
//
//}
/* Check whether a line-buffer configuration can feed the vertical
 * scaler: with a (ceiled) vertical downscale ratio above 2 the scaler
 * consumes extra partitions (ceil_vratio - 2), reducing how many are
 * available for the vertical taps.
 */
static bool dpp401_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps)
{
	int usable_partitions = num_partitions;

	if (ceil_vratio > 2)
		usable_partitions = num_partitions - ceil_vratio + 2;

	return vtaps <= usable_partitions;
}
/* Find the first (smallest) line-buffer memory configuration whose
 * luma and chroma partition counts can support the required vertical
 * taps. Tries CONFIG_1, CONFIG_2, then (420 formats only) CONFIG_3,
 * falling back to CONFIG_0 with an assert that it actually fits.
 * The use_max_lb debug option skips the search and forces the largest
 * applicable config.
 */
static enum lb_memory_config dpp401_dscl_find_lb_memory_config(struct dcn401_dpp *dpp,
const struct scaler_data *scl_data)
{
int num_part_y, num_part_c;
int vtaps = scl_data->taps.v_taps;
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
if (dpp->base.ctx->dc->debug.use_max_lb) {
if (scl_data->format == PIXEL_FORMAT_420BPP8
|| scl_data->format == PIXEL_FORMAT_420BPP10)
return LB_MEMORY_CONFIG_3;
return LB_MEMORY_CONFIG_0;
}
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
if (dpp401_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp401_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_1;
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c);
if (dpp401_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp401_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_2;
if (scl_data->format == PIXEL_FORMAT_420BPP8
|| scl_data->format == PIXEL_FORMAT_420BPP10) {
/* CONFIG_3 is only usable for 420 formats */
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c);
if (dpp401_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp401_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_3;
}
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
/*Ensure we can support the requested number of vtaps*/
ASSERT(dpp401_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp401_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c));
return LB_MEMORY_CONFIG_0;
}
/* Program scale ratios and initial filter phases for manual scaling.
 * SPL path (config.use_spl): values are precomputed in dscl_prog_data
 * and written verbatim. Legacy path: ratios are converted to u3.19 and
 * shifted left by 5 to pad the register field; init phases are 0.24
 * fixed point with the low five bits zeroed. The *_BOT registers, when
 * present, get init + one ratio step (bottom field -- presumably for
 * interlacing, TODO confirm).
 */
static void dpp401_dscl_set_manual_ratio_init(
struct dcn401_dpp *dpp, const struct scaler_data *data)
{
uint32_t init_frac = 0;
uint32_t init_int = 0;
if (dpp->base.ctx->dc->config.use_spl) {
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
SCL_H_SCALE_RATIO, data->dscl_prog_data.ratios.h_scale_ratio);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
SCL_V_SCALE_RATIO, data->dscl_prog_data.ratios.v_scale_ratio);
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
SCL_H_SCALE_RATIO_C, data->dscl_prog_data.ratios.h_scale_ratio_c);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
SCL_V_SCALE_RATIO_C, data->dscl_prog_data.ratios.v_scale_ratio_c);
REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
SCL_H_INIT_FRAC, data->dscl_prog_data.init.h_filter_init_frac,
SCL_H_INIT_INT, data->dscl_prog_data.init.h_filter_init_int);
REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
SCL_H_INIT_FRAC_C, data->dscl_prog_data.init.h_filter_init_frac_c,
SCL_H_INIT_INT_C, data->dscl_prog_data.init.h_filter_init_int_c);
REG_SET_2(SCL_VERT_FILTER_INIT, 0,
SCL_V_INIT_FRAC, data->dscl_prog_data.init.v_filter_init_frac,
SCL_V_INIT_INT, data->dscl_prog_data.init.v_filter_init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT)) {
REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
SCL_V_INIT_FRAC_BOT, data->dscl_prog_data.init.v_filter_init_bot_frac,
SCL_V_INIT_INT_BOT, data->dscl_prog_data.init.v_filter_init_bot_int);
}
REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
SCL_V_INIT_FRAC_C, data->dscl_prog_data.init.v_filter_init_frac_c,
SCL_V_INIT_INT_C, data->dscl_prog_data.init.v_filter_init_int_c);
if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
SCL_V_INIT_FRAC_BOT_C, data->dscl_prog_data.init.v_filter_init_bot_frac_c,
SCL_V_INIT_INT_BOT_C, data->dscl_prog_data.init.v_filter_init_bot_int_c);
}
return;
}
/* legacy path: compute register values from the fixed-point ratios */
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5);
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5);
/*
* 0.24 format for fraction, first five bits zeroed
*/
init_frac = dc_fixpt_u0d19(data->inits.h) << 5;
init_int = dc_fixpt_floor(data->inits.h);
REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
SCL_H_INIT_FRAC, init_frac,
SCL_H_INIT_INT, init_int);
init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5;
init_int = dc_fixpt_floor(data->inits.h_c);
REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
SCL_H_INIT_FRAC_C, init_frac,
SCL_H_INIT_INT_C, init_int);
init_frac = dc_fixpt_u0d19(data->inits.v) << 5;
init_int = dc_fixpt_floor(data->inits.v);
REG_SET_2(SCL_VERT_FILTER_INIT, 0,
SCL_V_INIT_FRAC, init_frac,
SCL_V_INIT_INT, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT)) {
/* bottom init = top init advanced by one vertical ratio step */
struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
init_frac = dc_fixpt_u0d19(bot) << 5;
init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
SCL_V_INIT_FRAC_BOT, init_frac,
SCL_V_INIT_INT_BOT, init_int);
}
init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
init_int = dc_fixpt_floor(data->inits.v_c);
REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
SCL_V_INIT_FRAC_C, init_frac,
SCL_V_INIT_INT_C, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
init_frac = dc_fixpt_u0d19(bot) << 5;
init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
SCL_V_INIT_FRAC_BOT_C, init_frac,
SCL_V_INIT_INT_BOT_C, init_int);
}
}
/**
 * dpp401_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area
 *
 * @dpp: DPP data struct
 * @recout: Rectangle information
 *
 * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on
 * the values specified in the recout parameter.
 *
 * Note: This function only takes effect if AutoCal is disabled.
 */
static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp,
				   const struct rect *recout)
{
	/* Position of the first RECOUT pixel/line within the active OTG area. */
	REG_SET_2(RECOUT_START, 0,
		RECOUT_START_X, recout->x,
		RECOUT_START_Y, recout->y);

	/* RECOUT extent: width in pixels, height in lines. */
	REG_SET_2(RECOUT_SIZE, 0,
		RECOUT_WIDTH, recout->width,
		RECOUT_HEIGHT, recout->height);
}
/**
 * dpp401_dscl_program_easf - Program EASF
 *
 * @dpp_base: High level DPP struct
 * @scl_data: scaler data info
 *
 * This is the primary function to program the Edge Adaptive Scaler Function
 * (EASF) registers.
 */
static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);

	PERF_TRACE();
	/* Sharpness control: matrix mode and linear-to-nonlinear enable */
	REG_UPDATE(DSCL_SC_MODE,
		SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode);
	REG_UPDATE(DSCL_SC_MODE,
		SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
	// DSCL_EASF_V_MODE
	/* Vertical EASF: enable, 2-tap sharp factor, ring estimator force */
	REG_UPDATE(DSCL_EASF_V_MODE,
		SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en);
	REG_UPDATE(DSCL_EASF_V_MODE,
		SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor);
	REG_UPDATE(DSCL_EASF_V_MODE,
		SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
	/* Vertical blur-filter control: BF1/BF2/BF3 modes and BF2 gains */
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en);
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode);
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode);
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain);
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain);
	REG_UPDATE(DSCL_EASF_V_BF_CNTL,
		SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
	/* Vertical 3-tap ring estimator: tilt limits, slopes and offset */
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
		SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt);
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
		SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
		SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope);
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
		SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
		SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope);
	REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
		SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
	/* Vertical even-tap ring estimator: reduction and gain factors */
	REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
		SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1);
	REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
		SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
	REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
		SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1);
	REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
		SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
	/* Vertical blur-filter final clamp limits */
	REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
		SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa);
	REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
		SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb);
	REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
		SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina);
	REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
		SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
	// DSCL_EASF_H_MODE
	/* Horizontal EASF: mirrors the vertical programming above */
	REG_UPDATE(DSCL_EASF_H_MODE,
		SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en);
	REG_UPDATE(DSCL_EASF_H_MODE,
		SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor);
	REG_UPDATE(DSCL_EASF_H_MODE,
		SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
	/* Horizontal blur-filter control */
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en);
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode);
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode);
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain);
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain);
	REG_UPDATE(DSCL_EASF_H_BF_CNTL,
		SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
	/* Horizontal even-tap ring estimator */
	REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
		SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1);
	REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
		SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
	REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
		SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1);
	REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
		SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
	/* Horizontal blur-filter final clamp limits */
	REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
		SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa);
	REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
		SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb);
	REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
		SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina);
	REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
		SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
	PERF_TRACE();
}
/* Load the sharpness delta LUT: select host access to the LUT, then write
 * each of the NUM_LEVELS entries through the index/data register pair.
 */
static void dpp401_dscl_set_isharp_filter(
	struct dcn401_dpp *dpp, const uint32_t *filter)
{
	int idx;

	REG_UPDATE(ISHARP_DELTA_CTRL, ISHARP_DELTA_LUT_HOST_SELECT, 0);

	for (idx = 0; idx < NUM_LEVELS; idx++) {
		REG_SET(ISHARP_DELTA_INDEX, 0, ISHARP_DELTA_INDEX, idx);
		REG_SET(ISHARP_DELTA_DATA, 0, ISHARP_DELTA_DATA, filter[idx]);
	}
}
/**
 * dpp401_dscl_program_isharp - Program isharp
 *
 * @dpp_base: High level DPP struct
 * @scl_data: scaler data info
 *
 * This is the primary function to program the isharp (image sharpener)
 * registers.
 */
static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
PERF_TRACE();
dpp->scl_data = *scl_data;
// ISHARP_EN
REG_SET(ISHARP_MODE, 0,
ISHARP_EN, scl_data->dscl_prog_data.isharp_en);
// ISHARP_NOISEDET_EN
REG_SET(ISHARP_MODE, 0,
ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable);
// ISHARP_NOISEDET_MODE
REG_SET(ISHARP_MODE, 0,
ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
// ISHARP_NOISEDET_UTHRE
REG_SET(ISHARP_NOISEDET_THRESHOLD, 0,
ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
// ISHARP_NOISEDET_DTHRE
REG_SET(ISHARP_NOISEDET_THRESHOLD, 0,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
REG_SET(ISHARP_MODE, 0,
ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
// ISHARP_NOISEDET_UTHRE
REG_SET(ISHARP_NOISEDET_THRESHOLD, 0,
ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
// ISHARP_NOISEDET_DTHRE
REG_SET(ISHARP_NOISEDET_THRESHOLD, 0,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
// ISHARP_NOISEDET_PWL_START_IN
REG_SET(ISHARP_NOISE_GAIN_PWL, 0,
ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in);
// ISHARP_NOISEDET_PWL_END_IN
REG_SET(ISHARP_NOISE_GAIN_PWL, 0,
ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in);
// ISHARP_NOISEDET_PWL_SLOPE
REG_SET(ISHARP_NOISE_GAIN_PWL, 0,
ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope);
// ISHARP_LBA_MODE
REG_SET(ISHARP_MODE, 0,
ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode);
// TODO: ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG
// ISHARP_FMT_MODE
REG_SET(ISHARP_MODE, 0,
ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode);
// ISHARP_FMT_NORM
REG_SET(ISHARP_MODE, 0,
ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
// ISHARP_DELTA_LUT
dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
// ISHARP_NLDELTA_SCLIP_EN_P
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p);
// ISHARP_NLDELTA_SCLIP_PIVOT_P
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p);
// ISHARP_NLDELTA_SCLIP_SLOPE_P
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p);
// ISHARP_NLDELTA_SCLIP_EN_N
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n);
// ISHARP_NLDELTA_SCLIP_PIVOT_N
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n);
// ISHARP_NLDELTA_SCLIP_SLOPE_N
REG_SET(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n);
PERF_TRACE();
} // dpp401_dscl_program_isharp
/**
 * dpp401_dscl_set_scaler_manual_scale - Manually program scaler and line buffer
 *
 * @dpp_base: High level DPP struct
 * @scl_data: scaler data info
 *
 * This is the primary function to program the scaler and line buffer in manual
 * scaling mode. To execute the required operations for manual scale, we need
 * to disable AutoCal first.
 */
void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
	const struct scaler_data *scl_data)
{
	enum lb_memory_config lb_config;
	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
	const struct rect *rect = &scl_data->recout;
	uint32_t mpc_width = scl_data->h_active;
	uint32_t mpc_height = scl_data->v_active;
	/* HW tap-count fields are programmed as (taps - 1) */
	uint32_t v_num_taps = scl_data->taps.v_taps - 1;
	uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
	uint32_t h_num_taps = scl_data->taps.h_taps - 1;
	uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
	enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
		dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
	bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
		&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;

	/* Skip reprogramming when the requested config matches the cached one */
	if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
		return;

	PERF_TRACE();

	dpp->scl_data = *scl_data;

	/* When SPL is in use, take mode/recout/taps from the precomputed
	 * dscl_prog_data instead of the locally derived values above.
	 * NOTE(review): this path uses dscl_prog_data.taps directly, without
	 * the (taps - 1) bias applied above — presumably SPL pre-biases them;
	 * confirm against the SPL code.
	 */
	if (dpp->base.ctx->dc->config.use_spl) {
		dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
		/* cast only drops const; rect is read-only below */
		rect = (struct rect *)&scl_data->dscl_prog_data.recout;
		mpc_width = scl_data->dscl_prog_data.mpc_size.width;
		mpc_height = scl_data->dscl_prog_data.mpc_size.height;
		v_num_taps = scl_data->dscl_prog_data.taps.v_taps;
		v_num_taps_c = scl_data->dscl_prog_data.taps.v_taps_c;
		h_num_taps = scl_data->dscl_prog_data.taps.h_taps;
		h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
	}
	/* Power the DSCL memory back up before any non-bypass programming */
	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
		if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
			dpp401_power_on_dscl(dpp_base, true);
	}

	/* Autocal off */
	REG_SET_3(DSCL_AUTOCAL, 0,
		AUTOCAL_MODE, AUTOCAL_MODE_OFF,
		AUTOCAL_NUM_PIPE, 0,
		AUTOCAL_PIPE_ID, 0);

	/*clean scaler boundary mode when Autocal off*/
	REG_SET(DSCL_CONTROL, 0,
		SCL_BOUNDARY_MODE, 0);

	/* Recout */
	dpp401_dscl_set_recout(dpp, rect);

	/* MPC Size */
	REG_SET_2(MPC_SIZE, 0,
		/* Number of horizontal pixels of MPC */
		MPC_WIDTH, mpc_width,
		/* Number of vertical lines of MPC */
		MPC_HEIGHT, mpc_height);

	/* SCL mode */
	REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);

	/* In bypass, optionally power the DSCL memory down and stop here */
	if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
		if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
			dpp401_power_on_dscl(dpp_base, false);
		return;
	}

	/* LB */
	lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
	dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);

	if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
		return;

	/* Black offsets: chroma offset differs for YCbCr formats */
	if (REG(SCL_BLACK_OFFSET)) {
		if (ycbcr)
			REG_SET_2(SCL_BLACK_OFFSET, 0,
				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
		else
			REG_SET_2(SCL_BLACK_OFFSET, 0,
				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
	}

	/* Manually calculate scale ratio and init values */
	dpp401_dscl_set_manual_ratio_init(dpp, scl_data);

	/* HTaps/VTaps */
	REG_SET_4(SCL_TAP_CONTROL, 0,
		SCL_V_NUM_TAPS, v_num_taps,
		SCL_H_NUM_TAPS, h_num_taps,
		SCL_V_NUM_TAPS_C, v_num_taps_c,
		SCL_H_NUM_TAPS_C, h_num_taps_c);

	dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr);

	/* Edge adaptive scaler function configuration */
	if (dpp->base.ctx->dc->config.prefer_easf)
		dpp401_dscl_program_easf(dpp_base, scl_data);

	/* isharp configuration */
	//if (dpp->base.ctx->dc->config.prefer_easf)
	dpp401_dscl_program_isharp(dpp_base, scl_data);
	PERF_TRACE();
}

View file

@ -0,0 +1,747 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include <drm/display/drm_dsc_helper.h>
#include "reg_helper.h"
#include "dcn401_dsc.h"
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc401_disable(struct display_stream_compressor *dsc);
static void dsc401_disconnect(struct display_stream_compressor *dsc);
/*
 * Function table for the DCN4.0.1 DSC. The encoder-caps and packed-PPS
 * callbacks reuse the shared dsc2_* implementations (the local dsc401_*
 * variants are commented out below); the rest is DCN401-specific.
 */
const struct dsc_funcs dcn401_dsc_funcs = {
	.dsc_get_enc_caps = dsc2_get_enc_caps,
	.dsc_read_state = dsc401_read_state,
	.dsc_validate_stream = dsc401_validate_stream,
	.dsc_set_config = dsc401_set_config,
	.dsc_get_packed_pps = dsc2_get_packed_pps,
	.dsc_enable = dsc401_enable,
	.dsc_disable = dsc401_disable,
	.dsc_disconnect = dsc401_disconnect,
};
/* Macro definitions for REG_SET macros */
#define CTX \
dsc401->base.ctx
#define REG(reg)\
dsc401->dsc_regs->reg
#undef FN
#define FN(reg_name, field_name) \
dsc401->dsc_shift->field_name, dsc401->dsc_mask->field_name
#define DC_LOGGER \
dsc->ctx->logger
#define DCN401_MAX_PIXEL_CLOCK_Mhz 1188
#define DCN401_MAX_DISPLAY_CLOCK_Mhz 1200
/* Bits per component values the DSC encoder understands. */
enum dsc_bits_per_comp {
	DSC_BPC_8 = 8,
	DSC_BPC_10 = 10,
	DSC_BPC_12 = 12,
	DSC_BPC_UNKNOWN
};
/* API functions (external or via structure->function_pointer) */
/* Initialize a DCN401 DSC instance: wire up the function table, register
 * maps (regs/shift/mask), instance id and hardware limits.
 */
void dsc401_construct(struct dcn401_dsc *dsc,
	struct dc_context *ctx,
	int inst,
	const struct dcn401_dsc_registers *dsc_regs,
	const struct dcn401_dsc_shift *dsc_shift,
	const struct dcn401_dsc_mask *dsc_mask)
{
	dsc->base.ctx = ctx;
	dsc->base.inst = inst;
	dsc->base.funcs = &dcn401_dsc_funcs;
	dsc->dsc_regs = dsc_regs;
	dsc->dsc_shift = dsc_shift;
	dsc->dsc_mask = dsc_mask;
	/* Enforced by dsc401_validate_stream(); matches the 5184 max slice
	 * width (incl. 64 eDP MSO overlap pixels) quoted in the commented-out
	 * encoder caps below.
	 */
	dsc->max_image_width = 5184;
}
/* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput
* can be doubled, tripled etc. by using additional DSC engines.
*/
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
//{
// dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
//
// /*dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
// dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
// dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
// dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
//
// dsc_enc_caps->lb_bit_depth = 13;
// dsc_enc_caps->is_block_pred_supported = true;
//
// dsc_enc_caps->color_formats.bits.RGB = 1;
// dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
// dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
// dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
// dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
//
// dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
// dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
// dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
//
// /* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it.
// * Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
// * The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
// * be sufficient to process the input pixel rate fed into a single DSC engine.
// */
// /*dsc_enc_caps->max_total_throughput_mps = DCN401_MAX_DISPLAY_CLOCK_Mhz;
//
// /* For pixel clock bigger than a single-pipe limit we'll need two engines, which then doubles our
// * throughput and number of slices, but also introduces a lower limit of 2 slices
// */
// /*if (pixel_clock_100Hz >= DCN401_MAX_PIXEL_CLOCK_Mhz*10000) {
// dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 0;
// dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
// dsc_enc_caps->max_total_throughput_mps = DCN401_MAX_DISPLAY_CLOCK_Mhz * 2;
// }
//
// dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
// /*dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
//}
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
/* Snapshot the DSC hardware state (clock enable, PPS geometry, forwarding
 * config) into @s for later logging by dcn10_log_hw_state. Read-only.
 */
static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
{
	struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);

	REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
	REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
	REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
	REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
	REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
	REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
	REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
	REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
	/* Forwarding enable and the OPP pipe this DSC is attached to */
	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
		DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
/* Check whether @dsc_cfg can be driven by this DSC engine: the image must
 * fit the engine's max width and the shared config preparation must succeed.
 */
static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
	struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
	struct dsc_optc_config dsc_optc_cfg;

	/* Short-circuit: dsc_prepare_config() is only attempted when the
	 * width limit holds, exactly as in the original guarded form.
	 */
	return dsc_cfg->pic_width <= dsc401->max_image_width &&
		dsc_prepare_config(dsc_cfg, &dsc401->reg_vals, &dsc_optc_cfg);
}
/*
static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
{
DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)",
config->dc_dsc_cfg.bits_per_pixel,
config->dc_dsc_cfg.bits_per_pixel / 16,
((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16);
DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
}
*/
/* Prepare register values from @dsc_cfg and program them into the DSC,
 * returning the derived OPTC settings through @dsc_optc_cfg.
 * Note: failure of dsc_prepare_config() only trips an ASSERT (debug builds);
 * the registers are written regardless.
 */
static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
	struct dsc_optc_config *dsc_optc_cfg)
{
	bool is_config_ok;
	struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);

	DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
	/* dsc_config_log/dsc_log_pps come from shared DSC code; the local
	 * copies in this file are commented out.
	 */
	dsc_config_log(dsc, dsc_cfg);
	is_config_ok = dsc_prepare_config(dsc_cfg, &dsc401->reg_vals, dsc_optc_cfg);
	ASSERT(is_config_ok);
	DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
	dsc_log_pps(dsc, &dsc401->reg_vals.pps);
	dsc_write_to_registers(dsc, &dsc401->reg_vals);
}
/*
static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
{
bool is_config_ok;
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg);
ASSERT(is_config_ok);
drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps);
dsc_log_pps(dsc, &dsc_reg_vals.pps);
return is_config_ok;
}
*/
static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
int dsc_fw_config;
int enabled_opp_pipe;
DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
ASSERT(0);
}
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 1,
DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe);
}
static void dsc401_disable(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
int dsc_fw_config;
int enabled_opp_pipe;
DC_LOG_DSC("disable DSC %d", dsc->inst);
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
if (!dsc_clock_en || !dsc_fw_config) {
DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
ASSERT(0);
}
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 0);
}
/* Detach the DSC from its OPP pipe by clearing forwarding; the clock stays
 * enabled (full shutdown is dsc401_disable()).
 */
static void dsc401_disconnect(struct display_stream_compressor *dsc)
{
	struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);

	DC_LOG_DSC("disconnect DSC %d", dsc->inst);

	REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, 0);
}
/* This module's internal functions */
//static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
//{
// int i;
// int bits_per_pixel = pps->bits_per_pixel;
//
// DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major);
// DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor);
// DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component);
// DC_LOG_DSC("\tline_buf_depth %d", pps->line_buf_depth);
// DC_LOG_DSC("\tblock_pred_enable %d", pps->block_pred_enable);
// DC_LOG_DSC("\tconvert_rgb %d", pps->convert_rgb);
// DC_LOG_DSC("\tsimple_422 %d", pps->simple_422);
// DC_LOG_DSC("\tvbr_enable %d", pps->vbr_enable);
// DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", bits_per_pixel, bits_per_pixel / 16, ((bits_per_pixel % 16) * 10000) / 16);
// DC_LOG_DSC("\tpic_height %d", pps->pic_height);
// DC_LOG_DSC("\tpic_width %d", pps->pic_width);
// DC_LOG_DSC("\tslice_height %d", pps->slice_height);
// DC_LOG_DSC("\tslice_width %d", pps->slice_width);
// DC_LOG_DSC("\tslice_chunk_size %d", pps->slice_chunk_size);
// DC_LOG_DSC("\tinitial_xmit_delay %d", pps->initial_xmit_delay);
// DC_LOG_DSC("\tinitial_dec_delay %d", pps->initial_dec_delay);
// DC_LOG_DSC("\tinitial_scale_value %d", pps->initial_scale_value);
// DC_LOG_DSC("\tscale_increment_interval %d", pps->scale_increment_interval);
// DC_LOG_DSC("\tscale_decrement_interval %d", pps->scale_decrement_interval);
// DC_LOG_DSC("\tfirst_line_bpg_offset %d", pps->first_line_bpg_offset);
// DC_LOG_DSC("\tnfl_bpg_offset %d", pps->nfl_bpg_offset);
// DC_LOG_DSC("\tslice_bpg_offset %d", pps->slice_bpg_offset);
// DC_LOG_DSC("\tinitial_offset %d", pps->initial_offset);
// DC_LOG_DSC("\tfinal_offset %d", pps->final_offset);
// DC_LOG_DSC("\tflatness_min_qp %d", pps->flatness_min_qp);
// DC_LOG_DSC("\tflatness_max_qp %d", pps->flatness_max_qp);
// /* DC_LOG_DSC("\trc_parameter_set %d", pps->rc_parameter_set); */
// /*DC_LOG_DSC("\tnative_420 %d", pps->native_420);
// DC_LOG_DSC("\tnative_422 %d", pps->native_422);
// DC_LOG_DSC("\tsecond_line_bpg_offset %d", pps->second_line_bpg_offset);
// DC_LOG_DSC("\tnsl_bpg_offset %d", pps->nsl_bpg_offset);
// DC_LOG_DSC("\tsecond_line_offset_adj %d", pps->second_line_offset_adj);
// DC_LOG_DSC("\trc_model_size %d", pps->rc_model_size);
// DC_LOG_DSC("\trc_edge_factor %d", pps->rc_edge_factor);
// DC_LOG_DSC("\trc_quant_incr_limit0 %d", pps->rc_quant_incr_limit0);
// DC_LOG_DSC("\trc_quant_incr_limit1 %d", pps->rc_quant_incr_limit1);
// DC_LOG_DSC("\trc_tgt_offset_high %d", pps->rc_tgt_offset_high);
// DC_LOG_DSC("\trc_tgt_offset_low %d", pps->rc_tgt_offset_low);
//
// for (i = 0; i < NUM_BUF_RANGES - 1; i++)
// DC_LOG_DSC("\trc_buf_thresh[%d] %d", i, pps->rc_buf_thresh[i]);
//
// for (i = 0; i < NUM_BUF_RANGES; i++) {
// DC_LOG_DSC("\trc_range_parameters[%d].range_min_qp %d", i, pps->rc_range_params[i].range_min_qp);
// DC_LOG_DSC("\trc_range_parameters[%d].range_max_qp %d", i, pps->rc_range_params[i].range_max_qp);
// DC_LOG_DSC("\trc_range_parameters[%d].range_bpg_offset %d", i, pps->rc_range_params[i].range_bpg_offset);
// }
//}
//
//static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
//{
// uint8_t i;
//
// rc->rc_model_size = override->rc_model_size;
// for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
// rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
// for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
// rc->qp_min[i] = override->rc_minqp[i];
// rc->qp_max[i] = override->rc_maxqp[i];
// rc->ofs[i] = override->rc_offset[i];
// }
//
// rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
// rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
// rc->rc_edge_factor = override->rc_edge_factor;
// rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
// rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
//
// rc->initial_fullness_offset = override->initial_fullness_offset;
// rc->initial_xmit_delay = override->initial_delay;
//
// rc->flatness_min_qp = override->flatness_min_qp;
// rc->flatness_max_qp = override->flatness_max_qp;
// rc->flatness_det_thresh = override->flatness_det_thresh;
//}
//
//static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
// struct dsc_optc_config *dsc_optc_cfg)
//{
// struct dsc_parameters dsc_params;
// struct rc_params rc;
//
// /* Validate input parameters */
// /*ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
// ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v);
// ASSERT(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2);
// ASSERT(dsc_cfg->pic_width);
// ASSERT(dsc_cfg->pic_height);
// ASSERT((dsc_cfg->dc_dsc_cfg.version_minor == 1 &&
// (8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13)) ||
// (dsc_cfg->dc_dsc_cfg.version_minor == 2 &&
// ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
// dsc_cfg->dc_dsc_cfg.linebuf_depth == 0)));
// ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375
//
// if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h ||
// !(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) ||
// !dsc_cfg->pic_width || !dsc_cfg->pic_height ||
// !((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range:
// 8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13) ||
// (dsc_cfg->dc_dsc_cfg.version_minor == 2 && // v1.2 line buffer depth range:
// ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
// dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))) ||
// !(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff)) {
// dm_output_to_console("%s: Invalid parameters\n", __func__);
// return false;
// }
//
// dsc_init_reg_values(dsc_reg_vals);
//
// /* Copy input config */
// /*dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
// dsc_reg_vals->num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
// dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
// dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
// dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width;
// dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height;
// dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
// dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
// dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
// dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
// dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
//
// // TODO: in addition to validating slice height (pic height must be divisible by slice height),
// // see what happens when the same condition doesn't apply for slice_width/pic_width.
// dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
// dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
//
// ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
// if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
// dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v);
// return false;
// }
//
// dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
// if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
// dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32;
// else
// dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1;
//
// dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
// dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
// dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
// dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
//
// calc_rc_params(&rc, &dsc_reg_vals->pps);
//
// if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
// dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
//
// if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
// dm_output_to_console("%s: DSC config failed\n", __func__);
// return false;
// }
//
// dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params);
//
// dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel;
// dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width;
// dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ||
// dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 ||
// dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
//
// return true;
//}
//static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple)
//{
// enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
//
// /* NOTE: We don't support DSC_PIXFMT_SIMPLE_YCBCR422 */
//
// /*switch (dc_pix_enc) {
// case PIXEL_ENCODING_RGB:
// dsc_pix_fmt = DSC_PIXFMT_RGB;
// break;
// case PIXEL_ENCODING_YCBCR422:
// if (is_ycbcr422_simple)
// dsc_pix_fmt = DSC_PIXFMT_SIMPLE_YCBCR422;
// else
// dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR422;
// break;
// case PIXEL_ENCODING_YCBCR444:
// dsc_pix_fmt = DSC_PIXFMT_YCBCR444;
// break;
// case PIXEL_ENCODING_YCBCR420:
// dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR420;
// break;
// default:
// dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
// break;
// }
//
// ASSERT(dsc_pix_fmt != DSC_PIXFMT_UNKNOWN);
// return dsc_pix_fmt;
//}
//static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth)
//{
// enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN;
//
// switch (dc_color_depth) {
// case COLOR_DEPTH_888:
// bpc = DSC_BPC_8;
// break;
// case COLOR_DEPTH_101010:
// bpc = DSC_BPC_10;
// break;
// case COLOR_DEPTH_121212:
// bpc = DSC_BPC_12;
// break;
// default:
// bpc = DSC_BPC_UNKNOWN;
// break;
// }
//
// return bpc;
//}
//static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
//{
// int i;
//
// memset(reg_vals, 0, sizeof(struct dsc_reg_values));
//
// /* Non-PPS values */
// /*reg_vals->dsc_clock_enable = 1;
// reg_vals->dsc_clock_gating_disable = 0;
// reg_vals->underflow_recovery_en = 0;
// reg_vals->underflow_occurred_int_en = 0;
// reg_vals->underflow_occurred_status = 0;
// reg_vals->ich_reset_at_eol = 0;
// reg_vals->alternate_ich_encoding_en = 0;
// reg_vals->rc_buffer_model_size = 0;
// /*reg_vals->disable_ich = 0;*/
// /*reg_vals->dsc_dbg_en = 0;
//
// for (i = 0; i < 4; i++)
// reg_vals->rc_buffer_model_overflow_int_en[i] = 0;
//
// /* PPS values */
// /*reg_vals->pps.dsc_version_minor = 2;
// reg_vals->pps.dsc_version_major = 1;
// reg_vals->pps.line_buf_depth = 9;
// reg_vals->pps.bits_per_component = 8;
// reg_vals->pps.block_pred_enable = 1;
// reg_vals->pps.slice_chunk_size = 0;
// reg_vals->pps.pic_width = 0;
// reg_vals->pps.pic_height = 0;
// reg_vals->pps.slice_width = 0;
// reg_vals->pps.slice_height = 0;
// reg_vals->pps.initial_xmit_delay = 170;
// reg_vals->pps.initial_dec_delay = 0;
// reg_vals->pps.initial_scale_value = 0;
// reg_vals->pps.scale_increment_interval = 0;
// reg_vals->pps.scale_decrement_interval = 0;
// reg_vals->pps.nfl_bpg_offset = 0;
// reg_vals->pps.slice_bpg_offset = 0;
// reg_vals->pps.nsl_bpg_offset = 0;
// reg_vals->pps.initial_offset = 6144;
// reg_vals->pps.final_offset = 0;
// reg_vals->pps.flatness_min_qp = 3;
// reg_vals->pps.flatness_max_qp = 12;
// reg_vals->pps.rc_model_size = 8192;
// reg_vals->pps.rc_edge_factor = 6;
// reg_vals->pps.rc_quant_incr_limit0 = 11;
// reg_vals->pps.rc_quant_incr_limit1 = 11;
// reg_vals->pps.rc_tgt_offset_low = 3;
// reg_vals->pps.rc_tgt_offset_high = 3;
//}
/* Updates dsc_reg_values::reg_vals::xxx fields based on the values from computed params.
* This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn
* affects non-PPS register values.
*/
//static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
//{
// int i;
//
// reg_vals->pps = dsc_params->pps;
//
// // pps_computed will have the "expanded" values; need to shift them to make them fit for regs.
// for (i = 0; i < NUM_BUF_RANGES - 1; i++)
// reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
//
// reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
//}
/*
 * dsc_write_to_registers() - program one DSC instance from a fully
 * computed register-value snapshot.
 *
 * Writes the debug control, the DSCCIF input-interface configuration,
 * the DSCC slice/rate-control-buffer configuration and all PPS registers
 * (DSCC_PPS_CONFIG0..22) from @reg_vals. The caller is expected to have
 * filled @reg_vals completely beforehand (e.g. via the PPS/RC computation
 * path); nothing is read back or validated here.
 */
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
{
	uint32_t temp_int;
	struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);

	REG_SET(DSC_DEBUG_CONTROL, 0,
		DSC_DBG_EN, reg_vals->dsc_dbg_en);

	// dsccif registers: input pixel format and per-component bit depth
	REG_SET_2(DSCCIF_CONFIG0, 0,
		//INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, reg_vals->underflow_recovery_en,
		//INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, reg_vals->underflow_occurred_int_en,
		//INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, reg_vals->underflow_occurred_status,
		INPUT_PIXEL_FORMAT, reg_vals->pixel_format,
		DSCCIF_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
	/* REG_SET_2(DSCCIF_CONFIG1, 0,
		PIC_WIDTH, reg_vals->pps.pic_width,
		PIC_HEIGHT, reg_vals->pps.pic_height);
	*/
	// dscc registers
	/*
	 * DSCC_CONFIG0 layout differs between register maps: when the mask for
	 * ICH_RESET_AT_END_OF_LINE is zero the field does not exist, so only
	 * program it when the field-mask table defines it.
	 */
	if (dsc401->dsc_mask->ICH_RESET_AT_END_OF_LINE == 0) {
		REG_SET_3(DSCC_CONFIG0, 0,
			NUMBER_OF_SLICES_PER_LINE, reg_vals->num_slices_h - 1,
			ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en,
			NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1);
	} else {
		REG_SET_4(DSCC_CONFIG0, 0, ICH_RESET_AT_END_OF_LINE,
			reg_vals->ich_reset_at_eol, NUMBER_OF_SLICES_PER_LINE,
			reg_vals->num_slices_h - 1, ALTERNATE_ICH_ENCODING_EN,
			reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION,
			reg_vals->num_slices_v - 1);
	}
	REG_SET(DSCC_CONFIG1, 0,
		DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size);
	/*REG_SET_2(DSCC_CONFIG1, 0,
		DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size,
		DSCC_DISABLE_ICH, reg_vals->disable_ich);*/
	REG_SET_4(DSCC_INTERRUPT_CONTROL0, 0,
		DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN0, reg_vals->rc_buffer_model_overflow_int_en[0],
		DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN1, reg_vals->rc_buffer_model_overflow_int_en[1],
		DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN2, reg_vals->rc_buffer_model_overflow_int_en[2],
		DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN3, reg_vals->rc_buffer_model_overflow_int_en[3]);
	REG_SET_3(DSCC_PPS_CONFIG0, 0,
		DSC_VERSION_MINOR, reg_vals->pps.dsc_version_minor,
		LINEBUF_DEPTH, reg_vals->pps.line_buf_depth,
		DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
	/*
	 * Native 4:2:0/4:2:2 program the full bpp_x32 value into BITS_PER_PIXEL;
	 * all other pixel formats use half of it.
	 */
	if (reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
		temp_int = reg_vals->bpp_x32;
	else
		temp_int = reg_vals->bpp_x32 >> 1;
	REG_SET_7(DSCC_PPS_CONFIG1, 0,
		BITS_PER_PIXEL, temp_int,
		SIMPLE_422, reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422,
		CONVERT_RGB, reg_vals->pixel_format == DSC_PIXFMT_RGB,
		BLOCK_PRED_ENABLE, reg_vals->pps.block_pred_enable,
		NATIVE_422, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422,
		NATIVE_420, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420,
		CHUNK_SIZE, reg_vals->pps.slice_chunk_size);
	REG_SET_2(DSCC_PPS_CONFIG2, 0,
		PIC_WIDTH, reg_vals->pps.pic_width,
		PIC_HEIGHT, reg_vals->pps.pic_height);
	REG_SET_2(DSCC_PPS_CONFIG3, 0,
		SLICE_WIDTH, reg_vals->pps.slice_width,
		SLICE_HEIGHT, reg_vals->pps.slice_height);
	REG_SET(DSCC_PPS_CONFIG4, 0,
		INITIAL_XMIT_DELAY, reg_vals->pps.initial_xmit_delay);
	REG_SET_2(DSCC_PPS_CONFIG5, 0,
		INITIAL_SCALE_VALUE, reg_vals->pps.initial_scale_value,
		SCALE_INCREMENT_INTERVAL, reg_vals->pps.scale_increment_interval);
	REG_SET_3(DSCC_PPS_CONFIG6, 0,
		SCALE_DECREMENT_INTERVAL, reg_vals->pps.scale_decrement_interval,
		FIRST_LINE_BPG_OFFSET, reg_vals->pps.first_line_bpg_offset,
		SECOND_LINE_BPG_OFFSET, reg_vals->pps.second_line_bpg_offset);
	REG_SET_2(DSCC_PPS_CONFIG7, 0,
		NFL_BPG_OFFSET, reg_vals->pps.nfl_bpg_offset,
		SLICE_BPG_OFFSET, reg_vals->pps.slice_bpg_offset);
	REG_SET_2(DSCC_PPS_CONFIG8, 0,
		NSL_BPG_OFFSET, reg_vals->pps.nsl_bpg_offset,
		SECOND_LINE_OFFSET_ADJ, reg_vals->pps.second_line_offset_adj);
	REG_SET_2(DSCC_PPS_CONFIG9, 0,
		INITIAL_OFFSET, reg_vals->pps.initial_offset,
		FINAL_OFFSET, reg_vals->pps.final_offset);
	REG_SET_3(DSCC_PPS_CONFIG10, 0,
		FLATNESS_MIN_QP, reg_vals->pps.flatness_min_qp,
		FLATNESS_MAX_QP, reg_vals->pps.flatness_max_qp,
		RC_MODEL_SIZE, reg_vals->pps.rc_model_size);
	REG_SET_5(DSCC_PPS_CONFIG11, 0,
		RC_EDGE_FACTOR, reg_vals->pps.rc_edge_factor,
		RC_QUANT_INCR_LIMIT0, reg_vals->pps.rc_quant_incr_limit0,
		RC_QUANT_INCR_LIMIT1, reg_vals->pps.rc_quant_incr_limit1,
		RC_TGT_OFFSET_LO, reg_vals->pps.rc_tgt_offset_low,
		RC_TGT_OFFSET_HI, reg_vals->pps.rc_tgt_offset_high);
	/*
	 * RC buffer thresholds (14 entries) and the 15 rate-control ranges
	 * span DSCC_PPS_CONFIG12..22.
	 */
	REG_SET_4(DSCC_PPS_CONFIG12, 0,
		RC_BUF_THRESH0, reg_vals->pps.rc_buf_thresh[0],
		RC_BUF_THRESH1, reg_vals->pps.rc_buf_thresh[1],
		RC_BUF_THRESH2, reg_vals->pps.rc_buf_thresh[2],
		RC_BUF_THRESH3, reg_vals->pps.rc_buf_thresh[3]);
	REG_SET_4(DSCC_PPS_CONFIG13, 0,
		RC_BUF_THRESH4, reg_vals->pps.rc_buf_thresh[4],
		RC_BUF_THRESH5, reg_vals->pps.rc_buf_thresh[5],
		RC_BUF_THRESH6, reg_vals->pps.rc_buf_thresh[6],
		RC_BUF_THRESH7, reg_vals->pps.rc_buf_thresh[7]);
	REG_SET_4(DSCC_PPS_CONFIG14, 0,
		RC_BUF_THRESH8, reg_vals->pps.rc_buf_thresh[8],
		RC_BUF_THRESH9, reg_vals->pps.rc_buf_thresh[9],
		RC_BUF_THRESH10, reg_vals->pps.rc_buf_thresh[10],
		RC_BUF_THRESH11, reg_vals->pps.rc_buf_thresh[11]);
	REG_SET_5(DSCC_PPS_CONFIG15, 0,
		RC_BUF_THRESH12, reg_vals->pps.rc_buf_thresh[12],
		RC_BUF_THRESH13, reg_vals->pps.rc_buf_thresh[13],
		RANGE_MIN_QP0, reg_vals->pps.rc_range_params[0].range_min_qp,
		RANGE_MAX_QP0, reg_vals->pps.rc_range_params[0].range_max_qp,
		RANGE_BPG_OFFSET0, reg_vals->pps.rc_range_params[0].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG16, 0,
		RANGE_MIN_QP1, reg_vals->pps.rc_range_params[1].range_min_qp,
		RANGE_MAX_QP1, reg_vals->pps.rc_range_params[1].range_max_qp,
		RANGE_BPG_OFFSET1, reg_vals->pps.rc_range_params[1].range_bpg_offset,
		RANGE_MIN_QP2, reg_vals->pps.rc_range_params[2].range_min_qp,
		RANGE_MAX_QP2, reg_vals->pps.rc_range_params[2].range_max_qp,
		RANGE_BPG_OFFSET2, reg_vals->pps.rc_range_params[2].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG17, 0,
		RANGE_MIN_QP3, reg_vals->pps.rc_range_params[3].range_min_qp,
		RANGE_MAX_QP3, reg_vals->pps.rc_range_params[3].range_max_qp,
		RANGE_BPG_OFFSET3, reg_vals->pps.rc_range_params[3].range_bpg_offset,
		RANGE_MIN_QP4, reg_vals->pps.rc_range_params[4].range_min_qp,
		RANGE_MAX_QP4, reg_vals->pps.rc_range_params[4].range_max_qp,
		RANGE_BPG_OFFSET4, reg_vals->pps.rc_range_params[4].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG18, 0,
		RANGE_MIN_QP5, reg_vals->pps.rc_range_params[5].range_min_qp,
		RANGE_MAX_QP5, reg_vals->pps.rc_range_params[5].range_max_qp,
		RANGE_BPG_OFFSET5, reg_vals->pps.rc_range_params[5].range_bpg_offset,
		RANGE_MIN_QP6, reg_vals->pps.rc_range_params[6].range_min_qp,
		RANGE_MAX_QP6, reg_vals->pps.rc_range_params[6].range_max_qp,
		RANGE_BPG_OFFSET6, reg_vals->pps.rc_range_params[6].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG19, 0,
		RANGE_MIN_QP7, reg_vals->pps.rc_range_params[7].range_min_qp,
		RANGE_MAX_QP7, reg_vals->pps.rc_range_params[7].range_max_qp,
		RANGE_BPG_OFFSET7, reg_vals->pps.rc_range_params[7].range_bpg_offset,
		RANGE_MIN_QP8, reg_vals->pps.rc_range_params[8].range_min_qp,
		RANGE_MAX_QP8, reg_vals->pps.rc_range_params[8].range_max_qp,
		RANGE_BPG_OFFSET8, reg_vals->pps.rc_range_params[8].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG20, 0,
		RANGE_MIN_QP9, reg_vals->pps.rc_range_params[9].range_min_qp,
		RANGE_MAX_QP9, reg_vals->pps.rc_range_params[9].range_max_qp,
		RANGE_BPG_OFFSET9, reg_vals->pps.rc_range_params[9].range_bpg_offset,
		RANGE_MIN_QP10, reg_vals->pps.rc_range_params[10].range_min_qp,
		RANGE_MAX_QP10, reg_vals->pps.rc_range_params[10].range_max_qp,
		RANGE_BPG_OFFSET10, reg_vals->pps.rc_range_params[10].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG21, 0,
		RANGE_MIN_QP11, reg_vals->pps.rc_range_params[11].range_min_qp,
		RANGE_MAX_QP11, reg_vals->pps.rc_range_params[11].range_max_qp,
		RANGE_BPG_OFFSET11, reg_vals->pps.rc_range_params[11].range_bpg_offset,
		RANGE_MIN_QP12, reg_vals->pps.rc_range_params[12].range_min_qp,
		RANGE_MAX_QP12, reg_vals->pps.rc_range_params[12].range_max_qp,
		RANGE_BPG_OFFSET12, reg_vals->pps.rc_range_params[12].range_bpg_offset);
	REG_SET_6(DSCC_PPS_CONFIG22, 0,
		RANGE_MIN_QP13, reg_vals->pps.rc_range_params[13].range_min_qp,
		RANGE_MAX_QP13, reg_vals->pps.rc_range_params[13].range_max_qp,
		RANGE_BPG_OFFSET13, reg_vals->pps.rc_range_params[13].range_bpg_offset,
		RANGE_MIN_QP14, reg_vals->pps.rc_range_params[14].range_min_qp,
		RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
		RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
}
/*
 * dsc401_set_fgcg() - enable or disable fine-grain clock gating (FGCG)
 * for this DSC instance.
 *
 * The hardware field is a disable bit (DSC_FGCG_REP_DIS), so it is
 * programmed with the inverse of the requested enable state.
 */
void dsc401_set_fgcg(struct dcn401_dsc *dsc401, bool enable)
{
	const uint32_t fgcg_rep_dis = enable ? 0 : 1;

	REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, fgcg_rep_dis);
}

View file

@ -0,0 +1,337 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DCN401_DSC_H__
#define __DCN401_DSC_H__
#include "dsc.h"
#include "dsc/dscc_types.h"
#include "dcn20/dcn20_dsc.h"
#include <drm/display/drm_dsc.h>
/* Downcast a generic display_stream_compressor to its DCN4.0.1 container. */
#define TO_DCN401_DSC(dsc)\
	container_of(dsc, struct dcn401_dsc, base)
/*
 * Shift/mask initializer for every DSC register field used by DCN4.0.1:
 * top-level control/debug, DSCC config/interrupt/PPS/RC fields, memory
 * power control, error counters, fullness levels, DSCCIF input config and
 * DSCRM forwarding. Presumably expanded once with __SHIFT and once with
 * _MASK to fill the shift and mask tables (instantiation not visible here).
 */
#define DSC_REG_LIST_SH_MASK_DCN401(mask_sh)\
	DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
	DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
	DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
	DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, mask_sh), \
	DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
	DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
	DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
	DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
	DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
	DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
	DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
	/*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
	DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL0, DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_STATUS1, DSCC_END_OF_FRAME_NOT_REACHED_CLEAR, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN0, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN1, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN2, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN3, mask_sh), \
	DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL1, DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
	DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
	DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL0, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL0, DSCC_MEM_PWR_FORCE, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL0, DSCC_MEM_PWR_DIS, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL0, DSCC_MEM_PWR_STATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL1, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL1, DSCC_MEM_PWR_FORCE, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL1, DSCC_MEM_PWR_DIS, mask_sh), \
	DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL1, DSCC_MEM_PWR_STATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
	DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
	DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
	DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
	DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
	DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
	DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
	DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
	DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
	DSC_SF(DSCC0_DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL0, DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL0, mask_sh), \
	DSC_SF(DSCC0_DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL1, DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL1, mask_sh), \
	DSC_SF(DSCC0_DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL2, DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL2, mask_sh), \
	DSC_SF(DSCC0_DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL3, DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL3, mask_sh), \
	DSC_SF(DSCC0_DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL0, DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL0, mask_sh), \
	DSC_SF(DSCC0_DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL1, DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL1, mask_sh), \
	DSC_SF(DSCC0_DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL2, DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL2, mask_sh), \
	DSC_SF(DSCC0_DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL3, DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL3, mask_sh), \
	DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS0_ROTATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS1_ROTATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS2_ROTATE, mask_sh), \
	DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS3_ROTATE, mask_sh), \
	DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
	DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
	DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
	DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
	DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
/* Register offsets for one DSC instance. */
struct dcn401_dsc_registers {
	/* top-level control/debug */
	uint32_t DSC_TOP_CONTROL;
	uint32_t DSC_DEBUG_CONTROL;
	/* core DSCC configuration, status and interrupts */
	uint32_t DSCC_CONFIG0;
	uint32_t DSCC_CONFIG1;
	uint32_t DSCC_STATUS;
	uint32_t DSCC_INTERRUPT_CONTROL0;
	uint32_t DSCC_INTERRUPT_CONTROL1;
	uint32_t DSCC_INTERRUPT_STATUS0;
	uint32_t DSCC_INTERRUPT_STATUS1;
	/* PPS registers (picture parameter set, DSCC_PPS_CONFIG0..22) */
	uint32_t DSCC_PPS_CONFIG0;
	uint32_t DSCC_PPS_CONFIG1;
	uint32_t DSCC_PPS_CONFIG2;
	uint32_t DSCC_PPS_CONFIG3;
	uint32_t DSCC_PPS_CONFIG4;
	uint32_t DSCC_PPS_CONFIG5;
	uint32_t DSCC_PPS_CONFIG6;
	uint32_t DSCC_PPS_CONFIG7;
	uint32_t DSCC_PPS_CONFIG8;
	uint32_t DSCC_PPS_CONFIG9;
	uint32_t DSCC_PPS_CONFIG10;
	uint32_t DSCC_PPS_CONFIG11;
	uint32_t DSCC_PPS_CONFIG12;
	uint32_t DSCC_PPS_CONFIG13;
	uint32_t DSCC_PPS_CONFIG14;
	uint32_t DSCC_PPS_CONFIG15;
	uint32_t DSCC_PPS_CONFIG16;
	uint32_t DSCC_PPS_CONFIG17;
	uint32_t DSCC_PPS_CONFIG18;
	uint32_t DSCC_PPS_CONFIG19;
	uint32_t DSCC_PPS_CONFIG20;
	uint32_t DSCC_PPS_CONFIG21;
	uint32_t DSCC_PPS_CONFIG22;
	/* memory power control */
	uint32_t DSCC_MEM_POWER_CONTROL0;
	uint32_t DSCC_MEM_POWER_CONTROL1;
	/* error counters and debug */
	uint32_t DSCC_R_Y_SQUARED_ERROR_LOWER;
	uint32_t DSCC_R_Y_SQUARED_ERROR_UPPER;
	uint32_t DSCC_G_CB_SQUARED_ERROR_LOWER;
	uint32_t DSCC_G_CB_SQUARED_ERROR_UPPER;
	uint32_t DSCC_B_CR_SQUARED_ERROR_LOWER;
	uint32_t DSCC_B_CR_SQUARED_ERROR_UPPER;
	uint32_t DSCC_MAX_ABS_ERROR0;
	uint32_t DSCC_MAX_ABS_ERROR1;
	uint32_t DSCC_TEST_DEBUG_BUS_ROTATE;
	/* input interface and forwarding */
	uint32_t DSCCIF_CONFIG0;
	uint32_t DSCRM_DSC_FORWARD_CONFIG;
	/* buffer fullness levels */
	uint32_t DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL0;
	uint32_t DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL1;
	uint32_t DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL2;
	uint32_t DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL3;
	uint32_t DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL0;
	uint32_t DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL1;
	uint32_t DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL2;
	uint32_t DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL3;
};
/*
 * Field list for DCN4.0.1: everything DCN2.0 declared plus the FGCG control
 * bit and the DCN4.0.1 interrupt/fullness-level fields. Instantiated with
 * uint8_t for the shift table and uint32_t for the mask table (see the
 * dcn401_dsc_shift/dcn401_dsc_mask structs).
 */
#define DSC_FIELD_LIST_DCN401(type)\
	DSC_FIELD_LIST_DCN20(type); \
	type DSC_FGCG_REP_DIS; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN0; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN1; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN2; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED_INT_EN3; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED0; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED1; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED2; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED3; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED0; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED1; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED2; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED3; \
	type DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR0; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR1; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR2; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_CLEAR3; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR0; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR1; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR2; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_CLEAR3; \
	type DSCC_END_OF_FRAME_NOT_REACHED_CLEAR; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED0; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED1; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED2; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_OCCURRED3; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR0; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR1; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR2; \
	type DSCC_RATE_CONTROL_BUFFER_MODEL_OVERFLOW_CLEAR3; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN0; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN1; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN2; \
	type DSCC_OUTPUT_BUFFER_OVERFLOW_OCCURRED_INT_EN3; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN0; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN1; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN2; \
	type DSCC_OUTPUT_BUFFER_UNDERFLOW_OCCURRED_INT_EN3; \
	type DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN; \
	type DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL0; \
	type DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL1; \
	type DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL2; \
	type DSCC_OUTPUT_BUFFER_MAX_FULLNESS_LEVEL3; \
	type DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL0; \
	type DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL1; \
	type DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL2; \
	type DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL3
/* Bit-shift value for each field in DSC_FIELD_LIST_DCN401. */
struct dcn401_dsc_shift {
	DSC_FIELD_LIST_DCN401(uint8_t);
};

/* Bit mask for each field in DSC_FIELD_LIST_DCN401. */
struct dcn401_dsc_mask {
	DSC_FIELD_LIST_DCN401(uint32_t);
};
/* DCN4.0.1 DSC instance: generic base plus its register/shift/mask tables. */
struct dcn401_dsc {
	struct display_stream_compressor base;	/* generic DSC interface */
	const struct dcn401_dsc_registers *dsc_regs;	/* register offsets for this instance */
	const struct dcn401_dsc_shift *dsc_shift;	/* per-field bit-shift table */
	const struct dcn401_dsc_mask *dsc_mask;	/* per-field bit-mask table */
	struct dsc_reg_values reg_vals;	/* register-value snapshot; presumably the last programmed state -- TODO confirm */
	int max_image_width;	/* presumably the widest picture this instance handles -- verify against resource code */
};
/*
 * dsc401_construct() - initialize a DCN4.0.1 DSC instance @inst with its
 * register offsets and field shift/mask tables.
 */
void dsc401_construct(struct dcn401_dsc *dsc,
	struct dc_context *ctx,
	int inst,
	const struct dcn401_dsc_registers *dsc_regs,
	const struct dcn401_dsc_shift *dsc_shift,
	const struct dcn401_dsc_mask *dsc_mask);

/* Enable/disable fine-grain clock gating (FGCG) for the DSC instance. */
void dsc401_set_fgcg(struct dcn401_dsc *dsc401, bool enable);
#endif

View file

@ -0,0 +1,252 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
#include "hw_factory_dcn401.h"
/* Base address of DCN register segment 2 for instance 0. */
#define DCN_BASE__INST0_SEG2 0x000034C0
/* begin *********************
 * macros to expand register list macro defined in HW object header file */
/* DCN */
#define block HPD
#define reg_num 0

#undef BASE_INNER
/* Resolve a register segment number to its base address constant. */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

/* Absolute address of a register: its segment base plus its offset. */
#define REG(reg_name)\
	BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name

/* Initialize a shift/mask struct field from the HPD0 register headers. */
#define SF_HPD(reg_name, field_name, post_fix)\
	.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix

/* Absolute address of an instanced register (block name plus instance id). */
#define REGI(reg_name, block, id)\
	BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
	reg ## block ## id ## _ ## reg_name

/* Initialize a shift/mask struct field directly from the register headers. */
#define SF(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand register list macro defined in HW object header file
 * end *********************/
/* Expand the full HPD register list for one HPD pin. */
#define hpd_regs(id) \
{\
	HPD_REG_LIST(id)\
}

/* Per-pin HPD register tables; four entries populated here (a fifth is commented out). */
static const struct hpd_registers hpd_regs[] = {
	hpd_regs(0),
	hpd_regs(1),
	hpd_regs(2),
	hpd_regs(3),
//	hpd_regs(4),
};

/* Shared shift table for all HPD pins. */
static const struct hpd_sh_mask hpd_shift = {
	HPD_MASK_SH_LIST(__SHIFT)
};

/* Shared mask table for all HPD pins. */
static const struct hpd_sh_mask hpd_mask = {
	HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix
/*
 * DDC data-line register sets: lines 1-4 are real, followed by a zeroed
 * dummy entry for absent ports and finally the VGA DDC line.
 * NOTE(review): ddc_shift/ddc_mask below list six DCN2 entries (1-6) plus
 * VGA while this table has 4 + dummy + VGA; define_ddc_registers() indexes
 * both arrays with the same 'en' — presumably the GPIO_DDC_LINE_* enum
 * values keep the pairs aligned. TODO confirm against gpio_types.h.
 */
static const struct ddc_registers ddc_data_regs_dcn[] = {
	ddc_data_regs_dcn2(1),
	ddc_data_regs_dcn2(2),
	ddc_data_regs_dcn2(3),
	ddc_data_regs_dcn2(4),
	// ddc_data_regs_dcn2(5),
	{
		// add a dummy entry for cases no such port
		{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
		.ddc_setup = 0,
		.phy_aux_cntl = 0,
		.dc_gpio_aux_ctrl_5 = 0
	},
	{
		DDC_GPIO_VGA_REG_LIST(DATA),
		.ddc_setup = 0,
		.phy_aux_cntl = 0,
		.dc_gpio_aux_ctrl_5 = 0
	}
};
/* DDC clock-line register sets; same layout as the data-line table above. */
static const struct ddc_registers ddc_clk_regs_dcn[] = {
	ddc_clk_regs_dcn2(1),
	ddc_clk_regs_dcn2(2),
	ddc_clk_regs_dcn2(3),
	ddc_clk_regs_dcn2(4),
	// ddc_clk_regs_dcn2(5),
	{
		// add a dummy entry for cases no such port
		{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
		.ddc_setup = 0,
		.phy_aux_cntl = 0,
		.dc_gpio_aux_ctrl_5 = 0
	},
	{
		DDC_GPIO_VGA_REG_LIST(CLK),
		.ddc_setup = 0,
		.phy_aux_cntl = 0,
		.dc_gpio_aux_ctrl_5 = 0
	}
};
/* Field shift values per DDC line (DCN2 layout) plus the VGA line. */
static const struct ddc_sh_mask ddc_shift[] = {
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
/* Field mask values per DDC line (DCN2 layout) plus the VGA line. */
static const struct ddc_sh_mask ddc_mask[] = {
	DDC_MASK_SH_LIST_DCN2(_MASK, 1),
	DDC_MASK_SH_LIST_DCN2(_MASK, 2),
	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix
/* Expand the full GENERIC GPIO register list for one instance (A, B, ...). */
#define generic_regs(id) \
{\
	GENERIC_REG_LIST(id)\
}
/* Register sets for GENERIC pins A and B, indexed by instance. */
static const struct generic_registers generic_regs[] = {
	generic_regs(A),
	generic_regs(B),
};
/* Field shift values for GENERIC pins A and B. */
static const struct generic_sh_mask generic_shift[] = {
	GENERIC_MASK_SH_LIST(__SHIFT, A),
	GENERIC_MASK_SH_LIST(__SHIFT, B),
};
/* Field mask values for GENERIC pins A and B. */
static const struct generic_sh_mask generic_mask[] = {
	GENERIC_MASK_SH_LIST(_MASK, A),
	GENERIC_MASK_SH_LIST(_MASK, B),
};
/* Bind GENERIC pin instance @en to its register, shift, and mask tables. */
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
	struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
	const struct generic_registers *regs = &generic_regs[en];

	generic->regs = regs;
	generic->base.regs = &regs->gpio;
	generic->shifts = &generic_shift[en];
	generic->masks = &generic_mask[en];
}
/*
 * Bind a DDC pin to its register set. The data and clock lines use
 * separate register tables; shift/mask tables are shared by both.
 * Bails out (with an assert) on any non-DDC pin id.
 */
static void define_ddc_registers(
	struct hw_gpio_pin *pin,
	uint32_t en)
{
	struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
	const struct ddc_registers *regs;

	switch (pin->id) {
	case GPIO_ID_DDC_DATA:
		regs = &ddc_data_regs_dcn[en];
		break;
	case GPIO_ID_DDC_CLOCK:
		regs = &ddc_clk_regs_dcn[en];
		break;
	default:
		ASSERT_CRITICAL(false);
		return;
	}

	ddc->regs = regs;
	ddc->base.regs = &regs->gpio;
	ddc->shifts = &ddc_shift[en];
	ddc->masks = &ddc_mask[en];
}
/* Bind HPD pin instance @en to its register set and shared shift/mask tables. */
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
	struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
	const struct hpd_registers *regs = &hpd_regs[en];

	hpd->regs = regs;
	hpd->base.regs = &regs->gpio;
	hpd->shifts = &hpd_shift;
	hpd->masks = &hpd_mask;
}
/* function table */
/*
 * GPIO HW factory vtable for DCN4.01: generic init/get helpers from the
 * common dal_hw_* layer plus the register-binding callbacks defined above.
 */
static const struct hw_factory_funcs funcs = {
	.init_ddc_data = dal_hw_ddc_init,
	.init_generic = dal_hw_generic_init,
	.init_hpd = dal_hw_hpd_init,
	.get_ddc_pin = dal_hw_ddc_get_pin,
	.get_hpd_pin = dal_hw_hpd_get_pin,
	.get_generic_pin = dal_hw_generic_get_pin,
	.define_hpd_registers = define_hpd_registers,
	.define_ddc_registers = define_ddc_registers,
	.define_generic_registers = define_generic_registers
};
/*
 * dal_hw_factory_dcn401_init
 *
 * @brief
 * Initialize HW factory function pointers and per-pin-type counts for
 * the DCN4.01 ASIC.
 *
 * @param
 * struct hw_factory *factory - [out] struct of function pointers
 */
void dal_hw_factory_dcn401_init(struct hw_factory *factory)
{
	/* Pin types present on this ASIC. */
	factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
	factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
	factory->number_of_pins[GPIO_ID_GENERIC] = 4;
	factory->number_of_pins[GPIO_ID_HPD] = 5;
	factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;

	/* Pin types not wired on this ASIC. */
	factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
	factory->number_of_pins[GPIO_ID_SYNC] = 0;
	factory->number_of_pins[GPIO_ID_GSL] = 0; /*add this*/

	factory->funcs = &funcs;
}

View file

@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DAL_HW_FACTORY_DCN401_H__
#define __DAL_HW_FACTORY_DCN401_H__
/* Initialize HW factory function pointers and pin info */
void dal_hw_factory_dcn401_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN401_H__ */

View file

@ -0,0 +1,335 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "hw_translate_dcn401.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
/* Base address of DCN register segment 2 for instance 0. */
#define DCN_BASE__INST0_SEG2 0x000034C0
/* begin *********************
 * macros to expend register list macro defined in HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
/* Resolve a segment index to its base-address constant via token pasting. */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
/* Absolute register address: segment base + register offset constant. */
#define REG(reg_name)\
	BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
	.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expend register list macro defined in HW object header file
 * end *********************/
/*
 * offset_to_id - reverse-map a GPIO register offset and pin mask back to a
 * (gpio_id, enum-index) pair.
 *
 * Returns true on a recognized (offset, mask) combination; on unknown
 * input it asserts and returns false. For DDC offsets only *en is
 * written — *id is deliberately left untouched (see comment below).
 */
static bool offset_to_id(
	uint32_t offset,
	uint32_t mask,
	enum gpio_id *id,
	uint32_t *en)
{
	switch (offset) {
	/* GENERIC */
	case REG(DC_GPIO_GENERIC_A):
		*id = GPIO_ID_GENERIC;
		switch (mask) {
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
			*en = GPIO_GENERIC_A;
			return true;
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
			*en = GPIO_GENERIC_B;
			return true;
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
			*en = GPIO_GENERIC_C;
			return true;
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
			*en = GPIO_GENERIC_D;
			return true;
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
			*en = GPIO_GENERIC_E;
			return true;
		case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
			*en = GPIO_GENERIC_F;
			return true;
		default:
			ASSERT_CRITICAL(false);
			return false;
		}
		break;
	/* HPD */
	case REG(DC_GPIO_HPD_A):
		*id = GPIO_ID_HPD;
		switch (mask) {
		case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
			*en = GPIO_HPD_1;
			return true;
		case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
			*en = GPIO_HPD_2;
			return true;
		case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
			*en = GPIO_HPD_3;
			return true;
		case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
			*en = GPIO_HPD_4;
			return true;
		case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
			*en = GPIO_HPD_5;
			return true;
		default:
			ASSERT_CRITICAL(false);
			return false;
		}
		break;
	/* REG(DC_GPIO_GENLK_MASK */
	case REG(DC_GPIO_GENLK_A):
		*id = GPIO_ID_GSL;
		switch (mask) {
		case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
			*en = GPIO_GSL_GENLOCK_CLOCK;
			return true;
		case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
			*en = GPIO_GSL_GENLOCK_VSYNC;
			return true;
		case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
			*en = GPIO_GSL_SWAPLOCK_A;
			return true;
		case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
			*en = GPIO_GSL_SWAPLOCK_B;
			return true;
		default:
			ASSERT_CRITICAL(false);
			return false;
		}
		break;
	/* DDC */
	/* we don't care about the GPIO_ID for DDC
	 * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
	 * directly in the create method
	 */
	case REG(DC_GPIO_DDC1_A):
		*en = GPIO_DDC_LINE_DDC1;
		return true;
	case REG(DC_GPIO_DDC2_A):
		*en = GPIO_DDC_LINE_DDC2;
		return true;
	case REG(DC_GPIO_DDC3_A):
		*en = GPIO_DDC_LINE_DDC3;
		return true;
	case REG(DC_GPIO_DDC4_A):
		*en = GPIO_DDC_LINE_DDC4;
		return true;
	case REG(DC_GPIO_DDCVGA_A):
		*en = GPIO_DDC_LINE_DDC_VGA;
		return true;
	/*
	 * case REG(DC_GPIO_I2CPAD_A): not exit
	 * case REG(DC_GPIO_PWRSEQ_A):
	 * case REG(DC_GPIO_PAD_STRENGTH_1):
	 * case REG(DC_GPIO_PAD_STRENGTH_2):
	 * case REG(DC_GPIO_DEBUG):
	 */
	/* UNEXPECTED */
	default:
		/* case REG(DC_GPIO_SYNCA_A): not exist */
		ASSERT_CRITICAL(false);
		return false;
	}
}
/*
 * id_to_offset - forward-map a (gpio_id, enum-index) pair to the register
 * offset and pin mask stored in @info.
 *
 * Returns true on success; on unsupported/unimplemented pins it asserts
 * and returns false. On success the related Y/EN/MASK register offsets
 * are derived from the A-register offset (see note at the bottom).
 */
static bool id_to_offset(
	enum gpio_id id,
	uint32_t en,
	struct gpio_pin_info *info)
{
	bool result = true;
	switch (id) {
	case GPIO_ID_DDC_DATA:
		/* same bit position in every DDCx_A register, so mask is shared */
		info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK;
		switch (en) {
		case GPIO_DDC_LINE_DDC1:
			info->offset = REG(DC_GPIO_DDC1_A);
			break;
		case GPIO_DDC_LINE_DDC2:
			info->offset = REG(DC_GPIO_DDC2_A);
			break;
		case GPIO_DDC_LINE_DDC3:
			info->offset = REG(DC_GPIO_DDC3_A);
			break;
		case GPIO_DDC_LINE_DDC4:
			info->offset = REG(DC_GPIO_DDC4_A);
			break;
		/* case GPIO_DDC_LINE_DDC5:
			info->offset = REG(DC_GPIO_DDC5_A);
			break; */
		case GPIO_DDC_LINE_DDC_VGA:
			info->offset = REG(DC_GPIO_DDCVGA_A);
			break;
		case GPIO_DDC_LINE_I2C_PAD:
		default:
			ASSERT_CRITICAL(false);
			result = false;
		}
		break;
	case GPIO_ID_DDC_CLOCK:
		info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK;
		switch (en) {
		case GPIO_DDC_LINE_DDC1:
			info->offset = REG(DC_GPIO_DDC1_A);
			break;
		case GPIO_DDC_LINE_DDC2:
			info->offset = REG(DC_GPIO_DDC2_A);
			break;
		case GPIO_DDC_LINE_DDC3:
			info->offset = REG(DC_GPIO_DDC3_A);
			break;
		case GPIO_DDC_LINE_DDC4:
			info->offset = REG(DC_GPIO_DDC4_A);
			break;
		/* case GPIO_DDC_LINE_DDC5:
			info->offset = REG(DC_GPIO_DDC5_A);
			break; */
		case GPIO_DDC_LINE_DDC_VGA:
			info->offset = REG(DC_GPIO_DDCVGA_A);
			break;
		case GPIO_DDC_LINE_I2C_PAD:
		default:
			ASSERT_CRITICAL(false);
			result = false;
		}
		break;
	case GPIO_ID_GENERIC:
		/* all GENERIC pins live in one register; only the mask differs */
		info->offset = REG(DC_GPIO_GENERIC_A);
		switch (en) {
		case GPIO_GENERIC_A:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
			break;
		case GPIO_GENERIC_B:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
			break;
		case GPIO_GENERIC_C:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
			break;
		case GPIO_GENERIC_D:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
			break;
		case GPIO_GENERIC_E:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
			break;
		case GPIO_GENERIC_F:
			info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
			break;
		default:
			ASSERT_CRITICAL(false);
			result = false;
		}
		break;
	case GPIO_ID_HPD:
		info->offset = REG(DC_GPIO_HPD_A);
		switch (en) {
		case GPIO_HPD_1:
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
			break;
		case GPIO_HPD_2:
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
			break;
		case GPIO_HPD_3:
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
			break;
		case GPIO_HPD_4:
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
			break;
		case GPIO_HPD_5:
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
			break;
		default:
			ASSERT_CRITICAL(false);
			result = false;
		}
		break;
	case GPIO_ID_GSL:
		switch (en) {
		case GPIO_GSL_GENLOCK_CLOCK:
			/*not implmented*/
			ASSERT_CRITICAL(false);
			result = false;
			break;
		case GPIO_GSL_GENLOCK_VSYNC:
			/*not implmented*/
			ASSERT_CRITICAL(false);
			result = false;
			break;
		case GPIO_GSL_SWAPLOCK_A:
			/*not implmented*/
			ASSERT_CRITICAL(false);
			result = false;
			break;
		case GPIO_GSL_SWAPLOCK_B:
			/*not implmented*/
			ASSERT_CRITICAL(false);
			result = false;
			break;
		default:
			ASSERT_CRITICAL(false);
			result = false;
		}
		break;
	case GPIO_ID_SYNC:
	case GPIO_ID_VIP_PAD:
	default:
		ASSERT_CRITICAL(false);
		result = false;
	}
	if (result) {
		/*
		 * NOTE(review): derived offsets assume the Y register sits at
		 * A+2, EN at A+1 and MASK at A-1 for every pin group — matches
		 * the DC_GPIO_* register layout these tables target; confirm
		 * against dcn_4_1_0_offset.h if the layout ever changes.
		 */
		info->offset_y = info->offset + 2;
		info->offset_en = info->offset + 1;
		info->offset_mask = info->offset - 1;
		info->mask_y = info->mask;
		info->mask_en = info->mask;
		info->mask_mask = info->mask;
	}
	return result;
}
/* function table */
/* Bidirectional (id,en) <-> (offset,mask) translation callbacks. */
static const struct hw_translate_funcs funcs = {
	.offset_to_id = offset_to_id,
	.id_to_offset = id_to_offset,
};
/*
 * dal_hw_translate_dcn401_init
 *
 * @brief
 * Initialize Hw translate function pointers.
 *
 * @param
 * struct hw_translate *tr - [out] struct of function pointers
 *
 */
void dal_hw_translate_dcn401_init(struct hw_translate *tr)
{
	tr->funcs = &funcs;
}

View file

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DAL_HW_TRANSLATE_DCN401_H__
#define __DAL_HW_TRANSLATE_DCN401_H__
struct hw_translate;
/* Initialize Hw translate function pointers */
void dal_hw_translate_dcn401_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN401_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,76 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_HWSS_DCN401_H__
#define __DC_HWSS_DCN401_H__
#include "inc/core_types.h"
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
/*
 * Power state of an ONO (on/off) power region as reported/requested for
 * IPS (idle power state) handling. The *_IN_PROGRESS values mark a
 * transition toward the corresponding steady state.
 */
enum ips_ono_state {
	ONO_ON = 0,
	ONO_ON_IN_PROGRESS = 1,
	ONO_OFF = 2,
	ONO_OFF_IN_PROGRESS = 3
};
/* Desired vs. actual power-gate state snapshot for one ONO region. */
struct ips_ono_region_state {
	/**
	 * @desire_pwr_state: desired power state based on configured value
	 */
	uint32_t desire_pwr_state;
	/**
	 * @current_pwr_state: current power gate status
	 */
	uint32_t current_pwr_state;
};
/*
 * DCN4.01 hardware-sequencer entry points. These are installed into the
 * hw_sequencer_funcs / hwseq_private_funcs tables by dcn401_init.c;
 * implementations live in dcn401_hwseq.c.
 */
void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx);
void dcn401_init_hw(struct dc *dc);
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a);
bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
		const struct dc_plane_state *plane_state);
bool dcn401_set_output_transfer_func(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		const struct dc_stream_state *stream);
void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
		unsigned int *tmds_div);
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc);
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
/* Query the ONO power-gate state of one region (see ips_ono_region_state). */
struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
		uint8_t region);
void dcn401_prepare_bandwidth(struct dc *dc,
		struct dc_state *context);
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context);
/* FAMS2 (firmware-assisted memory switching) control hooks. */
void dcn401_fams2_global_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);

View file

@ -0,0 +1,151 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dce110/dce110_hwseq.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
/*
 * Public HW-sequencer vtable for DCN4.01. Entries are reused from earlier
 * generations (dce110/dcn10/20/21/30/31/32) wherever behavior is unchanged;
 * dcn401_* entries are the hooks this generation overrides.
 */
static const struct hw_sequencer_funcs dcn401_funcs = {
	.program_gamut_remap = dcn401_program_gamut_remap,
	.init_hw = dcn401_init_hw,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = NULL,
	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
	.update_plane_addr = dcn20_update_plane_addr,
	.update_dchub = dcn10_update_dchub,
	.update_pending_status = dcn10_update_pending_status,
	.program_output_csc = dcn20_program_output_csc,
	.enable_accelerated_mode = dce110_enable_accelerated_mode,
	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
	.update_info_frame = dcn31_update_info_frame,
	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
	.enable_stream = dcn401_enable_stream,
	.disable_stream = dce110_disable_stream,
	.unblank_stream = dcn32_unblank_stream,
	.blank_stream = dce110_blank_stream,
	.enable_audio_stream = dce110_enable_audio_stream,
	.disable_audio_stream = dce110_disable_audio_stream,
	.disable_plane = dcn20_disable_plane,
	.pipe_control_lock = dcn20_pipe_control_lock,
	.interdependent_update_lock = dcn32_interdependent_update_lock,
	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn401_prepare_bandwidth,
	.optimize_bandwidth = dcn401_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
	.set_drr = dcn10_set_drr,
	.get_position = dcn10_get_position,
	.set_static_screen_control = dcn31_set_static_screen_control,
	.setup_stereo = dcn10_setup_stereo,
	.set_avmute = dcn30_set_avmute,
	.log_hw_state = dcn10_log_hw_state,
	.get_hw_state = dcn10_get_hw_state,
	.clear_status_bits = dcn10_clear_status_bits,
	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
	.edp_backlight_control = dce110_edp_backlight_control,
	.edp_power_control = dce110_edp_power_control,
	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
	.edp_wait_for_T12 = dce110_edp_wait_for_T12,
	.set_cursor_position = dcn401_set_cursor_position,
	.set_cursor_attribute = dcn10_set_cursor_attribute,
	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
	.set_clock = dcn10_set_clock,
	.get_clock = dcn10_get_clock,
	.program_triplebuffer = dcn20_program_triple_buffer,
	.enable_writeback = dcn30_enable_writeback,
	.disable_writeback = dcn30_disable_writeback,
	.update_writeback = dcn30_update_writeback,
	.mmhubbub_warmup = dcn30_mmhubbub_warmup,
	.dmdata_status_done = dcn20_dmdata_status_done,
	.program_dmdata_engine = dcn30_program_dmdata_engine,
	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
	.init_sys_ctx = dcn20_init_sys_ctx,
	.init_vm_ctx = dcn20_init_vm_ctx,
	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
	.calc_vupdate_position = dcn10_calc_vupdate_position,
	.apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations,
	.does_plane_fit_in_mall = NULL,
	.set_backlight_level = dcn21_set_backlight_level,
	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
	.hardware_release = dcn30_hardware_release,
	.set_pipe = dcn21_set_pipe,
	.enable_lvds_link_output = dce110_enable_lvds_link_output,
	.enable_tmds_link_output = dce110_enable_tmds_link_output,
	.enable_dp_link_output = dce110_enable_dp_link_output,
	.disable_link_output = dcn32_disable_link_output,
	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
	.get_dcc_en_bits = dcn10_get_dcc_en_bits,
	.enable_phantom_streams = dcn32_enable_phantom_streams,
	.disable_phantom_streams = dcn32_disable_phantom_streams,
	.update_visual_confirm_color = dcn10_update_visual_confirm_color,
	.update_phantom_vp_position = dcn32_update_phantom_vp_position,
	.update_dsc_pg = dcn32_update_dsc_pg,
	.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
	.blank_phantom = dcn32_blank_phantom,
	.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
	/* FAMS2 hooks are new for this generation. */
	.fams2_global_control_lock = dcn401_fams2_global_control_lock,
	.fams2_update_config = dcn401_fams2_update_config,
	.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
.update_force_pstate = dcn32_update_force_pstate,
.update_mall_sel = dcn32_update_mall_sel,
.setup_hpo_hw_control = dcn401_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = NULL,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
.populate_mcm_luts = dcn401_populate_mcm_luts,
};
/*
 * dcn401_hw_sequencer_init_functions - install the DCN4.01 public and
 * private HW-sequencer vtables on the given DC instance.
 */
void dcn401_hw_sequencer_init_functions(struct dc *dc)
{
	dc->hwss = dcn401_funcs;
	dc->hwseq->funcs = dcn401_private_funcs;
}

View file

@ -0,0 +1,12 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_DCN401_INIT_H__
#define __DC_DCN401_INIT_H__
struct dc;
void dcn401_hw_sequencer_init_functions(struct dc *dc);
#endif /* __DC_DCN401_INIT_H__ */

View file

@ -0,0 +1,411 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "irq_service_dcn401.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#define DCN_BASE__INST0_SEG2 0x000034C0
/*
 * to_dal_irq_source_dcn401 - translate an IH (src_id, ext_id) pair into a
 * DAL dc_irq_source. All HPD/HPDRX lines share one src_id and are
 * disambiguated by ext_id. Unknown inputs map to DC_IRQ_SOURCE_INVALID.
 */
static enum dc_irq_source to_dal_irq_source_dcn401(
	struct irq_service *irq_service,
	uint32_t src_id,
	uint32_t ext_id)
{
	switch (src_id) {
	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK1;
	case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK2;
	case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK3;
	case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK4;
	case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK5;
	case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK6;
	case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC1_VLINE0;
	case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC2_VLINE0;
	case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC3_VLINE0;
	case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC4_VLINE0;
	case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC5_VLINE0;
	case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC6_VLINE0;
	case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP1;
	case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP2;
	case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP3;
	case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP4;
	case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP5;
	case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP6;
	case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE1;
	case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE2;
	case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE3;
	case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE4;
	case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE5;
	case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE6;
	case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
		return DC_IRQ_SOURCE_DMCUB_OUTBOX;
	case DCN_1_0__SRCID__DC_HPD1_INT:
		/* generic src_id for all HPD and HPDRX interrupts */
		switch (ext_id) {
		case DCN_1_0__CTXID__DC_HPD1_INT:
			return DC_IRQ_SOURCE_HPD1;
		case DCN_1_0__CTXID__DC_HPD2_INT:
			return DC_IRQ_SOURCE_HPD2;
		case DCN_1_0__CTXID__DC_HPD3_INT:
			return DC_IRQ_SOURCE_HPD3;
		case DCN_1_0__CTXID__DC_HPD4_INT:
			return DC_IRQ_SOURCE_HPD4;
		case DCN_1_0__CTXID__DC_HPD5_INT:
			return DC_IRQ_SOURCE_HPD5;
		case DCN_1_0__CTXID__DC_HPD6_INT:
			return DC_IRQ_SOURCE_HPD6;
		case DCN_1_0__CTXID__DC_HPD1_RX_INT:
			return DC_IRQ_SOURCE_HPD1RX;
		case DCN_1_0__CTXID__DC_HPD2_RX_INT:
			return DC_IRQ_SOURCE_HPD2RX;
		case DCN_1_0__CTXID__DC_HPD3_RX_INT:
			return DC_IRQ_SOURCE_HPD3RX;
		case DCN_1_0__CTXID__DC_HPD4_RX_INT:
			return DC_IRQ_SOURCE_HPD4RX;
		case DCN_1_0__CTXID__DC_HPD5_RX_INT:
			return DC_IRQ_SOURCE_HPD5RX;
		case DCN_1_0__CTXID__DC_HPD6_RX_INT:
			return DC_IRQ_SOURCE_HPD6RX;
		default:
			return DC_IRQ_SOURCE_INVALID;
		}
		break;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}
/*
 * hpd_ack - acknowledge an HPD interrupt and re-arm it for the opposite
 * edge: after the generic ack, the sense polarity is inverted relative to
 * the currently-sensed (delayed) HPD level so the next plug/unplug
 * transition fires again.
 */
static bool hpd_ack(
	struct irq_service *irq_service,
	const struct irq_source_info *info)
{
	uint32_t addr = info->status_reg;
	uint32_t value = dm_read_reg(irq_service->ctx, addr);
	/* current debounced HPD sense level */
	uint32_t current_status =
		get_reg_field_value(
			value,
			HPD0_DC_HPD_INT_STATUS,
			DC_HPD_SENSE_DELAYED);

	dal_irq_service_ack_generic(irq_service, info);

	value = dm_read_reg(irq_service->ctx, info->enable_reg);

	/* flip polarity: watch for the edge opposite to the current level */
	set_reg_field_value(
		value,
		current_status ? 0 : 1,
		HPD0_DC_HPD_INT_CONTROL,
		DC_HPD_INT_POLARITY);

	dm_write_reg(irq_service->ctx, info->enable_reg, value);

	return true;
}
/*
 * Per-source-type set/ack callback tables. A NULL .set/.ack means the
 * generic dal_irq_service enable/ack register writes are sufficient;
 * only HPD needs a custom ack (polarity re-arm above).
 */
static struct irq_source_info_funcs hpd_irq_info_funcs = {
	.set = NULL,
	.ack = hpd_ack
};

static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs pflip_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vblank_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs outbox_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vline0_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};
#undef BASE_INNER
/* Resolve a segment index to its base-address constant via token pasting. */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

/* compile time expand base address. */
#define BASE(seg) \
	BASE_INNER(seg)

/* Per-instance register address (block name + instance id + register). */
#define SRI(reg_name, block, id)\
	BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* Register address for (non-instanced) DMCUB registers. */
#define SRI_DMUB(reg_name)\
	BASE(reg ## reg_name ## _BASE_IDX) + \
		reg ## reg_name

/*
 * Fill the enable/ack register fields of one irq_source_info entry:
 * reg1/mask1 control enabling, reg2/mask2 control acknowledging.
 */
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
	.enable_reg = SRI(reg1, block, reg_num),\
	.enable_mask = \
		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
	.enable_value = {\
		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
	},\
	.ack_reg = SRI(reg2, block, reg_num),\
	.ack_mask = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
	.ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \

/* DMCUB flavor of IRQ_REG_ENTRY (no block/instance prefix). */
#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
	.enable_reg = SRI_DMUB(reg1),\
	.enable_mask = \
		reg1 ## __ ## mask1 ## _MASK,\
	.enable_value = {\
		reg1 ## __ ## mask1 ## _MASK,\
		~reg1 ## __ ## mask1 ## _MASK \
	},\
	.ack_reg = SRI_DMUB(reg2),\
	.ack_mask = \
		reg2 ## __ ## mask2 ## _MASK,\
	.ack_value = \
		reg2 ## __ ## mask2 ## _MASK \

/* Designated-initializer entries for the irq_source_info table below. */
#define hpd_int_entry(reg_num)\
	[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
		IRQ_REG_ENTRY(HPD, reg_num,\
			DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
			DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
		.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
		.funcs = &hpd_irq_info_funcs\
	}

#define hpd_rx_int_entry(reg_num)\
	[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
		IRQ_REG_ENTRY(HPD, reg_num,\
			DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
			DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
		.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
		.funcs = &hpd_rx_irq_info_funcs\
	}

#define pflip_int_entry(reg_num)\
	[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
		IRQ_REG_ENTRY(HUBPREQ, reg_num,\
			DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
			DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
		.funcs = &pflip_irq_info_funcs\
	}

/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
 * of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
	[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
		IRQ_REG_ENTRY(OTG, reg_num,\
			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
		.funcs = &vupdate_no_lock_irq_info_funcs\
	}

#define vblank_int_entry(reg_num)\
	[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
		IRQ_REG_ENTRY(OTG, reg_num,\
			OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
			OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
		.funcs = &vblank_irq_info_funcs\
	}

#define vline0_int_entry(reg_num)\
	[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
		IRQ_REG_ENTRY(OTG, reg_num,\
			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
		.funcs = &vline0_irq_info_funcs\
	}

#define dmub_outbox_int_entry()\
	[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
		IRQ_REG_ENTRY_DMUB(\
			DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
			DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
		.funcs = &outbox_irq_info_funcs\
	}

/* Placeholder entry for sources with no register backing on this ASIC. */
#define dummy_irq_entry() \
	{\
		.funcs = &dummy_irq_info_funcs\
	}

#define i2c_int_entry(reg_num) \
	[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()

#define dp_sink_int_entry(reg_num) \
	[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()

#define gpio_pad_int_entry(reg_num) \
	[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()

#define dc_underflow_int_entry(reg_num) \
	[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()

/* Dummy callbacks: log/ignore set and ack for unbacked sources. */
static struct irq_source_info_funcs dummy_irq_info_funcs = {
	.set = dal_irq_service_dummy_set,
	.ack = dal_irq_service_dummy_ack
};
/*
 * Interrupt source table for DCN4.01, indexed by dc_irq_source. Entries
 * built with dummy_irq_entry() exist only to keep the table dense;
 * register-backed entries come from the *_int_entry macros above.
 */
static const struct irq_source_info
irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
	[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
	hpd_int_entry(0),
	hpd_int_entry(1),
	hpd_int_entry(2),
	hpd_int_entry(3),
	hpd_int_entry(4),
	hpd_rx_int_entry(0),
	hpd_rx_int_entry(1),
	hpd_rx_int_entry(2),
	hpd_rx_int_entry(3),
	hpd_rx_int_entry(4),
	i2c_int_entry(1),
	i2c_int_entry(2),
	i2c_int_entry(3),
	i2c_int_entry(4),
	i2c_int_entry(5),
	i2c_int_entry(6),
	dp_sink_int_entry(1),
	dp_sink_int_entry(2),
	dp_sink_int_entry(3),
	dp_sink_int_entry(4),
	dp_sink_int_entry(5),
	dp_sink_int_entry(6),
	[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
	pflip_int_entry(0),
	pflip_int_entry(1),
	pflip_int_entry(2),
	pflip_int_entry(3),
	[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
	[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
	[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
	gpio_pad_int_entry(0),
	gpio_pad_int_entry(1),
	gpio_pad_int_entry(2),
	gpio_pad_int_entry(3),
	gpio_pad_int_entry(4),
	gpio_pad_int_entry(5),
	gpio_pad_int_entry(6),
	gpio_pad_int_entry(7),
	gpio_pad_int_entry(8),
	gpio_pad_int_entry(9),
	gpio_pad_int_entry(10),
	gpio_pad_int_entry(11),
	gpio_pad_int_entry(12),
	gpio_pad_int_entry(13),
	gpio_pad_int_entry(14),
	gpio_pad_int_entry(15),
	gpio_pad_int_entry(16),
	gpio_pad_int_entry(17),
	gpio_pad_int_entry(18),
	gpio_pad_int_entry(19),
	gpio_pad_int_entry(20),
	gpio_pad_int_entry(21),
	gpio_pad_int_entry(22),
	gpio_pad_int_entry(23),
	gpio_pad_int_entry(24),
	gpio_pad_int_entry(25),
	gpio_pad_int_entry(26),
	gpio_pad_int_entry(27),
	gpio_pad_int_entry(28),
	gpio_pad_int_entry(29),
	gpio_pad_int_entry(30),
	dc_underflow_int_entry(1),
	dc_underflow_int_entry(2),
	dc_underflow_int_entry(3),
	dc_underflow_int_entry(4),
	dc_underflow_int_entry(5),
	dc_underflow_int_entry(6),
	[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
	[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
	vupdate_no_lock_int_entry(0),
	vupdate_no_lock_int_entry(1),
	vupdate_no_lock_int_entry(2),
	vupdate_no_lock_int_entry(3),
	vblank_int_entry(0),
	vblank_int_entry(1),
	vblank_int_entry(2),
	vblank_int_entry(3),
	vline0_int_entry(0),
	vline0_int_entry(1),
	vline0_int_entry(2),
	vline0_int_entry(3),
	[DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
	[DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
	dmub_outbox_int_entry(),
};
/* Only the IH -> dc_irq_source translation needs a DCN4.01 override. */
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
	.to_dal_irq_source = to_dal_irq_source_dcn401
};

/*
 * dcn401_irq_construct - run the common irq_service construction, then
 * attach the DCN4.01 source table and function table. Common construction
 * must happen first since it initializes the base object.
 */
static void dcn401_irq_construct(
	struct irq_service *irq_service,
	struct irq_service_init_data *init_data)
{
	dal_irq_service_construct(irq_service, init_data);

	irq_service->info = irq_source_info_dcn401;
	irq_service->funcs = &irq_service_funcs_dcn401;
}
/*
 * dal_irq_service_dcn401_create() - Allocate and construct a DCN4.0.1
 * irq_service instance.
 *
 * Return: newly allocated irq_service, or NULL on allocation failure.
 * The caller owns the returned object.
 */
struct irq_service *dal_irq_service_dcn401_create(
	struct irq_service_init_data *init_data)
{
	struct irq_service *irq_service;

	irq_service = kzalloc(sizeof(*irq_service), GFP_KERNEL);
	if (irq_service == NULL)
		return NULL;

	dcn401_irq_construct(irq_service, init_data);
	return irq_service;
}

View file

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DAL_IRQ_SERVICE_DCN401_H__
#define __DAL_IRQ_SERVICE_DCN401_H__
#include "../irq_service.h"
struct irq_service *dal_irq_service_dcn401_create(
struct irq_service_init_data *init_data);
#endif /* __DAL_IRQ_SERVICE_DCN401_H__ */

View file

@ -0,0 +1,475 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dcn401_optc.h"
#include "dcn30/dcn30_optc.h"
#include "dcn31/dcn31_optc.h"
#include "dcn32/dcn32_optc.h"
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"
/* Shorthand helpers consumed by the REG_* register access macros below:
 * REG() resolves a register offset from this OPTC's register map, CTX
 * supplies the dc context for logging/ctx-based helpers, and FN()
 * expands to the shift/mask pair of a register field.
 */
#define REG(reg)\
	optc1->tg_regs->reg

#define CTX \
	optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	optc1->tg_shift->field_name, optc1->tg_mask->field_name
/*
* OPTC uses ODM_MEM sub block to merge pixel data coming from different OPPs
* into unified memory location per horizontal line. ODM_MEM contains shared
* memory resources global to the ASIC. Each memory resource is capable of
* storing 2048 pixels independent from actual pixel data size. Total number of
* memory allocated must be even. The memory resource allocation is described in
* a memory bit map per OPTC instance. Driver has to make sure that there is no
* double allocation across different OPTC instances. Bit offset in the map
* represents memory instance id. Driver allocates a memory instance to the
* current OPTC by setting the bit with offset associated with the desired
* memory instance to 1 in the current OPTC memory map register.
*
* It is up to software to decide how to allocate the shared memory resources
* across different OPTC instances. Driver understands that the total number
* of memory available is always 2 times the max number of OPP pipes. So each
* OPP pipe can be mapped to 2 pieces of memory. However there exist cases such as
* 11520x2160 which could use 6 pieces of memory for 2 OPP pipes i.e. 3 pieces
* for each OPP pipe.
*
* Driver will reserve the first and second preferred memory instances for each
* OPP pipe. For example, OPP0's first and second preferred memory is ODM_MEM0
* and ODM_MEM1. OPP1's first and second preferred memory is ODM_MEM2 and
* ODM_MEM3 so on so forth.
*
* Driver will first allocate from first preferred memory instances associated
* with current OPP pipes in use. If needed driver will then allocate from
* second preferred memory instances associated with current OPP pipes in use.
* Finally if still needed, driver will allocate from second preferred memory
* instances not associated with current OPP pipes. So if memory instances are
* enough other OPTCs can still allocate from their OPPs' first preferred memory
* instances without worrying about double allocation.
*/
/*
 * Build the ODM_MEM allocation bit map for this OPTC instance.
 *
 * Two memory pieces are required per 4096 pixels of horizontal active
 * (each piece holds 2048 pixels and pieces are consumed in pairs).
 * Allocation order: first preferred memory of each OPP in use, then
 * second preferred memory of each OPP in use, then second preferred
 * memory of OPPs not in use. Bits 2*i and 2*i+1 of the returned map
 * select OPP i's first and second preferred memory respectively.
 */
static uint32_t decide_odm_mem_bit_map(int *opp_id, int opp_cnt, int h_active)
{
	bool use_first_pref[MAX_PIPES] = {0};
	bool use_second_pref[MAX_PIPES] = {0};
	uint32_t bit_map = 0;
	int required = ((h_active + 4095) / 4096) * 2;
	int allocated = 0;
	int i;

	/* Pass 1: first preferred memory of every OPP in use. */
	for (i = 0; i < opp_cnt; i++) {
		use_first_pref[opp_id[i]] = true;
		allocated++;
		if (allocated == required)
			break;
	}

	/* Pass 2: second preferred memory of every OPP in use. */
	if (allocated < required) {
		for (i = 0; i < opp_cnt; i++) {
			use_second_pref[opp_id[i]] = true;
			allocated++;
			if (allocated == required)
				break;
		}
	}

	/* Pass 3: second preferred memory of OPPs not currently in use. */
	if (allocated < required) {
		for (i = 0; i < MAX_PIPES; i++) {
			if (!use_second_pref[i]) {
				use_second_pref[i] = true;
				allocated++;
				if (allocated == required)
					break;
			}
		}
	}

	ASSERT(allocated == required);

	for (i = 0; i < MAX_PIPES; i++) {
		if (use_first_pref[i])
			bit_map |= 0x1 << (i * 2);
		if (use_second_pref[i])
			bit_map |= 0x2 << (i * 2);
	}

	return bit_map;
}
/*
 * optc401_set_odm_combine() - Program ODM combine for 2/3/4 OPP inputs.
 * @optc:    timing generator
 * @opp_id:  array of OPP instance ids feeding each segment
 * @opp_cnt: number of OPPs combined (2, 3 or 4)
 * @timing:  CRTC timing used to derive the horizontal active width
 *
 * Allocates shared ODM memory via decide_odm_mem_bit_map(), selects the
 * source OPP for each OPTC segment, programs the per-segment width and
 * sets the matching OTG H timing divide mode.
 *
 * Fix vs. previous revision: removed a stray empty statement after the
 * switch and added the missing break in the default case.
 */
static void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
		int opp_cnt, struct dc_crtc_timing *timing)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	uint32_t h_active = timing->h_addressable +
			timing->h_border_left + timing->h_border_right;
	uint32_t odm_segment_width = h_active / opp_cnt;
	/* Last segment absorbs the rounding remainder of the division. */
	uint32_t odm_segment_width_last =
			h_active - odm_segment_width * (opp_cnt - 1);
	uint32_t odm_mem_bit_map = decide_odm_mem_bit_map(
			opp_id, opp_cnt, h_active);

	REG_SET(OPTC_MEMORY_CONFIG, 0,
		OPTC_MEM_SEL, odm_mem_bit_map);

	switch (opp_cnt) {
	case 2: /* ODM Combine 2:1 */
		REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
				OPTC_NUM_OF_INPUT_SEGMENT, 1,
				OPTC_SEG0_SRC_SEL, opp_id[0],
				OPTC_SEG1_SRC_SEL, opp_id[1]);
		REG_UPDATE(OPTC_WIDTH_CONTROL,
				OPTC_SEGMENT_WIDTH, odm_segment_width);

		REG_UPDATE(OTG_H_TIMING_CNTL,
				OTG_H_TIMING_DIV_MODE, H_TIMING_DIV_BY2);
		break;
	case 3: /* ODM Combine 3:1 */
		REG_SET_4(OPTC_DATA_SOURCE_SELECT, 0,
				OPTC_NUM_OF_INPUT_SEGMENT, 2,
				OPTC_SEG0_SRC_SEL, opp_id[0],
				OPTC_SEG1_SRC_SEL, opp_id[1],
				OPTC_SEG2_SRC_SEL, opp_id[2]);
		REG_UPDATE(OPTC_WIDTH_CONTROL,
				OPTC_SEGMENT_WIDTH, odm_segment_width);
		REG_UPDATE(OPTC_WIDTH_CONTROL2,
				OPTC_SEGMENT_WIDTH_LAST,
				odm_segment_width_last);
		/* In ODM combine 3:1 mode ODM packs 4 pixels per data transfer
		 * so OTG_H_TIMING_DIV_MODE should be configured to
		 * H_TIMING_DIV_BY4 even though ODM combines 3 OPP inputs, it
		 * outputs 4 pixels from single OPP at a time.
		 */
		REG_UPDATE(OTG_H_TIMING_CNTL,
				OTG_H_TIMING_DIV_MODE, H_TIMING_DIV_BY4);
		break;
	case 4: /* ODM Combine 4:1 */
		REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
				OPTC_NUM_OF_INPUT_SEGMENT, 3,
				OPTC_SEG0_SRC_SEL, opp_id[0],
				OPTC_SEG1_SRC_SEL, opp_id[1],
				OPTC_SEG2_SRC_SEL, opp_id[2],
				OPTC_SEG3_SRC_SEL, opp_id[3]);
		REG_UPDATE(OPTC_WIDTH_CONTROL,
				OPTC_SEGMENT_WIDTH, odm_segment_width);
		REG_UPDATE(OTG_H_TIMING_CNTL,
				OTG_H_TIMING_DIV_MODE, H_TIMING_DIV_BY4);
		break;
	default:
		ASSERT(false);
		break;
	}

	optc1->opp_count = opp_cnt;
}
/* Enable or disable manual control of the OTG H timing divide mode. */
static void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE(OTG_H_TIMING_CNTL,
			OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
}
/*
 * optc401_enable_crtc() - Enable the timing generator (CRTC).
 *
 * Maps OPTC segment 0 to this OTG's matching OPP instance, enables VTG
 * before OTG (hardware workaround ordering), then enables the OTG
 * master through a submitted register sequence and waits for it to
 * complete.
 *
 * Return: true (always succeeds).
 */
static bool optc401_enable_crtc(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* opp instance for OTG, 1 to 1 mapping and odm will adjust */
	REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
			OPTC_SEG0_SRC_SEL, optc->inst);

	/* VTG enable first is for HW workaround */
	REG_UPDATE(CONTROL,
			VTG0_ENABLE, 1);

	REG_SEQ_START();

	/* Enable CRTC */
	REG_UPDATE_2(OTG_CONTROL,
			OTG_DISABLE_POINT_CNTL, 2,
			OTG_MASTER_EN, 1);

	REG_SEQ_SUBMIT();
	REG_SEQ_WAIT_DONE();

	return true;
}
/*
 * optc401_disable_crtc() - Disable the timing generator.
 *
 * Detaches all OPTC input segments, releases this OPTC's shared ODM
 * memory allocation, disables OTG master and VTG, then polls OTG_BUSY
 * until the OTG reports idle (up to 150000 polls at 1 us interval).
 *
 * Return: true (always succeeds).
 */
static bool optc401_disable_crtc(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* Disconnect all input segments from their source OPPs. */
	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
			OPTC_SEG0_SRC_SEL, 0xf,
			OPTC_SEG1_SRC_SEL, 0xf,
			OPTC_SEG2_SRC_SEL, 0xf,
			OPTC_SEG3_SRC_SEL, 0xf,
			OPTC_NUM_OF_INPUT_SEGMENT, 0);

	/* Free the shared ODM memory pieces held by this OPTC. */
	REG_UPDATE(OPTC_MEMORY_CONFIG,
			OPTC_MEM_SEL, 0);

	/* disable otg request until end of the first line
	 * in the vertical blank region
	 */
	REG_UPDATE(OTG_CONTROL,
			OTG_MASTER_EN, 0);

	REG_UPDATE(CONTROL,
			VTG0_ENABLE, 0);

	/* CRTC disabled, so disable clock. */
	REG_WAIT(OTG_CLOCK_CONTROL,
			OTG_BUSY, 0,
			1, 150000);

	return true;
}
/*
 * Post-enable hook for a phantom CRTC: immediately disable the OTG
 * master again and wait for the OTG to report idle.
 */
static void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* Disable immediately. */
	REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);

	/* CRTC disabled, so disable clock. */
	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}
/*
 * Disable a phantom OTG: detach all OPTC input segments and clear the
 * OTG master enable. Unlike optc401_disable_crtc() this does not touch
 * VTG, the ODM memory selection, or wait for OTG_BUSY.
 */
static void optc401_disable_phantom_otg(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
			OPTC_SEG0_SRC_SEL, 0xf,
			OPTC_SEG1_SRC_SEL, 0xf,
			OPTC_SEG2_SRC_SEL, 0xf,
			OPTC_SEG3_SRC_SEL, 0xf,
			OPTC_NUM_OF_INPUT_SEGMENT, 0);

	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
/*
 * optc401_set_odm_bypass() - Route a single OPP through OPTC (no ODM
 * combine).
 *
 * Segment 0 is mapped 1:1 to this OTG instance, the remaining segments
 * are detached and the shared ODM memory allocation is released. The H
 * timing divide mode follows optc1_is_two_pixels_per_containter() for
 * the given timing.
 */
static void optc401_set_odm_bypass(struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	enum h_timing_div_mode h_div = H_TIMING_NO_DIV;

	REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
			OPTC_NUM_OF_INPUT_SEGMENT, 0,
			OPTC_SEG0_SRC_SEL, optc->inst,
			OPTC_SEG1_SRC_SEL, 0xf,
			OPTC_SEG2_SRC_SEL, 0xf,
			OPTC_SEG3_SRC_SEL, 0xf
			);

	/* NOTE(review): relies on two-pixels-per-container mapping onto
	 * H_TIMING_DIV_BY2 in enum h_timing_div_mode — confirm enum values.
	 */
	h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
	REG_UPDATE(OTG_H_TIMING_CNTL,
			OTG_H_TIMING_DIV_MODE, h_div);

	REG_SET(OPTC_MEMORY_CONFIG, 0,
			OPTC_MEM_SEL, 0);
	optc1->opp_count = 1;
}
/* only to be used when FAMS2 is disabled or unsupported */
void optc401_setup_manual_trigger(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	struct dc *dc = optc->ctx->dc;

	if (dc->caps.dmub_caps.fams_ver == 1 && !dc->debug.disable_fams)
		/* FAMS v1: DMUB owns the manual DRR trigger. */
		dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst);
	else {
		/*
		 * MIN_MASK_EN is gone and MASK is now always enabled.
		 *
		 * To get it to work with manual trigger we need to make sure
		 * we program the correct bit.
		 */
		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
				OTG_V_TOTAL_MIN_SEL, 1,
				OTG_V_TOTAL_MAX_SEL, 1,
				OTG_FORCE_LOCK_ON_EVENT, 0,
				OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
	}
}
/*
 * optc401_set_drr() - Program dynamic refresh rate (DRR) limits.
 * @optc:   timing generator
 * @params: DRR vertical total min/mid/max (1-based); NULL or
 *          non-positive min/max disables DRR
 *
 * With FAMS2 enabled, the limits are converted to hardware (0-based)
 * values and handed to DMUB, which owns DRR programming. Otherwise the
 * OTG V total mid/min/max registers are programmed directly and a
 * manual trigger is set up, or DRR is fully disabled.
 */
void optc401_set_drr(
	struct timing_generator *optc,
	const struct drr_params *params)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	struct dc *dc = optc->ctx->dc;
	struct drr_params amended_params = { 0 };
	bool program_manual_trigger = false;

	if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
		/* FAMS2 path: forward 0-based limits to DMUB. */
		if (params != NULL &&
				params->vertical_total_max > 0 &&
				params->vertical_total_min > 0) {
			amended_params.vertical_total_max = params->vertical_total_max - 1;
			amended_params.vertical_total_min = params->vertical_total_min - 1;
			if (params->vertical_total_mid != 0) {
				amended_params.vertical_total_mid = params->vertical_total_mid - 1;
				amended_params.vertical_total_mid_frame_num = params->vertical_total_mid_frame_num;
			}
			program_manual_trigger = true;
		}

		/* Called even when disabling (all-zero amended_params). */
		dc_dmub_srv_fams2_drr_update(dc, optc->inst,
				amended_params.vertical_total_min,
				amended_params.vertical_total_max,
				amended_params.vertical_total_mid,
				amended_params.vertical_total_mid_frame_num,
				program_manual_trigger);
	} else {
		if (params != NULL &&
				params->vertical_total_max > 0 &&
				params->vertical_total_min > 0) {
			if (params->vertical_total_mid != 0) {
				REG_SET(OTG_V_TOTAL_MID, 0,
						OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

				REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
						OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
						OTG_VTOTAL_MID_FRAME_NUM,
						(uint8_t)params->vertical_total_mid_frame_num);
			}

			optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
			optc401_setup_manual_trigger(optc);
		} else {
			/* Disable DRR: clear all min/max selects and limits. */
			REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
					OTG_SET_V_TOTAL_MIN_MASK, 0,
					OTG_V_TOTAL_MIN_SEL, 0,
					OTG_V_TOTAL_MAX_SEL, 0,
					OTG_FORCE_LOCK_ON_EVENT, 0);

			optc->funcs->set_vtotal_min_max(optc, 0, 0);
		}
	}
}
/*
 * Select the OTG output destination:
 * 00 - OTG_CONTROL_OTG_OUT_MUX_0 : Connects to DIO.
 * 01 - OTG_CONTROL_OTG_OUT_MUX_1 : Reserved.
 * 02 - OTG_CONTROL_OTG_OUT_MUX_2 : Connects to HPO.
 */
static void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE(OTG_CONTROL, OTG_OUT_MUX, dest);
}
/*
 * optc401_set_vtotal_min_max() - Program OTG vertical total min/max.
 *
 * Routes the update to DMUB when FAMS2 (or legacy FAMS v1) is active,
 * since DMUB then owns DRR programming; otherwise writes the OTG
 * registers directly via the optc1 helper.
 */
void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
	struct dc *dc = optc->ctx->dc;

	if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
		/* FAMS2 */
		dc_dmub_srv_fams2_drr_update(dc, optc->inst,
				vtotal_min,
				vtotal_max,
				0,
				0,
				false);
	} else if (dc->caps.dmub_caps.fams_ver == 1 && !dc->debug.disable_fams) {
		/* FAMS */
		dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
	} else {
		optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
	}
}
/*
 * DCN4.0.1 timing generator function table. Mostly reuses optc1/2/3
 * implementations; DCN401-specific entries cover CRTC enable/disable,
 * ODM combine/bypass, DRR, and the output mux.
 *
 * NOTE(review): optc401_set_vtotal_min_max() and
 * optc401_setup_manual_trigger() are defined in this file but the table
 * wires optc3_set_vtotal_min_max/optc2_setup_manual_trigger instead —
 * confirm this is intentional (the optc401 variants are reached only
 * through optc401_set_drr()).
 */
static struct timing_generator_funcs dcn401_tg_funcs = {
		.validate_timing = optc1_validate_timing,
		.program_timing = optc1_program_timing,
		.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
		.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
		.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
		.program_global_sync = optc1_program_global_sync,
		.enable_crtc = optc401_enable_crtc,
		.disable_crtc = optc401_disable_crtc,
		.phantom_crtc_post_enable = optc401_phantom_crtc_post_enable,
		.disable_phantom_crtc = optc401_disable_phantom_otg,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.is_counter_moving = optc1_is_counter_moving,
		.get_position = optc1_get_position,
		.get_frame_count = optc1_get_vblank_counter,
		.get_scanoutpos = optc1_get_crtc_scanoutpos,
		.get_otg_active_size = optc1_get_otg_active_size,
		.set_early_control = optc1_set_early_control,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.wait_for_state = optc1_wait_for_state,
		.set_blank_color = optc3_program_blank_color,
		.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
		.triplebuffer_lock = optc3_triplebuffer_lock,
		.triplebuffer_unlock = optc2_triplebuffer_unlock,
		.enable_reset_trigger = optc1_enable_reset_trigger,
		.enable_crtc_reset = optc1_enable_crtc_reset,
		.disable_reset_trigger = optc1_disable_reset_trigger,
		.lock = optc3_lock,
		.unlock = optc1_unlock,
		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
		.enable_optc_clock = optc1_enable_optc_clock,
		.set_drr = optc401_set_drr,
		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
		.set_vtotal_min_max = optc3_set_vtotal_min_max,
		.set_static_screen_control = optc1_set_static_screen_control,
		.program_stereo = optc1_program_stereo,
		.is_stereo_left_eye = optc1_is_stereo_left_eye,
		.tg_init = optc3_tg_init,
		.is_tg_enabled = optc1_is_tg_enabled,
		.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
		.clear_optc_underflow = optc1_clear_optc_underflow,
		.setup_global_swap_lock = NULL,
		.get_crc = optc1_get_crc,
		.configure_crc = optc1_configure_crc,
		.set_dsc_config = optc3_set_dsc_config,
		.get_dsc_status = optc2_get_dsc_status,
		.set_dwb_source = NULL,
		.set_odm_bypass = optc401_set_odm_bypass,
		.set_odm_combine = optc401_set_odm_combine,
		.set_h_timing_div_manual_mode = optc401_set_h_timing_div_manual_mode,
		.get_optc_source = optc2_get_optc_source,
		.set_out_mux = optc401_set_out_mux,
		.set_drr_trigger_window = optc3_set_drr_trigger_window,
		.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
		.set_gsl = optc2_set_gsl,
		.set_gsl_source_select = optc2_set_gsl_source_select,
		.set_vtg_params = optc1_set_vtg_params,
		.program_manual_trigger = optc2_program_manual_trigger,
		.setup_manual_trigger = optc2_setup_manual_trigger,
		.get_hw_timing = optc1_get_hw_timing,
};
/*
 * dcn401_timing_generator_init() - Install the DCN4.0.1 function table
 * and per-instance timing limits.
 *
 * Max H/V totals are derived from the register field masks (mask value
 * plus one); the minimum blank/sync widths are fixed HW constraints.
 */
void dcn401_timing_generator_init(struct optc *optc1)
{
	optc1->base.funcs = &dcn401_tg_funcs;

	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

	optc1->min_h_blank = 32;
	optc1->min_v_blank = 3;
	optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 4;
	optc1->min_v_sync_width = 1;
}

View file

@ -0,0 +1,167 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_OPTC_DCN401_H__
#define __DC_OPTC_DCN401_H__
#include "dcn10/dcn10_optc.h"
/* Shift/mask table for every OTG/OPTC/VTG register field DCN4.0.1
 * programs; expanded once with __SHIFT and once with _MASK to populate
 * the tg_shift/tg_mask structures consumed by the FN() helper.
 */
#define OPTC_COMMON_MASK_SH_LIST_DCN401(mask_sh)\
	SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
	SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
	SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
	SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
	SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
	SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\
	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
	SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
	SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
	SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
	SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
	SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
	SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
	SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
	SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
	SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
	SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
	SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
	SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
	SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\
	SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
	SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
	SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
	SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
	SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
	SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
	SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
	SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
	SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
	SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
	SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
	SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
	SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
	SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\
	SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\
	SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
	SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
	SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\
	SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\
	SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
	SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
	SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
	SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
	SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
	SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
	SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
	SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
	SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\
	SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\
	SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
	SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
	SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
	SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
	SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
	SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
	SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
	SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
	SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
	SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
	SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\
	SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\
	SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
	SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
	SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
	SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
	SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
	SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
	SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
	SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
	SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\
	SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
	SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
	SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\
	SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
	SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
	SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
	SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
	SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
	SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
	SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
	SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
	SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
	SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
	SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
	SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
	SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\
	SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\
	SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
	SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\
	SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\
	SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
	SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
	SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
	SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
	SF(ODM0_OPTC_WIDTH_CONTROL2, OPTC_SEGMENT_WIDTH_LAST, mask_sh),\
	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\
	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
	SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
void optc401_set_drr(
struct timing_generator *optc,
const struct drr_params *params);
void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
void optc401_setup_manual_trigger(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN401_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,581 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef _DCN401_RESOURCE_H_
#define _DCN401_RESOURCE_H_
#include "core_types.h"
#include "dcn32/dcn32_resource.h"
#include "dcn401/dcn401_hubp.h"
#define TO_DCN401_RES_POOL(pool)\
container_of(pool, struct dcn401_resource_pool, base)
/* DCN4.0.1 resource pool; currently just wraps the common base pool. */
struct dcn401_resource_pool {
	struct resource_pool base;
};
struct resource_pool *dcn401_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
bool dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
/* Following are definitions for run time init of reg offsets */
/* HUBP */
/* Run-time register offset init list for one DCN4.0.1 HUBP instance;
 * each SRI_ARR() records the per-instance offset of a HUBP/HUBPREQ/
 * HUBPRET/CURSOR register into the reg-offset array.
 */
#define HUBP_REG_LIST_DCN401_RI(id) \
	SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \
	SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id), \
	SRI_ARR(DCHUBP_CNTL, HUBP, id), \
	SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \
	SRI_ARR(HUBPREQ_DEBUG, HUBP, id), \
	SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \
	SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \
	SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \
	SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \
	SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
	SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
	SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
	SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
	SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
	SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
	SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
	SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
	SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
	SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \
	SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
	SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \
	SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \
	SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \
	SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \
	SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \
	SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \
	SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \
	SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \
	SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \
	SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \
	SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \
	SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \
	SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \
	SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \
	SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_4, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \
	SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \
	SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \
	SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \
	SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \
	SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \
	SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \
	SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
	SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
	SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
	SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
	SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
	SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \
	SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \
	SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \
	SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \
	SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
	SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
	SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \
	SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
	SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \
	SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \
	SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \
	SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
	SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
	SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \
	SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \
	SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \
	SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \
	SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \
	SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \
	SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \
	SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \
	SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
	SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \
	SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \
	SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id), \
	SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id), \
	SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
	SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
	SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
	HUBP_3DLUT_FL_REG_LIST_DCN401(id)
/* ABM */
/* Run-time register offset init list for one DCN4.0.1 ABM instance
 * (adaptive backlight: histogram, local sum, ACE and PWM registers).
 */
#define ABM_DCN401_REG_LIST_RI(id) \
	SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
	SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
	SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \
	SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
	SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
	SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
	SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
	SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \
	SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
	SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
	SRI_ARR(DC_ABM1_HG_BIN_33_40_SHIFT_INDEX, ABM, id), \
	SRI_ARR(DC_ABM1_HG_BIN_33_64_SHIFT_FLAG, ABM, id), \
	SRI_ARR(DC_ABM1_HG_BIN_41_48_SHIFT_INDEX, ABM, id), \
	SRI_ARR(DC_ABM1_HG_BIN_49_56_SHIFT_INDEX, ABM, id), \
	SRI_ARR(DC_ABM1_HG_BIN_57_64_SHIFT_INDEX, ABM, id), \
	SRI_ARR(DC_ABM1_HG_RESULT_DATA, ABM, id), \
	SRI_ARR(DC_ABM1_HG_RESULT_INDEX, ABM, id), \
	SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_DATA, ABM, id), \
	SRI_ARR(DC_ABM1_ACE_PWL_CNTL, ABM, id), \
	SRI_ARR(DC_ABM1_ACE_THRES_DATA, ABM, id), \
	NBIO_SR_ARR(BIOS_SCRATCH_2, id)
/* VPG */
/*
 * Video Packet Generator register list: the DCN3 base list plus the
 * VPG memory power-gating control register added for DCN4.01.
 */
#define VPG_DCN401_REG_LIST_RI(id) \
  VPG_DCN3_REG_LIST_RI(id), \
  SRI_ARR(VPG_MEM_PWR, VPG, id)
/* Stream encoder */
/*
 * Stream encoder (DIG front end / DP / HDMI packet) per-instance
 * register list for DCN4.01.
 *
 * Cleanup vs. the original list: the duplicated entries for
 * DP_SEC_METADATA_TRANSMISSION, HDMI_METADATA_PACKET_CONTROL and
 * DIG_FE_CNTL were dropped.  A repeated designated initializer is
 * redundant (the last occurrence wins, and both initialized the same
 * value), and GCC flags it under -Woverride-init.
 */
#define SE_DCN4_01_REG_LIST_RI(id) \
  SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \
  SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \
  SRI_ARR(HDMI_GC, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
  SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
  SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \
  SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \
  SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \
  SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \
  SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \
  SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \
  SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \
  SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \
  SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \
  SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \
  SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \
  SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \
  SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \
  SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \
  SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \
  SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \
  SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \
  SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
  SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \
  SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
  SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \
  SRI_ARR(DP_SEC_TIMESTAMP, DP, id), \
  SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
  SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
  SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \
  SRI_ARR(DME_CONTROL, DME, id), \
  SRI_ARR(DIG_FE_EN_CNTL, DIG, id), \
  SRI_ARR(DIG_FE_CLK_CNTL, DIG, id), \
  SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \
  SRI_ARR(DIG_FIFO_CTRL0, DIG, id), \
  SRI_ARR(STREAM_MAPPER_CONTROL, DIG, id)
/* Link encoder */
/*
 * Link encoder register list: the DCN3 base list plus the DP PHY
 * internal control and DIG back-end clock control registers added
 * for DCN4.01.
 */
#define LE_DCN401_REG_LIST_RI(id) \
  LE_DCN3_REG_LIST_RI(id), \
  SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
  SRI_ARR(DIG_BE_CLK_CNTL, DIG, id)
/* DPP */
/*
 * DPP (pixel pipe) common per-instance register list for DCN4.01,
 * covering the input converter (CNVC), scaler (DSCL), color module
 * (CM, including the GAMCOR RAM A/B LUTs), cursor and color keyer.
 *
 * Cleanup vs. the original list: the CM_GAMCOR_LUT_INDEX entry was
 * listed twice in a row; the redundant duplicate initializer was
 * dropped (same value, last occurrence wins; GCC warns under
 * -Woverride-init).
 */
#define DPP_REG_LIST_DCN401_COMMON_RI(id) \
  SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \
  SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \
  SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \
  SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \
  SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
  SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \
  SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \
  SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
  SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
  SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \
  SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \
  SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \
  SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \
  SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
  SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
  SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \
  SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
  SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
  SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
  SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
  SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \
  SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
  SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \
  SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \
  SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \
  SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \
  SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \
  SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \
  SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \
  SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
  SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
  SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \
  SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \
  SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \
  SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
  SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
  SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
  SRI_ARR(CM_TEST_DEBUG_INDEX, CM, id), \
  SRI_ARR(CM_TEST_DEBUG_DATA, CM, id), \
  SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
  SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
  SRI_ARR(CURSOR0_CONTROL, CM_CUR, id), \
  SRI_ARR(CURSOR0_COLOR0, CM_CUR, id), \
  SRI_ARR(CURSOR0_COLOR1, CM_CUR, id), \
  SRI_ARR(CURSOR0_FP_SCALE_BIAS_G_Y, CM_CUR, id), \
  SRI_ARR(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_MODE, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C11_C12_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C13_C14_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C21_C22_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C23_C24_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C31_C32_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C33_C34_A, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C11_C12_B, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C13_C14_B, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C21_C22_B, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C23_C24_B, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C31_C32_B, CM_CUR, id), \
  SRI_ARR(CUR0_MATRIX_C33_C34_B, CM_CUR, id), \
  SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \
  SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
  SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \
  SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \
  SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
  SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
  SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \
  SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \
  SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \
  SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \
  SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \
  SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id), \
  SRI_ARR(DSCL_CONTROL, DSCL, id)
/* OPP */
/*
 * Output pixel processor register list: DCN1.0 base list, display
 * pattern generator registers, plus the FMT 4:2:2 control register
 * added for DCN4.01.
 */
#define OPP_REG_LIST_DCN401_RI(id) \
  OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \
  SRI_ARR(FMT_422_CONTROL, FMT, id)
/* DSC */
/*
 * Display Stream Compression per-instance register list for DCN4.01:
 * DSC top-level control, the full DSCC PPS config / status /
 * rate-buffer-model set, the DSC input interface (DSCCIF) and the
 * forwarding config (DSCRM).
 */
#define DSC_REG_LIST_DCN401_RI(id) \
  SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \
  SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \
  SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \
  SRI_ARR(DSCC_STATUS, DSCC, id), \
  SRI_ARR(DSCC_INTERRUPT_CONTROL0, DSCC, id), \
  SRI_ARR(DSCC_INTERRUPT_CONTROL1, DSCC, id), \
  SRI_ARR(DSCC_INTERRUPT_STATUS0, DSCC, id), \
  SRI_ARR(DSCC_INTERRUPT_STATUS1, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \
  SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \
  SRI_ARR(DSCC_MEM_POWER_CONTROL0, DSCC, id), \
  SRI_ARR(DSCC_MEM_POWER_CONTROL1, DSCC, id), \
  SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \
  SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \
  SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \
  SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \
  SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \
  SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \
  SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \
  SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \
  SRI_ARR(DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL0, DSCC, id), \
  SRI_ARR(DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL1, DSCC, id), \
  SRI_ARR(DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL2, DSCC, id), \
  SRI_ARR(DSCC_RATE_BUFFER_MODEL_MAX_FULLNESS_LEVEL3, DSCC, id), \
  SRI_ARR(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id), \
  SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
  SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
/* MPC */
/*
 * MPC mux register lists for DCN4.01 simply reuse the earlier
 * DCN3.0/DCN1.0 definitions -- no new mux registers for this
 * generation.
 */
#define MPC_DWB_MUX_REG_LIST_DCN4_01_RI(inst) \
  MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN4_01_RI(inst) \
  MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst)
#define MPC_OUT_MUX_REG_LIST_DCN4_01_RI(inst) \
  MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst)
/* OPTC */
/*
 * OPTC/OTG timing generator per-instance register list for DCN4.01:
 * OTG timing/sync/CRC/GSL registers, the ODM-side OPTC input/output
 * data-path registers and the VTG control register.
 *
 * Cleanup vs. the original list: the duplicated OTG_GLOBAL_CONTROL1
 * and OTG_GLOBAL_CONTROL2 entries near the end were dropped; both are
 * already listed at the top of the list, and a repeated designated
 * initializer is redundant (GCC warns under -Woverride-init).
 */
#define OPTC_COMMON_REG_LIST_DCN401_RI(inst) \
  SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \
  SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \
  SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \
  SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \
  SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \
  SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
  SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
  SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \
  SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \
  SRI_ARR(OTG_H_TOTAL, OTG, inst), \
  SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \
  SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \
  SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \
  SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \
  SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \
  SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \
  SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \
  SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \
  SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \
  SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst), \
  SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \
  SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \
  SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \
  SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \
  SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \
  SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \
  SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \
  SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \
  SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \
  SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \
  SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \
  SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
  SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
  SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
  SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
  SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \
  SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \
  SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \
  SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \
  SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \
  SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \
  SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \
  SR_ARR(GSL_SOURCE_SELECT, inst), \
  SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \
  SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \
  SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \
  SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \
  SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \
  SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \
  SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \
  SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
  SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
  SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \
  SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
  SRI_ARR(OTG_DRR_CONTROL, OTG, inst)
/* HUBBUB */
/*
 * HUBBUB (memory hub arbiter) register list for DCN4.01: watermark
 * sets A/B (urgency, SR enter/exit, USR retraining, UCLK/FCLK p-state),
 * DET/compbuf sizing, VM fault reporting and SDPIF configuration.
 *
 * NOTE(review): all entries are single-instance SR() registers, so the
 * @id argument is unused here -- presumably kept for signature parity
 * with the other *_REG_LIST_*_RI(id) macros; confirm before removing.
 */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
  SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \
  SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \
  SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
  SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), \
  SR(DCHUBBUB_ARB_SAT_LEVEL), \
  SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), \
  SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
  SR(DCHUBBUB_TEST_DEBUG_INDEX), \
  SR(DCHUBBUB_TEST_DEBUG_DATA), \
  SR(DCHUBBUB_SOFT_RESET), \
  SR(DCHUBBUB_CRC_CTRL), \
  SR(DCN_VM_FB_LOCATION_BASE), \
  SR(DCN_VM_FB_LOCATION_TOP), \
  SR(DCN_VM_FB_OFFSET), \
  SR(DCN_VM_AGP_BOT), \
  SR(DCN_VM_AGP_TOP), \
  SR(DCN_VM_AGP_BASE), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A), \
  SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B), \
  SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A), \
  SR(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B), \
  SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \
  SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \
  SR(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A), \
  SR(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B), \
  SR(DCHUBBUB_DET0_CTRL), \
  SR(DCHUBBUB_DET1_CTRL), \
  SR(DCHUBBUB_DET2_CTRL), \
  SR(DCHUBBUB_DET3_CTRL), \
  SR(DCHUBBUB_COMPBUF_CTRL), \
  SR(COMPBUF_RESERVED_SPACE), \
  SR(DCHUBBUB_DEBUG_CTRL_0), \
  SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \
  SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \
  SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \
  SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \
  SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \
  SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A), \
  SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B), \
  SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \
  SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \
  SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A), \
  SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B), \
  SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
  SR(DCN_VM_FAULT_CNTL), \
  SR(DCN_VM_FAULT_STATUS), \
  SR(SDPIF_REQUEST_RATE_LIMIT), \
  SR(DCHUBBUB_CLOCK_CNTL), \
  SR(DCHUBBUB_SDPIF_CFG0), \
  SR(DCHUBBUB_SDPIF_CFG1), \
  SR(DCHUBBUB_MEM_PWR_MODE_CTRL)
/* DCCG */
/*
 * DCCG (display clock generator) register list for DCN4.01: DPP clock
 * DTOs, PHY symclks, DP/HDMI stream clocks, per-OTG pixel rate
 * controls, DP/DSC clock DTOs and the clock gating disable registers.
 * Takes no instance argument -- the DCCG is a single global block.
 */
#define DCCG_REG_LIST_DCN401_RI() \
  SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \
  DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \
  DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \
  SR(PHYASYMCLK_CLOCK_CNTL), SR(PHYBSYMCLK_CLOCK_CNTL), \
  SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \
  SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \
  SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \
  DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \
  DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \
  SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \
  SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL), \
  SR(DPPCLK_CTRL), \
  DCCG_SRII(MODULO, DP_DTO, 0), DCCG_SRII(MODULO, DP_DTO, 1), \
  DCCG_SRII(MODULO, DP_DTO, 2), DCCG_SRII(MODULO, DP_DTO, 3), \
  DCCG_SRII(PHASE, DP_DTO, 0), DCCG_SRII(PHASE, DP_DTO, 1), \
  DCCG_SRII(PHASE, DP_DTO, 2), DCCG_SRII(PHASE, DP_DTO, 3), \
  SR(DSCCLK0_DTO_PARAM),\
  SR(DSCCLK1_DTO_PARAM),\
  SR(DSCCLK2_DTO_PARAM),\
  SR(DSCCLK3_DTO_PARAM),\
  SR(DSCCLK_DTO_CTRL),\
  SR(DCCG_GATE_DISABLE_CNTL),\
  SR(DCCG_GATE_DISABLE_CNTL2),\
  SR(DCCG_GATE_DISABLE_CNTL3),\
  SR(DCCG_GATE_DISABLE_CNTL4),\
  SR(DCCG_GATE_DISABLE_CNTL5),\
  SR(DCCG_GATE_DISABLE_CNTL6)
#endif /* _DCN401_RESOURCE_H_ */

View file

@ -0,0 +1,33 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Makefile for the 'spl' sub-component of DAL.
# It provides the scaling library interface.
# Object files that make up the SPL (scaling library) sub-component.
SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_isharp_filters.o
# Qualify each object with its location under the DAL source tree.
AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
# Register the SPL objects with the overall display driver build.
AMD_DISPLAY_FILES += $(AMD_DAL_SPL)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,24 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_SPL_H__
#define __DC_SPL_H__
#include "dc_spl_types.h"
/* Black-level offsets used when programming scaler black bars.
 * NOTE(review): 0x8000 looks like mid-range chroma in a 16-bit
 * representation -- confirm against the DSCL programming sequence.
 */
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#ifdef __cplusplus
extern "C" {
#endif
/* SPL interfaces */
/*
 * Compute scaler parameters from the pipe/stream description in
 * @spl_in and store the programming output in @spl_out.  The bool
 * result presumably signals whether a valid scaling configuration was
 * produced -- see the implementation in dc_spl.c for the exact
 * contract.
 */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
#ifdef __cplusplus
}
#endif
#endif /* __DC_SPL_H__ */

View file

@ -0,0 +1,350 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_types.h"
#include "dc_spl_isharp_filters.h"
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 2.000000
// B_start = 11.000000
// B_end = 86.000000
// C_start = 40.000000
// C_end = 64.000000
//========================================
/* Narrow-range 2.0x-gain variant (generator parameters above): only
 * the first 16 dwords (64 packed byte entries) are non-zero; the tail
 * of the curve is all zero.
 */
static const uint32_t filter_isharp_1D_lut_0[32] = {
	0x02010000,
	0x0A070503,
	0x1614100D,
	0x1C1B1918,
	0x22211F1E,
	0x27262423,
	0x2A2A2928,
	0x2D2D2C2B,
	0x302F2F2E,
	0x31313030,
	0x31313131,
	0x31313131,
	0x30303031,
	0x292D2F2F,
	0x191D2125,
	0x050A0F14,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
};
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 0.500000
// B_start = 11.000000
// B_end = 127.000000
// C_start = 96.000000
// C_end = 127.000000
//========================================
/* 0.5x-gain delta-gain LUT (generator parameters above); symmetric
 * curve peaking at 0x11 per byte entry.
 */
static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
	0x00000000,
	0x02020101,
	0x06050403,
	0x07070606,
	0x09080808,
	0x0A0A0A09,
	0x0C0B0B0B,
	0x0D0D0C0C,
	0x0E0E0D0D,
	0x0F0F0E0E,
	0x100F0F0F,
	0x10101010,
	0x11111010,
	0x11111111,
	0x11111111,
	0x11111111,
	0x11111111,
	0x11111111,
	0x11111111,
	0x10101111,
	0x10101010,
	0x0F0F0F10,
	0x0E0E0F0F,
	0x0D0D0E0E,
	0x0C0C0D0D,
	0x0B0B0B0C,
	0x090A0A0A,
	0x08080809,
	0x06060707,
	0x04050506,
	0x02030304,
	0x00010102,
};
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 1.000000
// B_start = 11.000000
// B_end = 127.000000
// C_start = 96.000000
// C_end = 127.000000
//========================================
/* 1.0x-gain delta-gain LUT (generator parameters above); symmetric
 * curve peaking at 0x23 per byte entry.
 */
static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
	0x01000000,
	0x05040302,
	0x0B0A0806,
	0x0E0E0D0C,
	0x1211100F,
	0x15141312,
	0x17171615,
	0x1A191918,
	0x1C1B1B1A,
	0x1E1D1D1C,
	0x1F1F1E1E,
	0x2020201F,
	0x21212121,
	0x22222222,
	0x23232222,
	0x23232323,
	0x23232323,
	0x22222323,
	0x22222222,
	0x21212121,
	0x1F202020,
	0x1E1E1F1F,
	0x1C1D1D1E,
	0x1A1B1B1C,
	0x1819191A,
	0x15161717,
	0x12131415,
	0x0F101112,
	0x0C0D0E0E,
	0x08090A0B,
	0x04050607,
	0x00010203,
};
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 1.500000
// B_start = 11.000000
// B_end = 127.000000
// C_start = 96.000000
// C_end = 127.000000
//========================================
/* 1.5x-gain delta-gain LUT (generator parameters above); symmetric
 * curve peaking at 0x34 per byte entry.
 *
 * Consistency fix: a trailing comma was added after the last element,
 * matching the style of every sibling LUT in this file (and making
 * future diffs one-line).
 */
static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
	0x01010000,
	0x07050402,
	0x110F0C0A,
	0x16141312,
	0x1B191817,
	0x1F1E1D1C,
	0x23222120,
	0x26262524,
	0x2A292827,
	0x2C2C2B2A,
	0x2F2E2E2D,
	0x3130302F,
	0x32323131,
	0x33333332,
	0x34343433,
	0x34343434,
	0x34343434,
	0x33343434,
	0x32333333,
	0x31313232,
	0x2F303031,
	0x2D2E2E2F,
	0x2A2B2C2C,
	0x2728292A,
	0x24252626,
	0x20212223,
	0x1C1D1E1F,
	0x1718191B,
	0x12131416,
	0x0C0E0F10,
	0x0608090B,
	0x00020305,
};
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 2.000000
// B_start = 11.000000
// B_end = 127.000000
// C_start = 40.000000
// C_end = 127.000000
//========================================
/* Full-range 2.0x-gain delta-gain LUT (generator parameters above);
 * symmetric curve peaking at 0x46 per byte entry.
 */
static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
	0x02010000,
	0x0A070503,
	0x1614100D,
	0x1D1B1A18,
	0x2322201F,
	0x29282625,
	0x2F2D2C2B,
	0x33323130,
	0x38373534,
	0x3B3A3938,
	0x3E3E3D3C,
	0x4140403F,
	0x43424241,
	0x44444443,
	0x45454545,
	0x46454545,
	0x45454546,
	0x45454545,
	0x43444444,
	0x41424243,
	0x3F404041,
	0x3C3D3E3E,
	0x38393A3B,
	0x34353738,
	0x30313233,
	0x2B2C2D2F,
	0x25262829,
	0x1F202223,
	0x181A1B1D,
	0x10121416,
	0x080B0D0E,
	0x00020406,
};
// Wide scaler coefficients
//========================================================
// <using> gen_scaler_coeffs.m
// <date> 15-Dec-2021
// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
// <num_taps> 6
// <num_phases> 64
// <CoefType> LanczosEd
// <CoefQuant> S1.10
//========================================================
/* 6-tap, 64-phase LanczosEd coefficients in S1.10 fixed point
 * (generator parameters above).  Stored as 33 rows x 6 taps = 198
 * entries; presumably only half the phases are stored and the rest
 * are derived by symmetry in the coefficient-programming code --
 * TODO confirm against the DSCL coefficient upload path.
 */
static const uint32_t filter_isharp_wide_6tap_64p[198] = {
	0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
	0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
	0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
	0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
	0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
	0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
	0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
	0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
	0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
	0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
	0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
	0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
	0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
	0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
	0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
	0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
	0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
	0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
	0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
	0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
	0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
	0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
	0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
	0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
	0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
	0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
	0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
	0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
	0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
	0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
	0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
	0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
	0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019
};
// Blur and scale coefficients
//========================================================
// <using> gen_BlurScale_coeffs.m
// <date> 25-Apr-2022
// <num_taps> 4
// <num_phases> 64
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
/* 4-tap, 64-phase blur-and-scale low-pass coefficients in S1.10 fixed
 * point (generator parameters above).  Each row holds 6 slots -- the
 * outermost two columns are zero padding -- so the table shares the
 * 33-row x 6-slot (198-entry) layout of the 6-tap table above.
 */
static const uint32_t filter_isharp_bs_4tap_64p[198] = {
	0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000,
	0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000,
	0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000,
	0x0000, 0x00D0, 0x0235, 0x00FA, 0x0001, 0x0000,
	0x0000, 0x00C9, 0x0234, 0x0101, 0x0002, 0x0000,
	0x0000, 0x00C2, 0x0233, 0x0108, 0x0003, 0x0000,
	0x0000, 0x00BB, 0x0232, 0x0110, 0x0003, 0x0000,
	0x0000, 0x00B5, 0x0230, 0x0117, 0x0004, 0x0000,
	0x0000, 0x00AE, 0x022E, 0x011F, 0x0005, 0x0000,
	0x0000, 0x00A8, 0x022C, 0x0126, 0x0006, 0x0000,
	0x0000, 0x00A2, 0x022A, 0x012D, 0x0007, 0x0000,
	0x0000, 0x009C, 0x0228, 0x0134, 0x0008, 0x0000,
	0x0000, 0x0096, 0x0225, 0x013C, 0x0009, 0x0000,
	0x0000, 0x0090, 0x0222, 0x0143, 0x000B, 0x0000,
	0x0000, 0x008A, 0x021F, 0x014B, 0x000C, 0x0000,
	0x0000, 0x0085, 0x021C, 0x0151, 0x000E, 0x0000,
	0x0000, 0x007F, 0x0218, 0x015A, 0x000F, 0x0000,
	0x0000, 0x007A, 0x0215, 0x0160, 0x0011, 0x0000,
	0x0000, 0x0074, 0x0211, 0x0168, 0x0013, 0x0000,
	0x0000, 0x006F, 0x020D, 0x016F, 0x0015, 0x0000,
	0x0000, 0x006A, 0x0209, 0x0176, 0x0017, 0x0000,
	0x0000, 0x0065, 0x0204, 0x017E, 0x0019, 0x0000,
	0x0000, 0x0060, 0x0200, 0x0185, 0x001B, 0x0000,
	0x0000, 0x005C, 0x01FB, 0x018C, 0x001D, 0x0000,
	0x0000, 0x0057, 0x01F6, 0x0193, 0x0020, 0x0000,
	0x0000, 0x0053, 0x01F1, 0x019A, 0x0022, 0x0000,
	0x0000, 0x004E, 0x01EC, 0x01A1, 0x0025, 0x0000,
	0x0000, 0x004A, 0x01E6, 0x01A8, 0x0028, 0x0000,
	0x0000, 0x0046, 0x01E1, 0x01AF, 0x002A, 0x0000,
	0x0000, 0x0042, 0x01DB, 0x01B6, 0x002D, 0x0000,
	0x0000, 0x003F, 0x01D5, 0x01BB, 0x0031, 0x0000,
	0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000,
	0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000
};
/*
 * Accessors for the iSharp delta-gain LUTs and coefficient tables
 * defined above.  Each returns a pointer to the first element of the
 * corresponding file-scope static table; callers must not modify or
 * free the returned data.
 */
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
	return &filter_isharp_1D_lut_0[0];
}

const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void)
{
	return &filter_isharp_1D_lut_0p5x[0];
}

const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void)
{
	return &filter_isharp_1D_lut_1p0x[0];
}

const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void)
{
	return &filter_isharp_1D_lut_1p5x[0];
}

const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
{
	return &filter_isharp_1D_lut_2p0x[0];
}

const uint32_t *spl_get_filter_isharp_wide_6tap_64p(void)
{
	return &filter_isharp_wide_6tap_64p[0];
}

const uint32_t *spl_get_filter_isharp_bs_4tap_64p(void)
{
	return &filter_isharp_bs_4tap_64p[0];
}

View file

@ -0,0 +1,17 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_SPL_ISHARP_FILTERS_H__
#define __DC_SPL_ISHARP_FILTERS_H__
#include "dc_spl_types.h"
/* Delta-gain 1D LUT accessors for the iSharp sharpener, one per gain
 * setting (narrow-range "0" variant, then 0.5x/1.0x/1.5x/2.0x).  Each
 * returns a read-only 32-dword table with four packed byte entries
 * per dword.
 */
const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
/* Scaler coefficient tables used with iSharp: 4-tap blur-and-scale
 * LPF and 6-tap wide (LanczosEd) filter, both 64-phase, S1.10.
 */
const uint32_t *spl_get_filter_isharp_bs_4tap_64p(void);
const uint32_t *spl_get_filter_isharp_wide_6tap_64p(void);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,59 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef __DC_SPL_SCL_FILTERS_H__
#define __DC_SPL_SCL_FILTERS_H__
#include "dc_spl_types.h"
const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_2tap_16p(void);
const uint16_t *spl_get_filter_2tap_64p(void);
const uint16_t *spl_get_filter_3tap_16p_upscale(void);
const uint16_t *spl_get_filter_3tap_16p_116(void);
const uint16_t *spl_get_filter_3tap_16p_149(void);
const uint16_t *spl_get_filter_3tap_16p_183(void);
const uint16_t *spl_get_filter_4tap_16p_upscale(void);
const uint16_t *spl_get_filter_4tap_16p_116(void);
const uint16_t *spl_get_filter_4tap_16p_149(void);
const uint16_t *spl_get_filter_4tap_16p_183(void);
const uint16_t *spl_get_filter_3tap_64p_upscale(void);
const uint16_t *spl_get_filter_3tap_64p_116(void);
const uint16_t *spl_get_filter_3tap_64p_149(void);
const uint16_t *spl_get_filter_3tap_64p_183(void);
const uint16_t *spl_get_filter_4tap_64p_upscale(void);
const uint16_t *spl_get_filter_4tap_64p_116(void);
const uint16_t *spl_get_filter_4tap_64p_149(void);
const uint16_t *spl_get_filter_4tap_64p_183(void);
const uint16_t *spl_get_filter_5tap_64p_upscale(void);
const uint16_t *spl_get_filter_5tap_64p_116(void);
const uint16_t *spl_get_filter_5tap_64p_149(void);
const uint16_t *spl_get_filter_5tap_64p_183(void);
const uint16_t *spl_get_filter_6tap_64p_upscale(void);
const uint16_t *spl_get_filter_6tap_64p_116(void);
const uint16_t *spl_get_filter_6tap_64p_149(void);
const uint16_t *spl_get_filter_6tap_64p_183(void);
const uint16_t *spl_get_filter_7tap_64p_upscale(void);
const uint16_t *spl_get_filter_7tap_64p_116(void);
const uint16_t *spl_get_filter_7tap_64p_149(void);
const uint16_t *spl_get_filter_7tap_64p_183(void);
const uint16_t *spl_get_filter_8tap_64p_upscale(void);
const uint16_t *spl_get_filter_8tap_64p_116(void);
const uint16_t *spl_get_filter_8tap_64p_149(void);
const uint16_t *spl_get_filter_8tap_64p_183(void);
#endif /* __DC_SPL_SCL_FILTERS_H__ */

Some files were not shown because too many files have changed in this diff Show more