drm/amdgpu/gmc: set a default disable value for AGP
To disable AGP, the start needs to be set to a higher value than the end. Set a default disable value for the AGP aperture and allow the IP-specific GMC code to enable it selectively by calling amdgpu_gmc_agp_location(). Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
29495d8145
commit
de59b69932
10 changed files with 37 additions and 18 deletions
|
@ -316,14 +316,6 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
|
||||||
const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
|
const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
|
||||||
u64 size_af, size_bf;
|
u64 size_af, size_bf;
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev)) {
|
|
||||||
mc->agp_start = 0xffffffffffff;
|
|
||||||
mc->agp_end = 0x0;
|
|
||||||
mc->agp_size = 0;
|
|
||||||
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mc->fb_start > mc->gart_start) {
|
if (mc->fb_start > mc->gart_start) {
|
||||||
size_bf = (mc->fb_start & sixteen_gb_mask) -
|
size_bf = (mc->fb_start & sixteen_gb_mask) -
|
||||||
ALIGN(mc->gart_end + 1, sixteen_gb);
|
ALIGN(mc->gart_end + 1, sixteen_gb);
|
||||||
|
@ -347,6 +339,25 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
|
||||||
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
|
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* amdgpu_gmc_set_agp_default - Set the default AGP aperture value.
|
||||||
|
* @adev: amdgpu device structure holding all necessary information
|
||||||
|
* @mc: memory controller structure holding memory information
|
||||||
|
*
|
||||||
|
* To disable the AGP aperture, you need to set the start to a larger
|
||||||
|
* value than the end. This function sets the default value which
|
||||||
|
* can then be overridden using amdgpu_gmc_agp_location() if you want
|
||||||
|
* to enable the AGP aperture on a specific chip.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_gmc *mc)
|
||||||
|
{
|
||||||
|
mc->agp_start = 0xffffffffffff;
|
||||||
|
mc->agp_end = 0;
|
||||||
|
mc->agp_size = 0;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* amdgpu_gmc_fault_key - get hask key from vm fault address and pasid
|
* amdgpu_gmc_fault_key - get hask key from vm fault address and pasid
|
||||||
*
|
*
|
||||||
|
|
|
@ -394,6 +394,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
|
||||||
struct amdgpu_gmc *mc);
|
struct amdgpu_gmc *mc);
|
||||||
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
|
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
|
||||||
struct amdgpu_gmc *mc);
|
struct amdgpu_gmc *mc);
|
||||||
|
void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_gmc *mc);
|
||||||
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
|
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
|
||||||
struct amdgpu_ih_ring *ih, uint64_t addr,
|
struct amdgpu_ih_ring *ih, uint64_t addr,
|
||||||
uint16_t pasid, uint64_t timestamp);
|
uint16_t pasid, uint64_t timestamp);
|
||||||
|
|
|
@ -1062,6 +1062,9 @@ static const char * const amdgpu_vram_names[] = {
|
||||||
*/
|
*/
|
||||||
int amdgpu_bo_init(struct amdgpu_device *adev)
|
int amdgpu_bo_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
|
/* set the default AGP aperture state */
|
||||||
|
amdgpu_gmc_set_agp_default(adev, &adev->gmc);
|
||||||
|
|
||||||
/* On A+A platform, VRAM can be mapped as WB */
|
/* On A+A platform, VRAM can be mapped as WB */
|
||||||
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
|
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
|
||||||
/* reserve PAT memory space to WC for VRAM */
|
/* reserve PAT memory space to WC for VRAM */
|
||||||
|
|
|
@ -671,6 +671,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
|
|
||||||
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
||||||
amdgpu_gmc_gart_location(adev, mc);
|
amdgpu_gmc_gart_location(adev, mc);
|
||||||
|
if (!amdgpu_sriov_vf(adev))
|
||||||
amdgpu_gmc_agp_location(adev, mc);
|
amdgpu_gmc_agp_location(adev, mc);
|
||||||
|
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
|
|
@ -635,6 +635,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
|
|
||||||
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
||||||
amdgpu_gmc_gart_location(adev, mc);
|
amdgpu_gmc_gart_location(adev, mc);
|
||||||
|
if (!amdgpu_sriov_vf(adev))
|
||||||
amdgpu_gmc_agp_location(adev, mc);
|
amdgpu_gmc_agp_location(adev, mc);
|
||||||
|
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
|
|
@ -253,8 +253,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
|
||||||
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
||||||
adev->mem_scratch.gpu_addr >> 12);
|
adev->mem_scratch.gpu_addr >> 12);
|
||||||
WREG32(mmMC_VM_AGP_BASE, 0);
|
WREG32(mmMC_VM_AGP_BASE, 0);
|
||||||
WREG32(mmMC_VM_AGP_TOP, 0);
|
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
|
||||||
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
|
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
|
||||||
|
|
||||||
if (gmc_v6_0_wait_for_idle((void *)adev))
|
if (gmc_v6_0_wait_for_idle((void *)adev))
|
||||||
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
||||||
|
|
|
@ -288,8 +288,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
|
||||||
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
||||||
adev->mem_scratch.gpu_addr >> 12);
|
adev->mem_scratch.gpu_addr >> 12);
|
||||||
WREG32(mmMC_VM_AGP_BASE, 0);
|
WREG32(mmMC_VM_AGP_BASE, 0);
|
||||||
WREG32(mmMC_VM_AGP_TOP, 0);
|
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
|
||||||
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
|
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
|
||||||
if (gmc_v7_0_wait_for_idle((void *)adev))
|
if (gmc_v7_0_wait_for_idle((void *)adev))
|
||||||
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
||||||
|
|
||||||
|
|
|
@ -473,8 +473,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
|
||||||
}
|
}
|
||||||
|
|
||||||
WREG32(mmMC_VM_AGP_BASE, 0);
|
WREG32(mmMC_VM_AGP_BASE, 0);
|
||||||
WREG32(mmMC_VM_AGP_TOP, 0);
|
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
|
||||||
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
|
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
|
||||||
if (gmc_v8_0_wait_for_idle((void *)adev))
|
if (gmc_v8_0_wait_for_idle((void *)adev))
|
||||||
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
||||||
|
|
||||||
|
|
|
@ -1612,6 +1612,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
} else {
|
} else {
|
||||||
amdgpu_gmc_vram_location(adev, mc, base);
|
amdgpu_gmc_vram_location(adev, mc, base);
|
||||||
amdgpu_gmc_gart_location(adev, mc);
|
amdgpu_gmc_gart_location(adev, mc);
|
||||||
|
if (!amdgpu_sriov_vf(adev))
|
||||||
amdgpu_gmc_agp_location(adev, mc);
|
amdgpu_gmc_agp_location(adev, mc);
|
||||||
}
|
}
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
|
|
@ -1256,7 +1256,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
|
||||||
agp_top = adev->gmc.agp_end >> 24;
|
agp_top = adev->gmc.agp_end >> 24;
|
||||||
|
|
||||||
/* AGP aperture is disabled */
|
/* AGP aperture is disabled */
|
||||||
if (agp_bot == agp_top) {
|
if (agp_bot > agp_top) {
|
||||||
logical_addr_low = adev->gmc.fb_start >> 18;
|
logical_addr_low = adev->gmc.fb_start >> 18;
|
||||||
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
|
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Add table
Reference in a new issue