drm/amdgpu: keep copy of VRAM lost counter in job
Instead of reading the current counter from fpriv.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 14e47f93c5
parent 396bcb41e0
3 changed files with 10 additions and 6 deletions
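
The change follows a common generation-counter pattern: amdgpu_job_alloc() snapshots adev->vram_lost_counter into the job, and amdgpu_job_run() later compares that snapshot against the device's current counter, skipping the IBs if VRAM was lost in between. Below is a minimal, self-contained C sketch of that pattern; the gpu_device/gpu_job types and helper names are illustrative stand-ins, not the actual amdgpu API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver structures, not the real amdgpu types. */
struct gpu_device {
	atomic_uint vram_lost_counter;	/* bumped whenever VRAM contents are lost */
};

struct gpu_job {
	struct gpu_device *adev;
	unsigned int vram_lost_counter;	/* snapshot taken when the job is created */
};

/* Submission side: remember which "VRAM generation" the job was built against. */
static void job_init(struct gpu_job *job, struct gpu_device *adev)
{
	job->adev = adev;
	job->vram_lost_counter = atomic_load(&adev->vram_lost_counter);
}

/* Reset side: invalidate every job submitted before the loss. */
static void device_mark_vram_lost(struct gpu_device *adev)
{
	atomic_fetch_add(&adev->vram_lost_counter, 1);
}

/* Execution side: skip jobs whose snapshot no longer matches the device. */
static bool job_run(const struct gpu_job *job)
{
	if (job->vram_lost_counter != atomic_load(&job->adev->vram_lost_counter)) {
		fprintf(stderr, "Skip scheduling IBs!\n");
		return false;
	}
	/* ...schedule the IBs here... */
	return true;
}

int main(void)
{
	struct gpu_device adev = { .vram_lost_counter = 0 };
	struct gpu_job job;

	job_init(&job, &adev);
	device_mark_vram_lost(&adev);	/* simulate a reset that lost VRAM */
	return job_run(&job) ? 0 : 1;	/* the stale job is detected and skipped */
}

The upshot, visible in the amdgpu_job_run() hunk below, is that the check no longer needs to dig the submitting process's amdgpu_fpriv out of job->vm; everything the check needs travels with the job.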
@@ -1125,6 +1125,7 @@ struct amdgpu_job {
 	uint32_t		gds_base, gds_size;
 	uint32_t		gws_base, gws_size;
 	uint32_t		oa_base, oa_size;
+	uint32_t		vram_lost_counter;
 
 	/* user fence handling */
 	uint64_t		uf_addr;
@@ -172,6 +172,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	if (ret)
 		goto free_all_kdata;
 
+	p->job->vram_lost_counter = fpriv->vram_lost_counter;
+
 	if (p->uf_entry.robj)
 		p->job->uf_addr = uf_offset;
 	kfree(chunk_array);
@@ -61,6 +61,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
+	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 
 	amdgpu_sync_create(&(*job)->sync);
 	amdgpu_sync_create(&(*job)->dep_sync);
@@ -180,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
-	struct amdgpu_fpriv *fpriv = NULL;
 	int r;
 
 	if (!sched_job) {
@@ -189,17 +190,17 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
+	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
-	if (job->vm)
-		fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
 	/* skip ib schedule when vram is lost */
-	if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
 		DRM_ERROR("Skip scheduling IBs!\n");
-	else {
-		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+	} else {
+		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+				       &fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	}