linux/drivers/gpu/drm/scheduler/sched_entity.c
Vitaly Prosyak f34e8bb7d6 drm/sched: fix null-ptr-deref in init entity
The bug can be triggered by sending an amdgpu_cs_wait_ioctl
to the AMDGPU DRM driver on any ASICs with valid context.
The bug was reported by Joonkyo Jung <joonkyoj@yonsei.ac.kr>.
For example the following code:

    static void Syzkaller2(int fd)
    {
	union drm_amdgpu_ctx arg1;
	union drm_amdgpu_wait_cs arg2;
	int ret;

	arg1.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	ret = drmIoctl(fd, 0x140106442 /* amdgpu_ctx_ioctl */, &arg1);

	arg2.in.handle = 0x0;
	arg2.in.timeout = 0x2000000000000;
	arg2.in.ip_type = AMD_IP_VPE /* 0x9 */;
	arg2.in.ip_instance = 0x0;
	arg2.in.ring = 0x0;
	arg2.in.ctx_id = arg1.out.alloc.ctx_id;

	drmIoctl(fd, 0xc0206449 /* AMDGPU_WAIT_CS */, &arg2);
    }

One could assume that the ioctl AMDGPU_WAIT_CS returns an error when no job
was previously submitted, but commit 1decbf6bb0 modified the logic and
allowed sched_rq to be equal to NULL.

As a result, when there is no job the ioctl AMDGPU_WAIT_CS returns success.

The change fixes the null-ptr-deref in init entity; the stack trace below
demonstrates the error condition:

[  +0.000007] BUG: kernel NULL pointer dereference, address: 0000000000000028
[  +0.007086] #PF: supervisor read access in kernel mode
[  +0.005234] #PF: error_code(0x0000) - not-present page
[  +0.005232] PGD 0 P4D 0
[  +0.002501] Oops: 0000 [#1] PREEMPT SMP KASAN NOPTI
[  +0.005034] CPU: 10 PID: 9229 Comm: amd_basic Tainted: G    B   W    L     6.7.0+ #4
[  +0.007797] Hardware name: ASUS System Product Name/ROG STRIX B550-F GAMING (WI-FI), BIOS 1401 12/03/2020
[  +0.009798] RIP: 0010:drm_sched_entity_init+0x2d3/0x420 [gpu_sched]
[  +0.006426] Code: 80 00 00 00 00 00 00 00 e8 1a 81 82 e0 49 89 9c 24 c0 00 00 00 4c 89 ef e8 4a 80 82 e0 49 8b 5d 00 48 8d 7b 28 e8 3d 80 82 e0 <48> 83 7b 28 00 0f 84 28 01 00 00 4d 8d ac 24 98 00 00 00 49 8d 5c
[  +0.019094] RSP: 0018:ffffc90014c1fa40 EFLAGS: 00010282
[  +0.005237] RAX: 0000000000000001 RBX: 0000000000000000 RCX: ffffffff8113f3fa
[  +0.007326] RDX: fffffbfff0a7889d RSI: 0000000000000008 RDI: ffffffff853c44e0
[  +0.007264] RBP: ffffc90014c1fa80 R08: 0000000000000001 R09: fffffbfff0a7889c
[  +0.007266] R10: ffffffff853c44e7 R11: 0000000000000001 R12: ffff8881a719b010
[  +0.007263] R13: ffff88810d412748 R14: 0000000000000002 R15: 0000000000000000
[  +0.007264] FS:  00007ffff7045540(0000) GS:ffff8883cc900000(0000) knlGS:0000000000000000
[  +0.008236] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  +0.005851] CR2: 0000000000000028 CR3: 000000011912e000 CR4: 0000000000350ef0
[  +0.007175] Call Trace:
[  +0.002561]  <TASK>
[  +0.002141]  ? show_regs+0x6a/0x80
[  +0.003473]  ? __die+0x25/0x70
[  +0.003124]  ? page_fault_oops+0x214/0x720
[  +0.004179]  ? preempt_count_sub+0x18/0xc0
[  +0.004093]  ? __pfx_page_fault_oops+0x10/0x10
[  +0.004590]  ? srso_return_thunk+0x5/0x5f
[  +0.004000]  ? vprintk_default+0x1d/0x30
[  +0.004063]  ? srso_return_thunk+0x5/0x5f
[  +0.004087]  ? vprintk+0x5c/0x90
[  +0.003296]  ? drm_sched_entity_init+0x2d3/0x420 [gpu_sched]
[  +0.005807]  ? srso_return_thunk+0x5/0x5f
[  +0.004090]  ? _printk+0xb3/0xe0
[  +0.003293]  ? __pfx__printk+0x10/0x10
[  +0.003735]  ? asm_sysvec_apic_timer_interrupt+0x1b/0x20
[  +0.005482]  ? do_user_addr_fault+0x345/0x770
[  +0.004361]  ? exc_page_fault+0x64/0xf0
[  +0.003972]  ? asm_exc_page_fault+0x27/0x30
[  +0.004271]  ? add_taint+0x2a/0xa0
[  +0.003476]  ? drm_sched_entity_init+0x2d3/0x420 [gpu_sched]
[  +0.005812]  amdgpu_ctx_get_entity+0x3f9/0x770 [amdgpu]
[  +0.009530]  ? finish_task_switch.isra.0+0x129/0x470
[  +0.005068]  ? __pfx_amdgpu_ctx_get_entity+0x10/0x10 [amdgpu]
[  +0.010063]  ? __kasan_check_write+0x14/0x20
[  +0.004356]  ? srso_return_thunk+0x5/0x5f
[  +0.004001]  ? mutex_unlock+0x81/0xd0
[  +0.003802]  ? srso_return_thunk+0x5/0x5f
[  +0.004096]  amdgpu_cs_wait_ioctl+0xf6/0x270 [amdgpu]
[  +0.009355]  ? __pfx_amdgpu_cs_wait_ioctl+0x10/0x10 [amdgpu]
[  +0.009981]  ? srso_return_thunk+0x5/0x5f
[  +0.004089]  ? srso_return_thunk+0x5/0x5f
[  +0.004090]  ? __srcu_read_lock+0x20/0x50
[  +0.004096]  drm_ioctl_kernel+0x140/0x1f0 [drm]
[  +0.005080]  ? __pfx_amdgpu_cs_wait_ioctl+0x10/0x10 [amdgpu]
[  +0.009974]  ? __pfx_drm_ioctl_kernel+0x10/0x10 [drm]
[  +0.005618]  ? srso_return_thunk+0x5/0x5f
[  +0.004088]  ? __kasan_check_write+0x14/0x20
[  +0.004357]  drm_ioctl+0x3da/0x730 [drm]
[  +0.004461]  ? __pfx_amdgpu_cs_wait_ioctl+0x10/0x10 [amdgpu]
[  +0.009979]  ? __pfx_drm_ioctl+0x10/0x10 [drm]
[  +0.004993]  ? srso_return_thunk+0x5/0x5f
[  +0.004090]  ? __kasan_check_write+0x14/0x20
[  +0.004356]  ? srso_return_thunk+0x5/0x5f
[  +0.004090]  ? _raw_spin_lock_irqsave+0x99/0x100
[  +0.004712]  ? __pfx__raw_spin_lock_irqsave+0x10/0x10
[  +0.005063]  ? __pfx_arch_do_signal_or_restart+0x10/0x10
[  +0.005477]  ? srso_return_thunk+0x5/0x5f
[  +0.004000]  ? preempt_count_sub+0x18/0xc0
[  +0.004237]  ? srso_return_thunk+0x5/0x5f
[  +0.004090]  ? _raw_spin_unlock_irqrestore+0x27/0x50
[  +0.005069]  amdgpu_drm_ioctl+0x7e/0xe0 [amdgpu]
[  +0.008912]  __x64_sys_ioctl+0xcd/0x110
[  +0.003918]  do_syscall_64+0x5f/0xe0
[  +0.003649]  ? noist_exc_debug+0xe6/0x120
[  +0.004095]  entry_SYSCALL_64_after_hwframe+0x6e/0x76
[  +0.005150] RIP: 0033:0x7ffff7b1a94f
[  +0.003647] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00
[  +0.019097] RSP: 002b:00007fffffffe0a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  +0.007708] RAX: ffffffffffffffda RBX: 000055555558b360 RCX: 00007ffff7b1a94f
[  +0.007176] RDX: 000055555558b360 RSI: 00000000c0206449 RDI: 0000000000000003
[  +0.007326] RBP: 00000000c0206449 R08: 000055555556ded0 R09: 000000007fffffff
[  +0.007176] R10: 0000000000000000 R11: 0000000000000246 R12: 00007fffffffe5d8
[  +0.007238] R13: 0000000000000003 R14: 000055555555cba8 R15: 00007ffff7ffd040
[  +0.007250]  </TASK>

v2: Reworked check to guard against null ptr deref and added helpful comments
    (Christian)

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Cc: Joonkyo Jung <joonkyoj@yonsei.ac.kr>
Cc: Dokyung Song <dokyungs@yonsei.ac.kr>
Cc: <jisoo.jang@yonsei.ac.kr>
Cc: <yw9865@yonsei.ac.kr>
Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Fixes: 56e449603f ("drm/sched: Convert the GPU scheduler to variable number of run-queues")
Link: https://patchwork.freedesktop.org/patch/msgid/20240315023926.343164-1-vitaly.prosyak@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
2024-03-15 15:29:26 +01:00


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before
	 * it is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers not to do this and to fix their DRM
		 * calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0,
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
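
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * a minimal way a driver might initialize an entity against a single
 * scheduler instance. 'my_sched' and 'my_entity' are hypothetical
 * driver-owned objects.
 */
#if 0
static int my_driver_entity_setup(struct drm_gpu_scheduler *my_sched,
				  struct drm_sched_entity *my_entity)
{
	struct drm_gpu_scheduler *sched_list[] = { my_sched };

	/* No guilty tracking (NULL); use the normal-priority run-queue. */
	return drm_sched_entity_init(my_entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list), NULL);
}
#endif
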
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
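
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * pinning an entity to a single scheduler, as some drivers do for engine
 * affinity. 'my_pick' is hypothetical; the caller is assumed to hold the
 * same lock as drm_sched_job_arm()/drm_sched_entity_push_job(), per the
 * kernel-doc above.
 */
#if 0
static void my_pin_entity(struct drm_sched_entity *entity,
			  struct drm_gpu_scheduler **my_pick)
{
	drm_sched_entity_modify_sched(entity, my_pick, 1);
}
#endif
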
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
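
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * how a driver might consult the last error before accepting new work on an
 * entity. 'my_entity' is hypothetical.
 */
#if 0
static int my_check_entity(struct drm_sched_entity *my_entity)
{
	int r = drm_sched_entity_error(my_entity);

	/* e.g. -ECANCELED if a job on a guilty context was skipped */
	if (r)
		return r;

	return 0;
}
#endif
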
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty.
 *
 * Splits drm_sched_entity_fini() into two functions: this first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini: consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
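
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * the two-step teardown the kernel-doc above describes. The one-second
 * timeout and 'my_entity' are hypothetical; drm_sched_entity_destroy()
 * below wraps the same sequence with MAX_WAIT_SCHED_ENTITY_Q_EMPTY.
 */
#if 0
static void my_entity_teardown(struct drm_sched_entity *my_entity)
{
	/* Step 1: wait for queued jobs to drain (or be killed on SIGKILL). */
	drm_sched_entity_flush(my_entity, msecs_to_jiffies(1000));
	/* Step 2: signal any remaining jobs with an error and clean up. */
	drm_sched_entity_fini(my_entity);
}
#endif
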
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched, entity);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
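
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * raising an entity's priority at runtime, e.g. in response to a userspace
 * context-priority override. 'my_entity' is hypothetical.
 */
#if 0
static void my_boost_entity(struct drm_sched_entity *my_entity)
{
	drm_sched_entity_set_priority(my_entity, DRM_SCHED_PRIORITY_HIGH);
}
#endif
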
/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup(entity->rq->sched, entity);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
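
/*
 * Example (editor's sketch, not part of the upstream file, compiled out):
 * the submission flow the kernel-doc above describes. 'my_job' and 'my_lock'
 * are hypothetical driver objects; arming and pushing happen under one
 * common lock so fence sequence numbers match queue order.
 */
#if 0
static void my_driver_submit(struct drm_sched_job *my_job,
			     struct mutex *my_lock)
{
	mutex_lock(my_lock);
	drm_sched_job_arm(my_job);		/* allocate the job's fences */
	drm_sched_entity_push_job(my_job);	/* queue it; may wake the scheduler */
	mutex_unlock(my_lock);
}
#endif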