1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00
linux/drivers/gpu/drm/nouveau/nouveau_sched.h
Danilo Krummrich 9a0c32d698 drm/nouveau: don't fini scheduler if not initialized
nouveau_abi16_ioctl_channel_alloc() and nouveau_cli_init() simply call
their corresponding *_fini() counterparts. This can lead to
nouveau_sched_fini() being called without struct nouveau_sched ever
being initialized in the first place.

Instead of embedding struct nouveau_sched into struct nouveau_cli and
struct nouveau_chan_abi16, allocate struct nouveau_sched separately,
such that we can check for the corresponding pointer to be NULL in the
particular *_fini() functions.

It makes sense to allocate struct nouveau_sched separately anyway, since
in a subsequent commit we can also avoid allocating a struct
nouveau_sched in nouveau_abi16_ioctl_channel_alloc() at all, if the
VM_BIND uAPI has been disabled due to the legacy uAPI being used.

Fixes: 5f03a507b2 ("drm/nouveau: implement 1:1 scheduler - entity relationship")
Reported-by: Timur Tabi <ttabi@nvidia.com>
Tested-by: Timur Tabi <ttabi@nvidia.com>
Closes: https://lore.kernel.org/nouveau/20240131213917.1545604-1-ttabi@nvidia.com/
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240202000606.3526-1-dakr@redhat.com
2024-02-12 11:40:46 +01:00

118 lines
2.5 KiB
C

/* SPDX-License-Identifier: MIT */
#ifndef NOUVEAU_SCHED_H
#define NOUVEAU_SCHED_H
#include <linux/types.h>
#include <drm/drm_gpuvm.h>
#include <drm/gpu_scheduler.h>
#include "nouveau_drv.h"
/* Convert a struct drm_sched_job pointer back to its containing
 * struct nouveau_job (the drm_sched_job is embedded as member 'base').
 */
#define to_nouveau_job(sched_job) \
container_of((sched_job), struct nouveau_job, base)
struct nouveau_job_ops;
/* Lifecycle states of a struct nouveau_job. A job starts out
 * UNINITIALIZED, becomes INITIALIZED after nouveau_job_init(), and the
 * submit and run stages each record a distinct success or failure state.
 */
enum nouveau_job_state {
NOUVEAU_JOB_UNINITIALIZED = 0,
NOUVEAU_JOB_INITIALIZED,
NOUVEAU_JOB_SUBMIT_SUCCESS,
NOUVEAU_JOB_SUBMIT_FAILED,
NOUVEAU_JOB_RUN_SUCCESS,
NOUVEAU_JOB_RUN_FAILED,
};
/* Arguments used by nouveau_job_init() to set up a struct nouveau_job. */
struct nouveau_job_args {
/* DRM file the job is submitted on behalf of */
struct drm_file *file_priv;
/* scheduler instance this job will be queued to */
struct nouveau_sched *sched;
/* scheduler credits consumed by this job */
u32 credits;
/* dma_resv usage for fences attached by this job */
enum dma_resv_usage resv_usage;
/* if true, wait for job completion synchronously */
bool sync;
/* syncs to wait on before running the job */
struct {
struct drm_nouveau_sync *s;
u32 count;
} in_sync;
/* syncs to signal once the job completed */
struct {
struct drm_nouveau_sync *s;
u32 count;
} out_sync;
/* backend callbacks implementing the job (see struct nouveau_job_ops) */
struct nouveau_job_ops *ops;
};
/* A single unit of work tracked by the nouveau scheduler; wraps a
 * struct drm_sched_job and carries the sync objects and callbacks
 * needed to run it.
 */
struct nouveau_job {
/* embedded DRM scheduler job; see to_nouveau_job() */
struct drm_sched_job base;
/* current lifecycle state (see enum nouveau_job_state) */
enum nouveau_job_state state;
/* scheduler instance this job belongs to */
struct nouveau_sched *sched;
/* entry in the scheduler's job list (nouveau_sched::job.list) */
struct list_head entry;
struct drm_file *file_priv;
struct nouveau_cli *cli;
enum dma_resv_usage resv_usage;
/* fence signalled once the job has completed */
struct dma_fence *done_fence;
/* if true, job submission waits for completion */
bool sync;
/* syncs to wait on before running the job */
struct {
struct drm_nouveau_sync *data;
u32 count;
} in_sync;
/* syncs (syncobjs plus fence chains) signalled on completion */
struct {
struct drm_nouveau_sync *data;
struct drm_syncobj **objs;
struct dma_fence_chain **chains;
u32 count;
} out_sync;
/* backend callbacks implementing the job's behavior */
struct nouveau_job_ops {
/* If .submit() returns without any error, it is guaranteed that
 * armed_submit() is called.
 */
int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
/* execute the job; returns the fence tracking its completion */
struct dma_fence *(*run)(struct nouveau_job *);
/* release job resources */
void (*free)(struct nouveau_job *);
/* handle a job timeout reported by the DRM scheduler */
enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
} *ops;
};
/* Copy in_sync/out_sync arrays from userspace (ins/outs are user
 * pointers, inc/outc the respective element counts) into @args.
 */
int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
u32 inc, u64 ins,
u32 outc, u64 outs);
/* Initialize @job from @args; pairs with nouveau_job_fini(). */
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
void nouveau_job_fini(struct nouveau_job *job);
/* Queue @job to its scheduler; returns 0 on success. */
int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_done(struct nouveau_job *job);
void nouveau_job_free(struct nouveau_job *job);
/* Per-entity scheduler instance: a DRM GPU scheduler with a single
 * entity (1:1 relationship) plus bookkeeping for in-flight jobs.
 */
struct nouveau_sched {
/* underlying DRM GPU scheduler */
struct drm_gpu_scheduler base;
/* the single entity served by this scheduler */
struct drm_sched_entity entity;
/* workqueue the scheduler runs on */
struct workqueue_struct *wq;
struct mutex mutex;
struct {
/* list of jobs tracked by this scheduler, with its own lock */
struct {
struct list_head head;
spinlock_t lock;
} list;
/* waitqueue signalled as tracked jobs complete */
struct wait_queue_head wq;
} job;
};
/* Allocate and initialize a scheduler instance, storing it in *@psched.
 * The separate allocation allows callers' *_fini() paths to detect an
 * uninitialized scheduler via a NULL pointer check.
 */
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
struct workqueue_struct *wq, u32 credit_limit);
/* Tear down and free *@psched. */
void nouveau_sched_destroy(struct nouveau_sched **psched);
#endif