linux/drivers/gpu/drm/xe/xe_sched_job.h
Nirmoy Das 5dffaa1bb9 drm/xe: Create a helper function to init job's user fence
Refactor xe_sync_entry_signal so that it doesn't have to modify the
xe_sched_job struct; instead, create a new helper function to set
user fence values for a job.

v2: Move the sync type check to xe_sched_job_init_user_fence (Lucas)

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240321161142.4954-1-nirmoy.das@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
2024-03-26 15:40:19 -07:00
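For context, here is a minimal sketch of the helper this commit introduces, based only on the commit description above. The sync-type check (moved into the helper in v2) matches the description; the user_fence field names (used, addr, value) and the xe_sync_entry fields are assumptions about xe_sched_job.c and xe_sync.h, not something this header defines.

void xe_sched_job_init_user_fence(struct xe_sched_job *job,
				  struct xe_sync_entry *sync)
{
	/* v2: the sync type check lives in the helper, not the caller */
	if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
		return;

	/* Assumed field names: record the user fence values for this job */
	job->user_fence.used = true;
	job->user_fence.addr = sync->addr;
	job->user_fence.value = sync->timeline_value;
}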


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_SCHED_JOB_H_
#define _XE_SCHED_JOB_H_

#include "xe_sched_job_types.h"

struct drm_printer;
struct xe_vm;
struct xe_sync_entry;

#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX

int xe_sched_job_module_init(void);
void xe_sched_job_module_exit(void);

struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
					 u64 *batch_addr);
void xe_sched_job_destroy(struct kref *ref);

/**
 * xe_sched_job_get - get reference to XE schedule job
 * @job: XE schedule job object
 *
 * Increment XE schedule job's reference count
 */
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
	kref_get(&job->refcount);
	return job;
}

/**
 * xe_sched_job_put - put reference to XE schedule job
 * @job: XE schedule job object
 *
 * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
 * reference count == 0.
 */
static inline void xe_sched_job_put(struct xe_sched_job *job)
{
	kref_put(&job->refcount, xe_sched_job_destroy);
}

void xe_sched_job_set_error(struct xe_sched_job *job, int error);
static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
{
	return job->fence->error < 0;
}

bool xe_sched_job_started(struct xe_sched_job *job);
bool xe_sched_job_completed(struct xe_sched_job *job);

void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);

int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);

void xe_sched_job_init_user_fence(struct xe_sched_job *job,
				  struct xe_sync_entry *sync);

static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
{
	return container_of(drm, struct xe_sched_job, drm);
}

static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
{
	return job->fence->seqno;
}

static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
	job->migrate_flush_flags = flags;
}

bool xe_sched_job_is_migration(struct xe_exec_queue *q);

struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);

#endif
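As a usage note, the inline xe_sched_job_get/xe_sched_job_put helpers above follow the standard kref pattern documented in their kernel-doc comments. A hypothetical caller would pair them like this (the surrounding caller code is illustrative, not taken from the driver):

	/* Hold a reference for as long as the job is being used */
	struct xe_sched_job *ref = xe_sched_job_get(job);

	/* ... work with the job ... */

	/* Drop the reference; the final put calls xe_sched_job_destroy() */
	xe_sched_job_put(ref);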