linux/drivers/gpu/drm/xe/xe_gpu_scheduler.c
Matthew Brost dd08ebf6c3 drm/xe: Introduce a new DRM driver for Intel GPUs
Xe is a new driver for Intel GPUs that supports both integrated and
discrete platforms, starting with Tiger Lake (the first Intel Xe Architecture).

The code is already functional, with experimental support for multiple
platforms starting from Tiger Lake, and with initial userspace support
implemented in Mesa (for Iris and Anv, our OpenGL and Vulkan drivers)
as well as in NEO (for OpenCL and Level Zero).

The new Xe driver leverages a lot from i915.

As for display, the intent is to share the display code with the i915
driver to maximize reuse; that code is not included in this patch.

This initial work is a collaboration of many people, and unfortunately
the big squashed patch won't fully honor the proper credits. But let's
get some quick git stats so we can at least try to preserve some of
the credits:

Co-developed-by: Matthew Brost <matthew.brost@intel.com>
Co-developed-by: Matthew Auld <matthew.auld@intel.com>
Co-developed-by: Matt Roper <matthew.d.roper@intel.com>
Co-developed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Co-developed-by: Francois Dugast <francois.dugast@intel.com>
Co-developed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Co-developed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Co-developed-by: Philippe Lecluse <philippe.lecluse@intel.com>
Co-developed-by: Nirmoy Das <nirmoy.das@intel.com>
Co-developed-by: Jani Nikula <jani.nikula@intel.com>
Co-developed-by: José Roberto de Souza <jose.souza@intel.com>
Co-developed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Co-developed-by: Dave Airlie <airlied@redhat.com>
Co-developed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Co-developed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Co-developed-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
2023-12-12 14:05:48 -05:00


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

/* Kick the message-processing worker unless submission is paused. */
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

/* Re-kick the worker if any messages are still pending. */
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	spin_lock(&sched->base.job_list_lock);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	spin_unlock(&sched->base.job_list_lock);
}

/* Pop the first pending message off the queue, or return NULL if empty. */
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	spin_lock(&sched->base.job_list_lock);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		list_del(&msg->link);
	spin_unlock(&sched->base.job_list_lock);

	return msg;
}

/* Worker: process one message, then re-kick itself if more are pending. */
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned int hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
			      hang_limit, timeout, timeout_wq, score, name,
			      dev);
}

void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

/* Queue a message for the backend and kick the worker to process it. */
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	spin_lock(&sched->base.job_list_lock);
	list_add_tail(&msg->link, &sched->msgs);
	spin_unlock(&sched->base.job_list_lock);

	xe_sched_process_msg_queue(sched);
}
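
For context, the message queue above is how an Xe submission backend funnels out-of-band work onto the scheduler's workqueue: xe_sched_add_msg() enqueues a message, and the worker hands each one to the backend's process_msg() callback, serialized with job submission when submit_wq is an ordered workqueue. Below is a minimal sketch of how a backend might wire this up, assuming the opcode and private_data fields of struct xe_sched_msg; the example_* names and the EXAMPLE_MSG_CLEANUP opcode are illustrative assumptions, not part of this file (in the driver, the GuC submission backend plays this role).

/* Illustrative sketch only -- not part of xe_gpu_scheduler.c. */
#include "xe_gpu_scheduler.h"

#define EXAMPLE_MSG_CLEANUP	0	/* hypothetical opcode */

/* Runs on the scheduler's submit_wq via xe_sched_process_msg_work(). */
static void example_process_msg(struct xe_sched_msg *msg)
{
	switch (msg->opcode) {
	case EXAMPLE_MSG_CLEANUP:
		/* msg->private_data carries backend-specific state */
		break;
	}
}

static const struct xe_sched_backend_ops example_xe_ops = {
	.process_msg = example_process_msg,
};

/* Posting a message from some backend path: */
static void example_post_cleanup(struct xe_gpu_scheduler *sched,
				 struct xe_sched_msg *msg, void *state)
{
	msg->private_data = state;
	msg->opcode = EXAMPLE_MSG_CLEANUP;
	xe_sched_add_msg(sched, msg);	/* worker calls example_process_msg() */
}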