For non-d3cold-capable devices we'd like to be able to wake up the
device from reclaim. In particular, for Lunar Lake we'd like to be able
to blit CCS metadata to system at shrink time; at least from kswapd,
where it's reasonably OK to wait for rpm resume and a preceding rpm
suspend.

Therefore use a separate lockdep map for such devices and prime it
reclaim-tainted.

v2:
- Rename lockmap acquire and release functions. (Rodrigo Vivi)
- Reinstate the old xe_pm_runtime_lockdep_prime() function and rename
  it to xe_rpm_might_enter_cb(). (Matthew Auld)
- Introduce a separate xe_pm_runtime_lockdep_prime() function called
  from module init for known required locking orders.
v3:
- Actually hook up the prime function at module init.
v4:
- Rebase.
v5:
- Don't use reclaim-safe RPM with SR-IOV.

Cc: "Vivi, Rodrigo" <rodrigo.vivi@intel.com>
Cc: "Auld, Matthew" <matthew.auld@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240826143450.92511-1-thomas.hellstrom@linux.intel.com
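For context, here is a minimal sketch of the module-init priming described
above, under stated assumptions: the lockdep map name
xe_pm_runtime_nod3cold_map and the body of the priming are illustrative, not
the actual xe_pm.c implementation; only xe_pm_module_init() is taken from the
xe_module.c listing below, where it is hooked into the driver's init_funcs
table.

/*
 * Hedged sketch only -- not the actual xe_pm.c code. Illustrates how a
 * lockdep map for non-d3cold-capable devices could be primed as
 * reclaim-tainted at module init. The map name and priming body are
 * assumptions; the real prototype of xe_pm_module_init() lives in xe_pm.h.
 */
#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/sched/mm.h>	/* fs_reclaim_acquire()/fs_reclaim_release() */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

int xe_pm_module_init(void)
{
	/*
	 * Record the ordering "reclaim -> rpm" once, up front: taking the
	 * rpm map while fs_reclaim is held tells lockdep that runtime
	 * resume/suspend may run from reclaim (e.g. kswapd shrinking).
	 * Any later GFP_KERNEL allocation made with the map held would
	 * invert that order and be reported as a potential deadlock.
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}

In the listing below, xe_pm_module_init() is simply one more entry in
init_funcs, so the priming runs exactly once at module load, before any
device is probed.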
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_module.h"

#include <linux/init.h>
#include <linux/module.h>

#include <drm/drm_module.h>

#include "xe_drv.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
#include "xe_observation.h"
#include "xe_sched_job.h"

struct xe_modparam xe_modparam = {
	.probe_display = true,
	.guc_log_level = 5,
	.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
	.wedged_mode = 1,
	/* the rest are 0 by default */
};

module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");

module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");

module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");

module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");

module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
		 "GuC firmware path to use instead of the default one");

module_param_named_unsafe(huc_firmware_path, xe_modparam.huc_firmware_path, charp, 0400);
MODULE_PARM_DESC(huc_firmware_path,
		 "HuC firmware path to use instead of the default one - empty string disables");

module_param_named_unsafe(gsc_firmware_path, xe_modparam.gsc_firmware_path, charp, 0400);
MODULE_PARM_DESC(gsc_firmware_path,
		 "GSC firmware path to use instead of the default one - empty string disables");

module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
		 "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");

#ifdef CONFIG_PCI_IOV
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
		 "Limit number of Virtual Functions (VFs) that could be managed. "
		 "(0 = no VFs [default]; N = allow up to N VFs)");
#endif

module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
		 "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");

static int xe_check_nomodeset(void)
{
	if (drm_firmware_drivers_only())
		return -ENODEV;

	return 0;
}

struct init_funcs {
	int (*init)(void);
	void (*exit)(void);
};

static void xe_dummy_exit(void)
{
}

static const struct init_funcs init_funcs[] = {
	{
		.init = xe_check_nomodeset,
	},
	{
		.init = xe_hw_fence_module_init,
		.exit = xe_hw_fence_module_exit,
	},
	{
		.init = xe_sched_job_module_init,
		.exit = xe_sched_job_module_exit,
	},
	{
		.init = xe_register_pci_driver,
		.exit = xe_unregister_pci_driver,
	},
	{
		.init = xe_observation_sysctl_register,
		.exit = xe_observation_sysctl_unregister,
	},
	{
		.init = xe_pm_module_init,
		.exit = xe_dummy_exit,
	},
};

static int __init xe_call_init_func(unsigned int i)
{
	if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
		return 0;
	if (!init_funcs[i].init)
		return 0;

	return init_funcs[i].init();
}

static void xe_call_exit_func(unsigned int i)
{
	if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
		return;
	if (!init_funcs[i].exit)
		return;

	init_funcs[i].exit();
}

static int __init xe_init(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
		err = xe_call_init_func(i);
		if (err) {
			while (i--)
				xe_call_exit_func(i);
			return err;
		}
	}

	return 0;
}

static void __exit xe_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
		xe_call_exit_func(i);
}

module_init(xe_init);
module_exit(xe_exit);

MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");