linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
Jesse.zhang@amd.com 2f1b13521d drm/amdgpu: Fix sysfs warning when hotplugging
Fix a similar sysfs warning when hotplugging:

[  155.585721] kernfs: can not remove 'enforce_isolation', no directory
[  155.592201] WARNING: CPU: 3 PID: 6960 at fs/kernfs/dir.c:1683 kernfs_remove_by_name_ns+0xb9/0xc0
[  155.601145] Modules linked in: xt_MASQUERADE xt_comment nft_compat veth bridge stp llc overlay nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables nfnetlink qrtr intel_rapl_msr amd_atl intel_rapl_common amd64_edac edac_mce_amd amdgpu kvm_amd kvm ipmi_ssif amdxcp rapl drm_exec gpu_sched drm_buddy i2c_algo_bit drm_suballoc_helper drm_ttm_helper ttm pcspkr drm_display_helper acpi_cpufreq drm_kms_helper video wmi k10temp i2c_piix4 acpi_ipmi ipmi_si drm zram ip_tables loop squashfs dm_multipath crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel sha512_ssse3 sha256_ssse3 sha1_ssse3 sp5100_tco ixgbe rfkill ccp dca sunrpc be2iscsi bnx2i cnic uio cxgb4i cxgb4 tls cxgb3i cxgb3 mdio libcxgbi libcxgb qla4xxx iscsi_boot_sysfs iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ipmi_devintf ipmi_msghandler fuse
[  155.685687] CPU: 3 PID: 6960 Comm: amd_pci_unplug Not tainted 6.10.0-1148853.1.zuul.164395107d6642bdb451071313e9378d #1
[  155.704149] Hardware name: TYAN B8021G88V2HR-2T/S8021GM2NR-2T, BIOS V1.03.B10 04/01/2019
[  155.712383] RIP: 0010:kernfs_remove_by_name_ns+0xb9/0xc0
[  155.717805] Code: a0 00 48 89 ef e8 37 96 c7 ff 5b b8 fe ff ff ff 5d 41 5c 41 5d e9 f7 96 a0 00 0f 0b eb ab 48 c7 c7 48 ba 7e 8f e8 f7 66 bf ff <0f> 0b eb dc 0f 1f 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90
[  155.736766] RSP: 0018:ffffb1685d7a3e20 EFLAGS: 00010296
[  155.742108] RAX: 0000000000000038 RBX: ffff929e94c80000 RCX: 0000000000000000
[  155.749363] RDX: ffff928e1efaf200 RSI: ffff928e1efa18c0 RDI: ffff928e1efa18c0
[  155.756612] RBP: 0000000000000008 R08: 0000000000000000 R09: 0000000000000003
[  155.763855] R10: ffffb1685d7a3cd8 R11: ffffffff8fb3e1c8 R12: ffffffffc1ef5341
[  155.771104] R13: ffff929e94cc5530 R14: 0000000000000000 R15: 0000000000000000
[  155.778357] FS:  00007fd9dd8d9c40(0000) GS:ffff928e1ef80000(0000) knlGS:0000000000000000
[  155.786594] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  155.792450] CR2: 0000561245ceee38 CR3: 0000000113018000 CR4: 00000000003506f0
[  155.799702] Call Trace:
[  155.802254]  <TASK>
[  155.804460]  ? __warn+0x80/0x120
[  155.807798]  ? kernfs_remove_by_name_ns+0xb9/0xc0
[  155.812617]  ? report_bug+0x164/0x190
[  155.816393]  ? handle_bug+0x3c/0x80
[  155.819994]  ? exc_invalid_op+0x17/0x70
[  155.823939]  ? asm_exc_invalid_op+0x1a/0x20
[  155.828235]  ? kernfs_remove_by_name_ns+0xb9/0xc0
[  155.833058]  amdgpu_gfx_sysfs_fini+0x59/0xd0 [amdgpu]
[  155.838637]  gfx_v9_0_sw_fini+0x123/0x1c0 [amdgpu]
[  155.843887]  amdgpu_device_fini_sw+0xbc/0x3e0 [amdgpu]
[  155.849432]  amdgpu_driver_release_kms+0x16/0x30 [amdgpu]
[  155.855235]  drm_dev_put.part.0+0x3c/0x60 [drm]
[  155.859914]  drm_release+0x8b/0xc0 [drm]
[  155.863978]  __fput+0xf1/0x2c0
[  155.867141]  __x64_sys_close+0x3c/0x80
[  155.870998]  do_syscall_64+0x64/0x170

V2: Add details in comments (Tim)

Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
Reported-by: Andy Dong <andy.dong@amd.com>
Reviewed-by: Tim Huang <tim.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2024-11-21 15:56:22 -05:00
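
The fix mirrors the earlier gfx-side change: sysfs removal is guarded by a
check that the device's kernfs directory still exists. A minimal sketch of
the pattern (the attribute name is hypothetical, for illustration only):

	static void example_sysfs_fini(struct amdgpu_device *adev)
	{
		/* adev->dev->kobj.sd is NULL once the kobject's sysfs
		 * directory has been torn down (e.g. after hot unplug);
		 * removing an attribute at that point triggers the
		 * kernfs_remove_by_name_ns() warning shown above.
		 */
		if (adev->dev->kobj.sd)
			device_remove_file(adev->dev, &dev_attr_example);
	}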

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)
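/*
 * Worked example (illustrative, derived from the macros above): with
 * AMDGPU_CSA_SDMA_OFFSET = 8192 and AMDGPU_CSA_SDMA_SIZE = 64, SDMA
 * instance 3 places its CSA at csa_vaddr + 8192 + 3 * 64.
 */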

/*
 * GPU SDMA IP block helper functions.
 */
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}
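
/*
 * Illustrative caller (a sketch, not from this file): ring backends use
 * this lookup to recover per-instance state, e.g. to decide whether the
 * loaded firmware supports burst NOP padding:
 *
 *	struct amdgpu_sdma_instance *sdma =
 *		amdgpu_sdma_get_instance_from_ring(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		// pad the ring with a single burst NOP packet
 */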

int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  sdma[ring->idx].sdma_meta_data);
		csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		r = amdgpu_sdma_get_index_from_ring(ring, &index);

		if (r || index > 31)
			csa_mc_addr = 0;
		else
			csa_mc_addr = amdgpu_csa_vaddr(adev) +
				AMDGPU_CSA_SDMA_OFFSET +
				index * AMDGPU_CSA_SDMA_SIZE;
	}

	return csa_mc_addr;
}

int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
					   AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
				    void *err_data,
				    struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (amdgpu_sriov_vf(adev))
		return AMDGPU_RAS_SUCCESS;

	amdgpu_ras_reset_gpu(adev);
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
	uint16_t version_major;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;
	const struct sdma_firmware_header_v2_0 *hdr_v2;
	const struct sdma_firmware_header_v3_0 *hdr_v3;

	header = (const struct common_firmware_header *)
		sdma_inst->fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	switch (version_major) {
	case 1:
		hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
		break;
	case 2:
		hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
		break;
	case 3:
		hdr_v3 = (const struct sdma_firmware_header_v3_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v3->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v3->ucode_feature_version);
		break;
	default:
		return -EINVAL;
	}

	if (sdma_inst->feature_version >= 20)
		sdma_inst->burst_nop = true;

	return 0;
}
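
/*
 * Note (added for clarity): when the firmware was requested with
 * duplicate == true, every instance[i].fw points at the same
 * struct firmware (see amdgpu_sdma_init_microcode() below), so only
 * the first reference is released before the loop breaks.
 */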
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
				  bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
		if (duplicate)
			break;
	}

	memset((void *)adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}

int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
			       u32 instance, bool duplicate)
{
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	int err, i;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;
	const struct sdma_firmware_header_v3_0 *sdma_hv3;
	uint16_t version_major;
	char ucode_prefix[30];

	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
	if (instance == 0)
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   "amdgpu/%s.bin", ucode_prefix);
	else
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   "amdgpu/%s%d.bin", ucode_prefix, instance);
	if (err)
		goto out;

	header = (const struct common_firmware_header *)
		adev->sdma.instance[instance].fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
	if (err)
		goto out;

	if (duplicate) {
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy((void *)&adev->sdma.instance[i],
			       (void *)&adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));
	}

	DRM_DEBUG("psp_load == '%s'\n",
		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		switch (version_major) {
		case 1:
			for (i = 0; i < adev->sdma.num_instances; i++) {
				if (!duplicate && (instance != i))
					continue;
				else {
					/* Use a single copy per SDMA firmware type.
					 * PSP uses the same instance for all groups of SDMAs.
					 */
					if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
					     IP_VERSION(4, 4, 2) ||
					     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
					     IP_VERSION(4, 4, 5)) &&
					    adev->firmware.load_type ==
					    AMDGPU_FW_LOAD_PSP &&
					    adev->sdma.num_inst_per_aid == i) {
						break;
					}
					info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
					info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
					info->fw = adev->sdma.instance[i].fw;
					adev->firmware.fw_size +=
						ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
				}
			}
			break;
		case 2:
			sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
			break;
		case 3:
			sdma_hv3 = (const struct sdma_firmware_header_v3_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_RS64];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_RS64;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
			break;
		default:
			err = -EINVAL;
		}
	}

out:
	if (err)
		amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
	return err;
}
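
/*
 * Illustrative caller (a sketch, not from this file): an SDMA IP
 * version's sw_init typically loads one shared image when all
 * instances run identical firmware:
 *
 *	r = amdgpu_sdma_init_microcode(adev, 0, true);
 *	if (r)
 *		return r;
 */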

int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_sdma_ras *ras = NULL;

	/* adev->sdma.ras is NULL, which means SDMA does not
	 * support RAS; do nothing here.
	 */
	if (!adev->sdma.ras)
		return 0;

	ras = adev->sdma.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register sdma ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "sdma");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->sdma.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the default */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

	/* If no special ras_cb function is defined, use the default */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;

	return 0;
}

/*
 * debugfs to enable/disable SDMA job submission to a specific core.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1 << adev->sdma.num_instances) - 1;
	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (val & (1 << i))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;
	}
/* publish sched.ready flag update effective immediately across smp */
smp_rmb();
return 0;
}

static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (ring->sched.ready)
			mask |= 1 << i;
	}

	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
			 amdgpu_debugfs_sdma_sched_mask_get,
			 amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");

#endif
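
/*
 * Illustrative usage from userspace (a sketch; the debugfs path assumes
 * the default DRM minor numbering under /sys/kernel/debug/dri):
 *   cat /sys/kernel/debug/dri/0/amdgpu_sdma_sched_mask
 *   echo 0x3 > /sys/kernel/debug/dri/0/amdgpu_sdma_sched_mask
 */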

void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (!(adev->sdma.num_instances > 1))
		return;

	sprintf(name, "amdgpu_sdma_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_sdma_sched_mask_fops);
#endif
}

static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
}

static DEVICE_ATTR(sdma_reset_mask, 0444,
		   amdgpu_get_sdma_reset_mask, NULL);
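
/*
 * Illustrative usage (a sketch; the sysfs node lives in the PCI device
 * directory once amdgpu_sdma_sysfs_reset_mask_init() has run):
 *   cat /sys/bus/pci/devices/<domain:bus:dev.fn>/sdma_reset_mask
 */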

int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (!amdgpu_gpu_recovery)
		return r;

	if (adev->sdma.num_instances) {
		r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_gpu_recovery)
		return;

	/* Only remove the attribute if the sysfs directory still exists;
	 * on hot unplug the kobject's directory is torn down first, and
	 * removing the file then would trigger a kernfs warning.
	 */
	if (adev->dev->kobj.sd) {
		if (adev->sdma.num_instances)
			device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
	}
}