linux/drivers/gpu/host1x/dev.c
commit 02458fbfaa by Rupinderjit Singh

gpu: host1x: Fix a use of uninitialized mutex
commit c8347f915e ("gpu: host1x: Fix boot regression for Tegra")
introduced a use of an uninitialized mutex, leading to the warning
below when CONFIG_DEBUG_MUTEXES and CONFIG_DEBUG_LOCK_ALLOC are
enabled.

[   41.662843] ------------[ cut here ]------------
[   41.663012] DEBUG_LOCKS_WARN_ON(lock->magic != lock)
[   41.663035] WARNING: CPU: 4 PID: 794 at kernel/locking/mutex.c:587 __mutex_lock+0x670/0x878
[   41.663458] Modules linked in: rtw88_8822c(+) bluetooth(+) rtw88_pci rtw88_core mac80211 aquantia libarc4 crc_itu_t cfg80211 tegra194_cpufreq dwmac_tegra(+) arm_dsu_pmu stmmac_platform stmmac pcs_xpcs rfkill at24 host1x(+) tegra_bpmp_thermal ramoops reed_solomon fuse loop nfnetlink xfs mmc_block rpmb_core ucsi_ccg ina3221 crct10dif_ce xhci_tegra ghash_ce lm90 sha2_ce sha256_arm64 sha1_ce sdhci_tegra pwm_fan sdhci_pltfm sdhci gpio_keys rtc_tegra cqhci mmc_core phy_tegra_xusb i2c_tegra tegra186_gpc_dma i2c_tegra_bpmp spi_tegra114 dm_mirror dm_region_hash dm_log dm_mod
[   41.665078] CPU: 4 UID: 0 PID: 794 Comm: (udev-worker) Not tainted 6.11.0-29.31_1538613708.el10.aarch64+debug #1
[   41.665838] Hardware name: NVIDIA NVIDIA Jetson AGX Orin Developer Kit/Jetson, BIOS 36.3.0-gcid-35594366 02/26/2024
[   41.672555] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   41.679636] pc : __mutex_lock+0x670/0x878
[   41.683834] lr : __mutex_lock+0x670/0x878
[   41.688035] sp : ffff800084b77090
[   41.691446] x29: ffff800084b77160 x28: ffffdd4bebf7b000 x27: ffffdd4be96b1000
[   41.698799] x26: 1fffe0002308361c x25: 1ffff0001096ee18 x24: 0000000000000000
[   41.706149] x23: 0000000000000000 x22: 0000000000000002 x21: ffffdd4be6e3c7a0
[   41.713500] x20: ffff800084b770f0 x19: ffff00011841b1e8 x18: 0000000000000000
[   41.720675] x17: 0000000000000000 x16: 0000000000000000 x15: 0720072007200720
[   41.728023] x14: 0000000000000000 x13: 0000000000000001 x12: ffff6001a96eaab3
[   41.735375] x11: 1fffe001a96eaab2 x10: ffff6001a96eaab2 x9 : ffffdd4be4838bbc
[   41.742723] x8 : 00009ffe5691554e x7 : ffff000d4b755593 x6 : 0000000000000001
[   41.749985] x5 : ffff000d4b755590 x4 : 1fffe0001d88f001 x3 : dfff800000000000
[   41.756988] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000ec478000
[   41.764251] Call trace:
[   41.766695]  __mutex_lock+0x670/0x878
[   41.770373]  mutex_lock_nested+0x2c/0x40
[   41.774134]  host1x_intr_start+0x54/0xf8 [host1x]
[   41.778863]  host1x_runtime_resume+0x150/0x228 [host1x]
[   41.783935]  pm_generic_runtime_resume+0x84/0xc8
[   41.788485]  __rpm_callback+0xa0/0x478
[   41.792422]  rpm_callback+0x15c/0x1a8
[   41.795922]  rpm_resume+0x698/0xc08
[   41.799597]  __pm_runtime_resume+0xa8/0x140
[   41.803621]  host1x_probe+0x810/0xbc0 [host1x]
[   41.807909]  platform_probe+0xcc/0x1a8
[   41.811845]  really_probe+0x188/0x800
[   41.815347]  __driver_probe_device+0x164/0x360
[   41.819810]  driver_probe_device+0x64/0x1a8
[   41.823834]  __driver_attach+0x180/0x490
[   41.827773]  bus_for_each_dev+0x104/0x1a0
[   41.831797]  driver_attach+0x44/0x68
[   41.835296]  bus_add_driver+0x23c/0x4e8
[   41.839235]  driver_register+0x15c/0x3a8
[   41.843170]  __platform_register_drivers+0xa4/0x208
[   41.848159]  tegra_host1x_init+0x4c/0xff8 [host1x]
[   41.853147]  do_one_initcall+0xd4/0x380
[   41.856997]  do_init_module+0x1dc/0x698
[   41.860758]  load_module+0xc70/0x1300
[   41.864435]  __do_sys_init_module+0x1a8/0x1d0
[   41.868721]  __arm64_sys_init_module+0x74/0xb0
[   41.873183]  invoke_syscall.constprop.0+0xdc/0x1e8
[   41.877997]  do_el0_svc+0x154/0x1d0
[   41.881671]  el0_svc+0x54/0x140
[   41.884820]  el0t_64_sync_handler+0x120/0x130
[   41.889285]  el0t_64_sync+0x1a4/0x1a8
[   41.892960] irq event stamp: 69737
[   41.896370] hardirqs last  enabled at (69737): [<ffffdd4be6d7768c>] _raw_spin_unlock_irqrestore+0x44/0xe8
[   41.905739] hardirqs last disabled at (69736): [<ffffdd4be59dcd40>] clk_enable_lock+0x98/0x198
[   41.914314] softirqs last  enabled at (68082): [<ffffdd4be466b1d0>] handle_softirqs+0x4c8/0x890
[   41.922977] softirqs last disabled at (67945): [<ffffdd4be44f02a4>] __do_softirq+0x1c/0x28
[   41.931289] ---[ end trace 0000000000000000 ]---

When pm_runtime_enable() is called in the probe function, the PM core
invokes the resume callback if the host1x device is in a suspended
state. As the log above shows, this results in a call to
host1x_intr_start(), which tries to acquire a mutex. However,
host1x_intr_init(), where that mutex is initialised, only runs after
pm_runtime_enable(), so the mutex is used before it has been
initialised.

Fix this by moving the mutex initialisation ahead of the
pm_runtime_enable() call in probe.

Fixes: c8347f915e ("gpu: host1x: Fix boot regression for Tegra")
Signed-off-by: Rupinderjit Singh <rusingh@redhat.com>
Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Link: https://patchwork.ozlabs.org/project/linux-tegra/patch/20250206155803.201942-1-rusingh@redhat.com/
2025-02-07 15:57:50 +01:00
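
The fix is visible in host1x_probe() in the file below: intr_mutex is
now initialised before runtime PM is enabled. A minimal sketch of the
fixed ordering, with the surrounding error handling omitted:

        mutex_init(&host->intr_mutex);

        pm_runtime_enable(&pdev->dev);

        /* from here on, runtime PM can resume the device; the resume
         * path (host1x_runtime_resume() -> host1x_intr_start()) takes
         * intr_mutex, so it must already be initialised */
        err = pm_runtime_resume_and_get(&pdev->dev);

        err = host1x_intr_init(host);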

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
        writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
        writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
        return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
        writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
        return readl(ch->regs + r);
}
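
/*
 * Per-SoC capability tables: each host1x_info instance describes one
 * host1x generation (number of channels, syncpoints, mlocks and
 * syncpoint wait bases, the sync register offset, the addressable DMA
 * mask, and the stream ID programming tables on newer SoCs).
 */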
static const struct host1x_info host1x01_info = {
        .nb_channels = 8,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 8,
        .init = host1x01_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
        .nb_channels = 9,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 12,
        .init = host1x02_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
        .nb_channels = 12,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x04_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
        .nb_channels = 14,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x05_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
        { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
        { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
        { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
        { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
        { /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 },
        { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
        { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
        { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
        { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
        { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
        { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
        { /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 },
        { /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 },
        { /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 },
        { /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 },
        { /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 },
        { /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 },
        { /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 },
        { /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 },
        { /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 },
        { /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 },
        { /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 },
        { /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 },
};

static const struct host1x_info host1x06_info = {
        .nb_channels = 63,
        .nb_pts = 576,
        .nb_mlocks = 24,
        .nb_bases = 16,
        .init = host1x06_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
        .sid_table = tegra186_sid_table,
        .reserve_vblank_syncpts = false,
        .skip_reset_assert = true,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
        { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
        { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
        { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
        { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
        { /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 0x800 },
        { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
        { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
        { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
        { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
        { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
        { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
        { /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 },
        { /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 },
        { /* ISP_THI */ .base = 0x1b90, .offset = 0x30, .limit = 0x34 },
        { /* PVA0_CLUSTER */ .base = 0x1b98, .offset = 0x0, .limit = 0x0 },
        { /* PVA0_CLUSTER */ .base = 0x1ba0, .offset = 0x0, .limit = 0x0 },
        { /* NVDLA0 */ .base = 0x1ba8, .offset = 0x30, .limit = 0x34 },
        { /* NVDLA1 */ .base = 0x1bb0, .offset = 0x30, .limit = 0x34 },
        { /* NVENC1 */ .base = 0x1bb8, .offset = 0x30, .limit = 0x34 },
        { /* NVDEC1 */ .base = 0x1bc0, .offset = 0x30, .limit = 0x34 },
};

static const struct host1x_info host1x07_info = {
        .nb_channels = 63,
        .nb_pts = 704,
        .nb_mlocks = 32,
        .nb_bases = 0,
        .init = host1x07_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
        .sid_table = tegra194_sid_table,
        .reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
        { /* SE1 MMIO */ .base = 0x1650, .offset = 0x90, .limit = 0x90 },
        { /* SE1 ch */ .base = 0x1730, .offset = 0x90, .limit = 0x90 },
        { /* SE2 MMIO */ .base = 0x1658, .offset = 0x90, .limit = 0x90 },
        { /* SE2 ch */ .base = 0x1738, .offset = 0x90, .limit = 0x90 },
        { /* SE4 MMIO */ .base = 0x1660, .offset = 0x90, .limit = 0x90 },
        { /* SE4 ch */ .base = 0x1740, .offset = 0x90, .limit = 0x90 },
        { /* ISP MMIO */ .base = 0x1680, .offset = 0x800, .limit = 0x800 },
        { /* VIC MMIO */ .base = 0x1688, .offset = 0x34, .limit = 0x34 },
        { /* VIC ch */ .base = 0x17b8, .offset = 0x30, .limit = 0x30 },
        { /* NVENC MMIO */ .base = 0x1690, .offset = 0x34, .limit = 0x34 },
        { /* NVENC ch */ .base = 0x17c0, .offset = 0x30, .limit = 0x30 },
        { /* NVDEC MMIO */ .base = 0x1698, .offset = 0x34, .limit = 0x34 },
        { /* NVDEC ch */ .base = 0x17c8, .offset = 0x30, .limit = 0x30 },
        { /* NVJPG MMIO */ .base = 0x16a0, .offset = 0x34, .limit = 0x34 },
        { /* NVJPG ch */ .base = 0x17d0, .offset = 0x30, .limit = 0x30 },
        { /* TSEC MMIO */ .base = 0x16a8, .offset = 0x30, .limit = 0x34 },
        { /* NVJPG1 MMIO */ .base = 0x16b0, .offset = 0x34, .limit = 0x34 },
        { /* NVJPG1 ch */ .base = 0x17a8, .offset = 0x30, .limit = 0x30 },
        { /* VI MMIO */ .base = 0x16b8, .offset = 0x800, .limit = 0x800 },
        { /* VI_THI MMIO */ .base = 0x16c0, .offset = 0x30, .limit = 0x34 },
        { /* ISP_THI MMIO */ .base = 0x16c8, .offset = 0x30, .limit = 0x34 },
        { /* NVDLA MMIO */ .base = 0x16d8, .offset = 0x30, .limit = 0x34 },
        { /* NVDLA ch */ .base = 0x17e0, .offset = 0x30, .limit = 0x34 },
        { /* NVDLA1 MMIO */ .base = 0x16e0, .offset = 0x30, .limit = 0x34 },
        { /* NVDLA1 ch */ .base = 0x17e8, .offset = 0x30, .limit = 0x34 },
        { /* OFA MMIO */ .base = 0x16e8, .offset = 0x34, .limit = 0x34 },
        { /* OFA ch */ .base = 0x1768, .offset = 0x30, .limit = 0x30 },
        { /* VI2 MMIO */ .base = 0x16f0, .offset = 0x800, .limit = 0x800 },
        { /* VI2_THI MMIO */ .base = 0x16f8, .offset = 0x30, .limit = 0x34 },
};

static const struct host1x_info host1x08_info = {
        .nb_channels = 63,
        .nb_pts = 1024,
        .nb_mlocks = 24,
        .nb_bases = 0,
        .init = host1x08_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .has_common = true,
        .num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
        .sid_table = tegra234_sid_table,
        .streamid_vm_table = { 0x1004, 128 },
        .classid_vm_table = { 0x1404, 25 },
        .mmio_vm_table = { 0x1504, 25 },
        .reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
        { .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
        { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
        { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
        { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
        { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
        { },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

static void host1x_setup_virtualization_tables(struct host1x *host)
{
        const struct host1x_info *info = host->info;
        unsigned int i;

        if (!info->has_hypervisor)
                return;

        for (i = 0; i < info->num_sid_entries; i++) {
                const struct host1x_sid_entry *entry = &info->sid_table[i];

                host1x_hypervisor_writel(host, entry->offset, entry->base);
                host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
        }

        for (i = 0; i < info->streamid_vm_table.count; i++) {
                /* Allow access to all stream IDs to all VMs. */
                host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
        }

        for (i = 0; i < info->classid_vm_table.count; i++) {
                /* Allow access to all classes to all VMs. */
                host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
        }

        for (i = 0; i < info->mmio_vm_table.count; i++) {
                /* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
                host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
        }
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
        /* Our IOMMU usage policy doesn't currently play well with GART */
        if (of_machine_is_compatible("nvidia,tegra20"))
                return false;

        /*
         * If we support addressing a maximum of 32 bits of physical memory
         * and if the host1x firewall is enabled, there's no need to enable
         * IOMMU support. This can happen for example on Tegra20, Tegra30
         * and Tegra114.
         *
         * Tegra124 and later can address up to 34 bits of physical memory and
         * many platforms come equipped with more than 2 GiB of system memory,
         * which requires crossing the 4 GiB boundary. But there's a catch: on
         * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
         * only address up to 32 bits of memory in GATHER opcodes, which means
         * that command buffers need to either be in the first 2 GiB of system
         * memory (which could quickly lead to memory exhaustion), or command
         * buffers need to be treated differently from other buffers (which is
         * not possible with the current ABI).
         *
         * A third option is to use the IOMMU in these cases to make sure all
         * buffers will be mapped into a 32-bit IOVA space that host1x can
         * address. This allows all of the system memory to be used and works
         * within the limitations of the host1x on these SoCs.
         *
         * In summary, default to enable IOMMU on Tegra124 and later. For any
         * of the earlier SoCs, only use the IOMMU for additional safety when
         * the host1x firewall is disabled.
         */
        if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                        return false;
        }

        return true;
}

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
        int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (host->dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping =
                                to_dma_iommu_mapping(host->dev);
                arm_iommu_detach_device(host->dev);
                arm_iommu_release_mapping(mapping);

                domain = iommu_get_domain_for_dev(host->dev);
        }
#endif

        /*
         * We may not always want to enable IOMMU support (for example if the
         * host1x firewall is already enabled and we don't support addressing
         * more than 32 bits of physical memory), so check for that first.
         *
         * Similarly, if host1x is already attached to an IOMMU (via the DMA
         * API), don't try to attach again.
         */
        if (!host1x_wants_iommu(host) || domain)
                return domain;

        host->group = iommu_group_get(host->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
                dma_addr_t start, end;
                unsigned long order;

                err = iova_cache_get();
                if (err < 0)
                        goto put_group;

                host->domain = iommu_paging_domain_alloc(host->dev);
                if (IS_ERR(host->domain)) {
                        err = PTR_ERR(host->domain);
                        host->domain = NULL;
                        goto put_cache;
                }

                err = iommu_attach_group(host->domain, host->group);
                if (err) {
                        if (err == -ENODEV)
                                err = 0;

                        goto free_domain;
                }

                geometry = &host->domain->geometry;
                start = geometry->aperture_start & host->info->dma_mask;
                end = geometry->aperture_end & host->info->dma_mask;

                order = __ffs(host->domain->pgsize_bitmap);
                init_iova_domain(&host->iova, 1UL << order, start >> order);
                host->iova_end = end;

                domain = host->domain;
        }

        return domain;

free_domain:
        iommu_domain_free(host->domain);
        host->domain = NULL;
put_cache:
        iova_cache_put();
put_group:
        iommu_group_put(host->group);
        host->group = NULL;

        return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
        u64 mask = host->info->dma_mask;
        struct iommu_domain *domain;
        int err;

        domain = host1x_iommu_attach(host);
        if (IS_ERR(domain)) {
                err = PTR_ERR(domain);
                dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
                return err;
        }

        /*
         * If we're not behind an IOMMU make sure we don't get push buffers
         * that are allocated outside of the range addressable by the GATHER
         * opcode.
         *
         * Newer generations of Tegra (Tegra186 and later) support a wide
         * variant of the GATHER opcode that allows addressing more bits.
         */
        if (!domain && !host->info->has_wide_gather)
                mask = DMA_BIT_MASK(32);

        err = dma_coerce_mask_and_coherent(host->dev, mask);
        if (err < 0) {
                dev_err(host->dev, "failed to set DMA mask: %d\n", err);
                return err;
        }

        return 0;
}

static void host1x_iommu_exit(struct host1x *host)
{
        if (host->domain) {
                put_iova_domain(&host->iova);
                iommu_detach_group(host->domain, host->group);
                iommu_domain_free(host->domain);
                host->domain = NULL;
                iova_cache_put();

                iommu_group_put(host->group);
                host->group = NULL;
        }
}

static int host1x_get_resets(struct host1x *host)
{
        int err;

        host->resets[0].id = "mc";
        host->resets[1].id = "host1x";
        host->nresets = ARRAY_SIZE(host->resets);

        err = devm_reset_control_bulk_get_optional_exclusive_released(
                                host->dev, host->nresets, host->resets);
        if (err) {
                dev_err(host->dev, "failed to get reset: %d\n", err);
                return err;
        }

        return 0;
}
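
/*
 * Note that intr_mutex is initialised before pm_runtime_enable() below:
 * once runtime PM is enabled, a resume can end up in
 * host1x_intr_start(), which takes that mutex (see the commit message
 * above).
 */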
static int host1x_probe(struct platform_device *pdev)
{
        struct host1x *host;
        int err, i;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->info = of_device_get_match_data(&pdev->dev);

        if (host->info->has_hypervisor) {
                host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
                if (IS_ERR(host->regs))
                        return PTR_ERR(host->regs);

                host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
                if (IS_ERR(host->hv_regs))
                        return PTR_ERR(host->hv_regs);

                if (host->info->has_common) {
                        host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
                        if (IS_ERR(host->common_regs))
                                return PTR_ERR(host->common_regs);
                }
        } else {
                host->regs = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(host->regs))
                        return PTR_ERR(host->regs);
        }

        for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) {
                char irq_name[] = "syncptX";

                sprintf(irq_name, "syncpt%d", i);

                err = platform_get_irq_byname_optional(pdev, irq_name);
                if (err == -ENXIO)
                        break;
                if (err < 0)
                        return err;

                host->syncpt_irqs[i] = err;
        }

        host->num_syncpt_irqs = i;

        /* Device tree without irq names */
        if (i == 0) {
                host->syncpt_irqs[0] = platform_get_irq(pdev, 0);
                if (host->syncpt_irqs[0] < 0)
                        return host->syncpt_irqs[0];

                host->num_syncpt_irqs = 1;
        }

        mutex_init(&host->devices_lock);
        INIT_LIST_HEAD(&host->devices);
        INIT_LIST_HEAD(&host->list);
        host->dev = &pdev->dev;

        /* set common host1x device data */
        platform_set_drvdata(pdev, host);

        host->dev->dma_parms = &host->dma_parms;
        dma_set_max_seg_size(host->dev, UINT_MAX);

        if (host->info->init) {
                err = host->info->init(host);
                if (err)
                        return err;
        }

        host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                err = PTR_ERR(host->clk);

                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to get clock: %d\n", err);

                return err;
        }

        err = host1x_get_resets(host);
        if (err)
                return err;

        host1x_bo_cache_init(&host->cache);

        err = host1x_iommu_init(host);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
                goto destroy_cache;
        }

        err = host1x_channel_list_init(&host->channel_list,
                                       host->info->nb_channels);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize channel list\n");
                goto iommu_exit;
        }

        err = host1x_memory_context_list_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize context list\n");
                goto free_channels;
        }

        err = host1x_syncpt_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize syncpts\n");
                goto free_contexts;
        }

        mutex_init(&host->intr_mutex);

        pm_runtime_enable(&pdev->dev);

        err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
        if (err)
                goto pm_disable;

        /* the driver's code isn't ready yet for the dynamic RPM */
        err = pm_runtime_resume_and_get(&pdev->dev);
        if (err)
                goto pm_disable;

        err = host1x_intr_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize interrupts\n");
                goto pm_put;
        }

        host1x_debug_init(host);

        err = host1x_register(host);
        if (err < 0)
                goto deinit_debugfs;

        err = devm_of_platform_populate(&pdev->dev);
        if (err < 0)
                goto unregister;

        return 0;

unregister:
        host1x_unregister(host);
deinit_debugfs:
        host1x_debug_deinit(host);
        host1x_intr_deinit(host);
pm_put:
        pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
        pm_runtime_disable(&pdev->dev);

        host1x_syncpt_deinit(host);
free_contexts:
        host1x_memory_context_list_free(&host->context_list);
free_channels:
        host1x_channel_list_free(&host->channel_list);
iommu_exit:
        host1x_iommu_exit(host);
destroy_cache:
        host1x_bo_cache_destroy(&host->cache);

        return err;
}

static void host1x_remove(struct platform_device *pdev)
{
        struct host1x *host = platform_get_drvdata(pdev);

        host1x_unregister(host);
        host1x_debug_deinit(host);

        pm_runtime_force_suspend(&pdev->dev);

        host1x_intr_deinit(host);
        host1x_syncpt_deinit(host);
        host1x_memory_context_list_free(&host->context_list);
        host1x_channel_list_free(&host->channel_list);
        host1x_iommu_exit(host);
        host1x_bo_cache_destroy(&host->cache);
}
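
/*
 * Runtime PM callbacks: suspend stops all channels and interrupt
 * handling, saves syncpoint state, optionally asserts the resets and
 * gates the clock; resume reverses this and reprograms the
 * virtualization tables and syncpoint state.
 */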
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
        struct host1x *host = dev_get_drvdata(dev);
        int err;

        host1x_channel_stop_all(host);
        host1x_intr_stop(host);
        host1x_syncpt_save(host);

        if (!host->info->skip_reset_assert) {
                err = reset_control_bulk_assert(host->nresets, host->resets);
                if (err) {
                        dev_err(dev, "failed to assert reset: %d\n", err);
                        goto resume_host1x;
                }

                usleep_range(1000, 2000);
        }

        clk_disable_unprepare(host->clk);
        reset_control_bulk_release(host->nresets, host->resets);

        return 0;

resume_host1x:
        host1x_setup_virtualization_tables(host);
        host1x_syncpt_restore(host);
        host1x_intr_start(host);

        return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
        struct host1x *host = dev_get_drvdata(dev);
        int err;

        err = reset_control_bulk_acquire(host->nresets, host->resets);
        if (err) {
                dev_err(dev, "failed to acquire reset: %d\n", err);
                return err;
        }

        err = clk_prepare_enable(host->clk);
        if (err) {
                dev_err(dev, "failed to enable clock: %d\n", err);
                goto release_reset;
        }

        err = reset_control_bulk_deassert(host->nresets, host->resets);
        if (err < 0) {
                dev_err(dev, "failed to deassert reset: %d\n", err);
                goto disable_clk;
        }

        host1x_setup_virtualization_tables(host);
        host1x_syncpt_restore(host);
        host1x_intr_start(host);

        return 0;

disable_clk:
        clk_disable_unprepare(host->clk);
release_reset:
        reset_control_bulk_release(host->nresets, host->resets);

        return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
        SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
                           NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static struct platform_driver tegra_host1x_driver = {
        .driver = {
                .name = "tegra-host1x",
                .of_match_table = host1x_of_match,
                .pm = &host1x_pm_ops,
        },
        .probe = host1x_probe,
        .remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
        &tegra_host1x_driver,
        &tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
        int err;

        err = bus_register(&host1x_bus_type);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                bus_unregister(&host1x_bus_type);

        return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
        return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");