Turing introduced a new, simplified page kind scheme, reducing the number of possible page kinds from 256 to 16. It is also the first NVIDIA GPU in which the highest possible page kind value is not reserved as an "invalid" page kind. To address this, the invalid page kind is made an explicit property of the MMU HAL, and a new table of page kinds is added to the tu102 MMU HAL.

One hardware change not addressed here is that 0x00 is technically no longer a supported page kind, and pitch surfaces are instead intended to share the block-linear generic page kind 0x06. However, because that would be a rather invasive change to nouveau, and 0x00 still works fine in practice on Turing hardware, addressing this new behavior is deferred.

Signed-off-by: James Jones <jajones@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
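Since the change makes the invalid kind an explicit output of the kind hook (declared in priv.h below), here is a minimal sketch of what a Turing-style implementation of that hook could look like. Only the hook's signature is taken from the header; the table contents and the 0x07 invalid value are hypothetical placeholders, not the actual tu102 table.

/*
 * Illustrative sketch of a Turing-style "kind" hook.  The table
 * contents and the invalid value are hypothetical, not copied from
 * nouveau's tu102 MMU HAL; only the hook's contract is real.
 */
const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
	/* Turing kind space: 16 entries instead of the pre-Turing 256. */
	static const u8
	kind[16] = {
		0x00, /* pitch: deprecated on Turing, but still honored */
		0x01, 0x02, 0x03, 0x04, 0x05,
		0x06, /* generic block-linear, intended for pitch too */
		0x07, 0x08, 0x09, 0x0a, 0x0b,
		0x0c, 0x0d, 0x0e, 0x0f,
	};

	*count = ARRAY_SIZE(kind);
	/* No longer implicitly the highest kind value, so report it
	 * explicitly; 0x07 here is a placeholder value. */
	*invalid = 0x07;
	return kind;
}

With this contract, callers can treat any kind equal to *invalid (or absent from the table) as unsupported, rather than hard-coding the old highest-value convention.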
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_PRIV_H__
#define __NVKM_MMU_PRIV_H__
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
#include <subdev/mmu.h>

void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
		   int index, struct nvkm_mmu *);
int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
		  int index, struct nvkm_mmu **);

struct nvkm_mmu_func {
	void (*init)(struct nvkm_mmu *);

	u8 dma_bits;

	struct {
		struct nvkm_sclass user;
	} mmu;

	struct {
		struct nvkm_sclass user;
		int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
			    void *argv, u32 argc, struct nvkm_memory **);
		int (*umap)(struct nvkm_mmu *, struct nvkm_memory *, void *argv,
			    u32 argc, u64 *addr, u64 *size, struct nvkm_vma **);
	} mem;

	struct {
		struct nvkm_sclass user;
		int (*ctor)(struct nvkm_mmu *, bool managed, u64 addr, u64 size,
			    void *argv, u32 argc, struct lock_class_key *,
			    const char *name, struct nvkm_vmm **);
		bool global;
		u32 pd_offset;
	} vmm;

	const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
	bool kind_sys;
};

extern const struct nvkm_mmu_func nv04_mmu;

const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);

const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);

const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);

struct nvkm_mmu_pt {
	union {
		struct nvkm_mmu_ptc *ptc;
		struct nvkm_mmu_ptp *ptp;
	};
	struct nvkm_memory *memory;
	bool sub;
	u16 base;
	u64 addr;
	struct list_head head;
};

void nvkm_mmu_ptc_dump(struct nvkm_mmu *);
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *, u32 size, u32 align, bool zero);
void nvkm_mmu_ptc_put(struct nvkm_mmu *, bool force, struct nvkm_mmu_pt **);
#endif
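For orientation, a hedged usage sketch of the page-table cache helpers declared above. The get/put semantics (cached, optionally zeroed allocation; put returning it to the cache, with force bypassing the cache) are inferred from the signatures, not quoted from nouveau, and example_alloc_pt is a hypothetical caller.

/*
 * Hedged usage sketch: assumes nvkm_mmu_ptc_get() hands out an
 * (optionally zeroed) page-table allocation from the cache, and
 * nvkm_mmu_ptc_put() returns it, NULLing the caller's pointer.
 */
static int
example_alloc_pt(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_pt *pt;

	/* 4KiB page table, 4KiB-aligned, zero-filled on allocation. */
	pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
	if (!pt)
		return -ENOMEM;

	/* ... program PTEs through pt->memory ... */

	/* Return the allocation to the cache (force=false). */
	nvkm_mmu_ptc_put(mmu, false, &pt);
	return 0;
}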