drm/virtio: Improve DMA API usage for shmem BOs
The DRM API requires a DRM driver to be backed by a device that can be used for generic DMA operations. The virtio-gpu device can't perform DMA operations when it uses the PCI transport, because the PCI device driver creates a virtual virtio-gpu device that isn't associated with the PCI device. Use the PCI GPU device as the DRM device instead of the virtio-gpu device, and drop the DMA-related hacks from the virtio-gpu driver.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220630200726.1884320-8-dmitry.osipenko@collabora.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent e7fef09233
commit b5c9ed70d1

5 changed files with 32 additions and 99 deletions
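For orientation before the diff (an illustrative sketch, not part of the patch): with the PCI transport, the virtio_device registered by the virtio-pci driver is a child of the PCI device, and only the parent carries DMA ops and a DMA mask. Backing drm_device::dev with that parent lets the generic DRM and GEM helpers perform DMA against a real bus device:

        /*
         * Illustrative device hierarchy (assumed, for a virtio-gpu behind
         * a PCI transport; not code from the patch):
         *
         *   pci_dev                <- vdev->dev.parent: has dma_ops/dma_mask
         *     `-- virtio_device    <- vdev->dev: purely virtual, no DMA setup
         *
         * The probe path below therefore allocates the DRM device on the
         * parent instead of on the virtio device itself:
         */
        dev = drm_dev_alloc(&driver, vdev->dev.parent);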
drivers/gpu/drm/virtio/virtgpu_drv.c

@@ -46,12 +46,11 @@ static int virtio_gpu_modeset = -1;
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, virtio_gpu_modeset, int, 0400);
 
-static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+static int virtio_gpu_pci_quirk(struct drm_device *dev)
 {
-        struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+        struct pci_dev *pdev = to_pci_dev(dev->dev);
         const char *pname = dev_name(&pdev->dev);
         bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
-        char unique[20];
         int ret;
 
         DRM_INFO("pci: %s detected at %s\n",
@@ -63,39 +62,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
                         return ret;
         }
 
-        /*
-         * Normally the drm_dev_set_unique() call is done by core DRM.
-         * The following comment covers, why virtio cannot rely on it.
-         *
-         * Unlike the other virtual GPU drivers, virtio abstracts the
-         * underlying bus type by using struct virtio_device.
-         *
-         * Hence the dev_is_pci() check, used in core DRM, will fail
-         * and the unique returned will be the virtio_device "virtio0",
-         * while a "pci:..." one is required.
-         *
-         * A few other ideas were considered:
-         * - Extend the dev_is_pci() check [in drm_set_busid] to
-         *   consider virtio.
-         *   Seems like a bigger hack than what we have already.
-         *
-         * - Point drm_device::dev to the parent of the virtio_device
-         *   Semantic changes:
-         *   * Using the wrong device for i2c, framebuffer_alloc and
-         *     prime import.
-         *   Visual changes:
-         *   * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
-         *     will print the wrong information.
-         *
-         * We could address the latter issues, by introducing
-         * drm_device::bus_dev, ... which would be used solely for this.
-         *
-         * So for the moment keep things as-is, with a bulky comment
-         * for the next person who feels like removing this
-         * drm_dev_set_unique() quirk.
-         */
-        snprintf(unique, sizeof(unique), "pci:%s", pname);
-        return drm_dev_set_unique(dev, unique);
+        return 0;
 }
 
 static int virtio_gpu_probe(struct virtio_device *vdev)
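With drm_device::dev now pointing at the PCI device, the dev_is_pci() check in core DRM's busid handling succeeds and the "pci:..." unique name is generated automatically. The removed quirk was, in effect, open-coding that step by hand; the deleted lines were:

        /*
         * What the removed quirk did (copied from the deleted code); core
         * DRM now derives the same unique name because dev_is_pci(dev->dev)
         * is true after this patch.
         */
        char unique[20];

        snprintf(unique, sizeof(unique), "pci:%s", pname);
        return drm_dev_set_unique(dev, unique);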
@@ -109,18 +76,24 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
         if (virtio_gpu_modeset == 0)
                 return -EINVAL;
 
-        dev = drm_dev_alloc(&driver, &vdev->dev);
+        /*
+         * The virtio-gpu device is a virtual device that doesn't have DMA
+         * ops assigned to it, nor DMA mask set and etc. Its parent device
+         * is actual GPU device we want to use it for the DRM's device in
+         * order to benefit from using generic DRM APIs.
+         */
+        dev = drm_dev_alloc(&driver, vdev->dev.parent);
         if (IS_ERR(dev))
                 return PTR_ERR(dev);
         vdev->priv = dev;
 
         if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
-                ret = virtio_gpu_pci_quirk(dev, vdev);
+                ret = virtio_gpu_pci_quirk(dev);
                 if (ret)
                         goto err_free;
         }
 
-        ret = virtio_gpu_init(dev);
+        ret = virtio_gpu_init(vdev, dev);
         if (ret)
                 goto err_free;
 
drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -101,8 +101,6 @@ struct virtio_gpu_object {
 
 struct virtio_gpu_object_shmem {
         struct virtio_gpu_object base;
-        struct sg_table *pages;
-        uint32_t mapped;
 };
 
 struct virtio_gpu_object_vram {
@@ -215,7 +213,6 @@ struct virtio_gpu_drv_cap_cache {
 };
 
 struct virtio_gpu_device {
-        struct device *dev;
         struct drm_device *ddev;
 
         struct virtio_device *vdev;
@@ -283,7 +280,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
 /* virtgpu_kms.c */
-int virtio_gpu_init(struct drm_device *dev);
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
 void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
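The two dropped fields duplicate state that generic helpers already maintain: the mapped scatter-gather table of a shmem BO is cached in the base drm_gem_shmem_object, and the DMA-capable device is reachable through the DRM device. A one-line sketch of how the rest of this patch reaches the table:

        /*
         * The per-BO sg_table now lives in the shmem GEM base object
         * (populated by drm_gem_shmem_get_pages_sgt()); virtio-gpu keeps
         * no private 'pages'/'mapped' copy of it anymore.
         */
        struct sg_table *sgt = bo->base.sgt;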
drivers/gpu/drm/virtio/virtgpu_kms.c

@@ -110,7 +110,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
         vgdev->num_capsets = num_capsets;
 }
 
-int virtio_gpu_init(struct drm_device *dev)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 {
         static vq_callback_t *callbacks[] = {
                 virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
@@ -123,7 +123,7 @@ int virtio_gpu_init(struct drm_device *dev)
         u32 num_scanouts, num_capsets;
         int ret = 0;
 
-        if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                 return -ENODEV;
 
         vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
@@ -132,8 +132,7 @@ int virtio_gpu_init(struct drm_device *dev)
 
         vgdev->ddev = dev;
         dev->dev_private = vgdev;
-        vgdev->vdev = dev_to_virtio(dev->dev);
-        vgdev->dev = dev->dev;
+        vgdev->vdev = vdev;
 
         spin_lock_init(&vgdev->display_info_lock);
         spin_lock_init(&vgdev->resource_export_lock);
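Passing the virtio_device explicitly is required because dev->dev no longer points at it: after the probe change, dev->dev is the parent (PCI) device, so the old dev_to_virtio(dev->dev) lookup would no longer be valid. The call site in virtio_gpu_probe() pairs with the new signature:

        /*
         * dev->dev is now the PCI parent, not the virtio device, so the
         * virtio_device handle has to travel alongside the drm_device.
         */
        ret = virtio_gpu_init(vdev, dev);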
drivers/gpu/drm/virtio/virtgpu_object.c

@@ -67,21 +67,6 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 
         virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
         if (virtio_gpu_is_shmem(bo)) {
-                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
-                if (shmem->pages) {
-                        if (shmem->mapped) {
-                                dma_unmap_sgtable(vgdev->vdev->dev.parent,
-                                                  shmem->pages, DMA_TO_DEVICE, 0);
-                                shmem->mapped = 0;
-                        }
-
-                        sg_free_table(shmem->pages);
-                        kfree(shmem->pages);
-                        shmem->pages = NULL;
-                        drm_gem_shmem_unpin(&bo->base);
-                }
-
                 drm_gem_shmem_free(&bo->base);
         } else if (virtio_gpu_is_vram(bo)) {
                 struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
@@ -153,36 +138,18 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                                         unsigned int *nents)
 {
         bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
         struct scatterlist *sg;
-        int si, ret;
+        struct sg_table *pages;
+        int si;
 
-        ret = drm_gem_shmem_pin(&bo->base);
-        if (ret < 0)
-                return -EINVAL;
-
-        /*
-         * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
-         * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
-         * dma-ops. This is discouraged for other drivers, but should be fine
-         * since virtio_gpu doesn't support dma-buf import from other devices.
-         */
-        shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
-        if (IS_ERR(shmem->pages)) {
-                drm_gem_shmem_unpin(&bo->base);
-                shmem->pages = NULL;
-                return PTR_ERR(shmem->pages);
-        }
+        pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+        if (IS_ERR(pages))
+                return PTR_ERR(pages);
 
-        if (use_dma_api) {
-                ret = dma_map_sgtable(vgdev->vdev->dev.parent,
-                                      shmem->pages, DMA_TO_DEVICE, 0);
-                if (ret)
-                        return ret;
-                *nents = shmem->mapped = shmem->pages->nents;
-        } else {
-                *nents = shmem->pages->orig_nents;
-        }
+        if (use_dma_api)
+                *nents = pages->nents;
+        else
+                *nents = pages->orig_nents;
 
         *ents = kvmalloc_array(*nents,
                                sizeof(struct virtio_gpu_mem_entry),
@@ -193,13 +160,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
         }
 
         if (use_dma_api) {
-                for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+                for_each_sgtable_dma_sg(pages, sg, si) {
                         (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
                         (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
                         (*ents)[si].padding = 0;
                 }
         } else {
-                for_each_sgtable_sg(shmem->pages, sg, si) {
+                for_each_sgtable_sg(pages, sg, si) {
                         (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
                         (*ents)[si].length = cpu_to_le32(sg->length);
                         (*ents)[si].padding = 0;
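The rewritten virtio_gpu_object_shmem_init() leans on drm_gem_shmem_get_pages_sgt(), which pins the pages, builds the sg_table, DMA-maps it against the DRM device (now the PCI device) and caches it in the base object, so the driver-side pin/map/unmap bookkeeping disappears. A condensed sketch of the new flow (helper semantics as I understand them from drm_gem_shmem_helper.c):

        struct sg_table *pages;

        pages = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * After dma_map_sgtable() an IOMMU may have coalesced entries:
         * pages->nents is the device-visible count, pages->orig_nents the
         * CPU-side count, hence the use_dma_api distinction below.
         */
        *nents = use_dma_api ? pages->nents : pages->orig_nents;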
drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -595,11 +595,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
         struct virtio_gpu_transfer_to_host_2d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
         bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
         if (virtio_gpu_is_shmem(bo) && use_dma_api)
-                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
-                                            shmem->pages, DMA_TO_DEVICE);
+                dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+                                            bo->base.sgt, DMA_TO_DEVICE);
 
         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
         memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1019,11 +1018,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
         struct virtio_gpu_vbuffer *vbuf;
         bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
-        if (virtio_gpu_is_shmem(bo) && use_dma_api) {
-                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
-                                            shmem->pages, DMA_TO_DEVICE);
-        }
+        if (virtio_gpu_is_shmem(bo) && use_dma_api)
+                dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+                                            bo->base.sgt, DMA_TO_DEVICE);
 
         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
         memset(cmd_p, 0, sizeof(*cmd_p));
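use_dma_api distinguishes devices that honor the platform DMA API from legacy virtio devices carrying the DMA quirk (no VIRTIO_F_ACCESS_PLATFORM feature), which consume guest-physical addresses directly and need no synchronization. For the former, ownership of the BO pages must be handed to the device before the transfer command is queued; a minimal sketch of the pattern both hunks above converge on:

        /*
         * Hand the CPU-written pages to the device (cache maintenance or
         * swiotlb bounce, whatever the dma-ops require) before queueing
         * the transfer; bo->base.sgt is the table the shmem helper mapped.
         */
        if (virtio_gpu_is_shmem(bo) && use_dma_api)
                dma_sync_sgtable_for_device(&vgdev->vdev->dev,
                                            bo->base.sgt, DMA_TO_DEVICE);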