drm/virtio: Import prime buffers from other devices as guest blobs
By importing scanout buffers from other devices, we should be able to
use the virtio-gpu driver in KMS-only mode. Note that we attach
dynamically and register a move_notify() callback, so that we can let
the VMM know of any location changes in the backing store of the
imported object by sending a detach_backing cmd.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
[dmitry.osipenko@collabora.com: added kref check to move_notify]
Link: https://patchwork.freedesktop.org/patch/msgid/20241126031643.3490496-5-vivek.kasireddy@intel.com
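For context, here is a minimal sketch of the dma-buf dynamic-import
contract this patch relies on. It is not part of the patch, and the
example_* names are hypothetical: an importer that attaches with
dma_buf_dynamic_attach() must map the buffer under the exporter's
reservation lock and supply a move_notify() callback through which the
exporter invalidates any cached mapping.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

/* Called by the exporter, with the dma-buf's reservation lock held,
 * whenever the backing store moves; the importer must drop its cached
 * mapping so the next access re-maps the buffer at its new location. */
static void example_move_notify(struct dma_buf_attachment *attach)
{
	/* e.g. unmap a cached sg_table and clear the importer's pointer */
}

static const struct dma_buf_attach_ops example_attach_ops = {
	.allow_peer2peer = true,	/* addresses may be PCIe peer-to-peer */
	.move_notify = example_move_notify,
};

static int example_dynamic_import(struct dma_buf *buf, struct device *dev,
				  void *importer_priv)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(buf, dev, &example_attach_ops,
					importer_priv);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* Dynamic importers map and unmap under the reservation lock */
	dma_resv_lock(buf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(buf->resv);

	return IS_ERR(sgt) ? PTR_ERR(sgt) : 0;
}

The diff below applies exactly this pattern, with move_notify()
additionally letting the VMM drop its reference to the old backing
store via virtio_gpu_detach_object_fenced().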
This commit is contained in:
parent 2885e575ab
commit ca77f27a26
1 changed file with 62 additions and 3 deletions
@@ -189,14 +189,19 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
 
 	if (attach) {
+		dma_resv_lock(resv, NULL);
+
 		virtio_gpu_detach_object_fenced(bo);
 
 		if (bo->sgt)
-			dma_buf_unmap_attachment_unlocked(attach, bo->sgt,
-							  DMA_BIDIRECTIONAL);
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		dma_resv_unlock(resv);
 
 		dma_buf_detach(attach->dmabuf, attach);
 		dma_buf_put(attach->dmabuf);
 	}
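Note the switch from dma_buf_unmap_attachment_unlocked() to the locked
variant: the free path now takes the dma-buf's reservation lock
explicitly so that the fenced detach and the unmap happen in one
critical section, and dma_buf_unmap_attachment() must be called with
that lock already held.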
@@ -259,10 +264,39 @@ err_pin:
 	return ret;
 }
 
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (bo->created && kref_read(&obj->refcount)) {
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		bo->sgt = NULL;
+	}
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
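The bo->created && kref_read(&obj->refcount) guard in the new
move_notify() reflects the bracketed changelog note above: the callback
can race with the final unref of the GEM object, and once the refcount
has dropped to zero the teardown in virtgpu_dma_buf_free_obj() already
performs the fenced detach, so move_notify() must not touch the object
again.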
@@ -276,7 +310,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
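As a usage illustration of the KMS-only mode mentioned in the commit
message, here is a hypothetical userspace sketch, not part of the
patch. It assumes libdrm; card_fd is the virtio-gpu DRM node, dmabuf_fd
comes from some other device's exporter, and the XRGB8888 format plus
width/height/stride values are placeholders.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

static int import_scanout_fb(int card_fd, int dmabuf_fd, uint32_t width,
			     uint32_t height, uint32_t stride,
			     uint32_t *fb_id)
{
	uint32_t handles[4] = { 0 }, pitches[4] = { 0 }, offsets[4] = { 0 };
	int ret;

	/* Kernel side: this lands in virtgpu_gem_prime_import() */
	ret = drmPrimeFDToHandle(card_fd, dmabuf_fd, &handles[0]);
	if (ret)
		return ret;

	pitches[0] = stride;
	/* Wrap the imported buffer in a KMS framebuffer for scanout */
	return drmModeAddFB2(card_fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}

On the kernel side, the PRIME import takes the new guest-blob path only
when the device exposes resource_blob without virgl 3D; otherwise it
falls back to drm_gem_prime_import(), as the last hunk shows.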