Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c  |   1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c      |  44
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h      |  36
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c    |  30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c    | 195
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c      |  26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c    |   3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c    |  32
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c       |  27
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c     |  61
10 files changed, 413 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index c2b20e0ee030..b6954e2f75e6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -52,6 +52,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
 			    vgdev->has_resource_assign_uuid);
 	virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+	virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
 	virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
 	virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
 	if (vgdev->host_visible_region.len) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ed85a7863256..749db18dcfa2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
 #include <linux/module.h>
 #include <linux/console.h>
 #include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
 
 #include <drm/drm.h>
 #include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
 	schedule_work(&vgdev->config_changed_work);
 }
 
+static __poll_t virtio_gpu_poll(struct file *filp,
+				struct poll_table_struct *wait)
+{
+	struct drm_file *drm_file = filp->private_data;
+	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct drm_device *dev = drm_file->minor->dev;
+	struct drm_pending_event *e = NULL;
+	__poll_t mask = 0;
+
+	if (!vfpriv->ring_idx_mask)
+		return drm_poll(filp, wait);
+
+	poll_wait(filp, &drm_file->event_wait, wait);
+
+	if (!list_empty(&drm_file->event_list)) {
+		spin_lock_irq(&dev->event_lock);
+		e = list_first_entry(&drm_file->event_list,
+				     struct drm_pending_event, link);
+		drm_file->event_space += e->event->length;
+		list_del(&e->link);
+		spin_unlock_irq(&dev->event_lock);
+
+		kfree(e);
+		mask |= EPOLLIN | EPOLLRDNORM;
+	}
+
+	return mask;
+}
+
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -172,6 +203,7 @@ static unsigned int features[] = {
 	VIRTIO_GPU_F_EDID,
 	VIRTIO_GPU_F_RESOURCE_UUID,
 	VIRTIO_GPU_F_RESOURCE_BLOB,
+	VIRTIO_GPU_F_CONTEXT_INIT,
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
@@ -193,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.compat_ioctl = drm_compat_ioctl,
+	.poll = virtio_gpu_poll,
+	.read = drm_read,
+	.llseek = noop_llseek,
+	.mmap = drm_gem_mmap
+};
 
 static const struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
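
The replacement fops route poll() through virtio_gpu_poll(), which consumes the driver-internal fence event itself; userspace only observes readiness and has nothing to read(). A minimal usage sketch, assuming fd is a virtio-gpu render node whose context was initialized with a poll-rings mask (setup not shown):

	#include <poll.h>

	/* Block until a fence on one of the context's polled rings signals.
	 * The kernel drops the event internally, so readiness is the only
	 * signal delivered. */
	static int wait_for_ring_fence(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		return (pfd.revents & POLLIN) ? 0 : -1;
	}
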
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d4e610a44e12..e0265fe74aa5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
 #ifndef VIRTIO_DRV_H
 #define VIRTIO_DRV_H
 
+#include <linux/dma-direction.h>
 #include <linux/virtio.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
@@ -54,6 +55,9 @@
 #define STATE_OK 1
 #define STATE_ERR 2
 
+#define MAX_CAPSET_ID 63
+#define MAX_RINGS 64
+
 struct virtio_gpu_object_params {
 	unsigned long size;
 	bool dumb;
@@ -134,9 +138,18 @@ struct virtio_gpu_fence_driver {
 	spinlock_t lock;
 };
 
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+	struct drm_pending_event base;
+	struct drm_event event;
+};
+
 struct virtio_gpu_fence {
 	struct dma_fence f;
+	uint32_t ring_idx;
 	uint64_t fence_id;
+	bool emit_fence_info;
+	struct virtio_gpu_fence_event *e;
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 };
@@ -233,6 +246,7 @@ struct virtio_gpu_device {
 	bool has_resource_assign_uuid;
 	bool has_resource_blob;
 	bool has_host_visible;
+	bool has_context_init;
 	struct virtio_shm_region host_visible_region;
 	struct drm_mm host_visible_mm;
@@ -244,6 +258,7 @@ struct virtio_gpu_device {
 	struct virtio_gpu_drv_capset *capsets;
 	uint32_t num_capsets;
+	uint64_t capset_id_mask;
 	struct list_head cap_cache;
 
 	/* protects uuid state when exporting */
@@ -254,12 +269,16 @@ struct virtio_gpu_device {
 
 struct virtio_gpu_fpriv {
 	uint32_t ctx_id;
+	uint32_t context_init;
 	bool context_created;
+	uint32_t num_rings;
+	uint64_t base_fence_ctx;
+	uint64_t ring_idx_mask;
 	struct mutex context_lock;
 };
 
 /* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 11
+#define DRM_VIRTIO_NUM_IOCTLS 12
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -337,7 +356,8 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_drv_cap_cache **cache_p);
 int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
-				   uint32_t nlen, const char *name);
+				   uint32_t context_init, uint32_t nlen,
+				   const char *name);
 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 				    uint32_t id);
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -417,8 +437,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 					int index);
 
 /* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
-	struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+						uint64_t base_fence_ctx,
+						uint32_t ring_idx);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
 			   struct virtio_gpu_fence *fence);
@@ -459,4 +480,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
 int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
 			   struct virtio_gpu_object_params *params,
 			   struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+					     struct device *dev,
+					     enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+				   struct sg_table *sgt,
+				   enum dma_data_direction dir);
+
 #endif
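
Capping MAX_CAPSET_ID at 63 lets the set of supported capsets live in the single u64 capset_id_mask, and MAX_RINGS of 64 bounds ring_idx_mask the same way. A sketch of the resulting membership test (illustrative helper, not from the patch; note the 64-bit shift needed for ids above 31):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if the device advertises the given capset id (valid: 1..63). */
	static bool capset_supported(uint64_t capset_id_mask, uint32_t capset_id)
	{
		if (capset_id == 0 || capset_id > 63)
			return false;
		return capset_id_mask & (1ULL << capset_id);
	}
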
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index d28e25e8409b..f28357dbde35 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -71,22 +71,29 @@ static const struct dma_fence_ops virtio_gpu_fence_ops = {
 	.timeline_value_str = virtio_gpu_timeline_value_str,
 };
 
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+						uint64_t base_fence_ctx,
+						uint32_t ring_idx)
 {
+	uint64_t fence_context = base_fence_ctx + ring_idx;
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
 						 GFP_KERNEL);
+
 	if (!fence)
 		return fence;
 
 	fence->drv = drv;
+	fence->ring_idx = ring_idx;
+	fence->emit_fence_info = !(base_fence_ctx == drv->context);
 
 	/* This only partially initializes the fence because the seqno is
 	 * unknown yet.  The fence must not be used outside of the driver
 	 * until virtio_gpu_fence_emit is called.
 	 */
-	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
-		       0);
+
+	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+		       fence_context, 0);
 
 	return fence;
 }
@@ -108,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
 	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+	/* Only currently defined fence param. */
+	if (fence->emit_fence_info) {
+		cmd_hdr->flags |=
+			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+		cmd_hdr->ring_idx = (u8)fence->ring_idx;
+	}
 }
 
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -138,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 			continue;
 
 		dma_fence_signal_locked(&curr->f);
+		if (curr->e) {
+			drm_send_event(vgdev->ddev, &curr->e->base);
+			curr->e = NULL;
+		}
+
 		list_del(&curr->node);
 		dma_fence_put(&curr->f);
 	}
 
 	dma_fence_signal_locked(&signaled->f);
+	if (signaled->e) {
+		drm_send_event(vgdev->ddev, &signaled->e->base);
+		signaled->e = NULL;
+	}
+
 	list_del(&signaled->node);
 	dma_fence_put(&signaled->f);
 	break;
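
Each ring thus gets its own dma_fence timeline, base_fence_ctx + ring_idx, so seqnos are only ever compared between fences of the same ring, and only fences on user-allocated timelines carry the ring index to the host. A standalone toy illustration of the arithmetic (names mirror the patch; the base value is a placeholder for what dma_fence_context_alloc() would return):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t base_fence_ctx = 100;	/* placeholder allocation */

		/* ring k lives on timeline base_fence_ctx + k */
		for (uint32_t ring_idx = 0; ring_idx < 4; ring_idx++)
			printf("ring %u -> fence context %llu\n", ring_idx,
			       (unsigned long long)(base_fence_ctx + ring_idx));
		return 0;
	}
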
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..5618a1d5879c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,60 @@
 				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
 				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
 
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+					 struct drm_file *file,
+					 struct virtio_gpu_fence *fence,
+					 uint32_t ring_idx)
+{
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_fence_event *e = NULL;
+	int ret;
+
+	if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+		return 0;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+	e->event.length = sizeof(e->event);
+
+	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+	if (ret)
+		goto free;
+
+	fence->e = e;
+	return 0;
+free:
+	kfree(e);
+	return ret;
+}
+
+/* Must be called with &virtio_gpu_fpriv.context_lock held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+					     struct virtio_gpu_fpriv *vfpriv)
+{
+	char dbgname[TASK_COMM_LEN];
+
+	get_task_comm(dbgname, current);
+	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+				      vfpriv->context_init, strlen(dbgname),
+				      dbgname);
+
+	vfpriv->context_created = true;
+}
+
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
-	char dbgname[TASK_COMM_LEN];
 
 	mutex_lock(&vfpriv->context_lock);
 	if (vfpriv->context_created)
 		goto out_unlock;
 
-	get_task_comm(dbgname, current);
-	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
-				      strlen(dbgname), dbgname);
-	vfpriv->context_created = true;
+	virtio_gpu_create_context_locked(vgdev, vfpriv);
 
 out_unlock:
 	mutex_unlock(&vfpriv->context_lock);
@@ -89,6 +129,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	int in_fence_fd = exbuf->fence_fd;
 	int out_fence_fd = -1;
 	void *buf;
+	uint64_t fence_ctx;
+	uint32_t ring_idx;
+
+	fence_ctx = vgdev->fence_drv.context;
+	ring_idx = 0;
 
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
@@ -96,6 +141,17 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
 		return -EINVAL;
 
+	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
+		if (exbuf->ring_idx >= vfpriv->num_rings)
+			return -EINVAL;
+
+		if (!vfpriv->base_fence_ctx)
+			return -EINVAL;
+
+		fence_ctx = vfpriv->base_fence_ctx;
+		ring_idx = exbuf->ring_idx;
+	}
+
 	exbuf->fence_fd = -1;
 
 	virtio_gpu_create_context(dev, file);
@@ -163,12 +219,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_memdup;
 	}
 
-	out_fence = virtio_gpu_fence_alloc(vgdev);
+	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
 	if (!out_fence) {
 		ret = -ENOMEM;
 		goto out_unresv;
 	}
 
+	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+	if (ret)
+		goto out_unresv;
+
 	if (out_fence_fd >= 0) {
 		sync_file = sync_file_create(&out_fence->f);
 		if (!sync_file) {
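
Userspace opts into per-ring submission by setting VIRTGPU_EXECBUF_RING_IDX. A hedged sketch (cmds/size stand in for a host-specific command stream; include paths vary with the libdrm setup):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <virtgpu_drm.h>

	/* Submit a command stream on ring 1 of a multi-ring context. */
	static int submit_on_ring1(int fd, void *cmds, uint32_t size)
	{
		struct drm_virtgpu_execbuffer eb;

		memset(&eb, 0, sizeof(eb));
		eb.flags = VIRTGPU_EXECBUF_RING_IDX;
		eb.command = (uintptr_t)cmds;
		eb.size = size;
		eb.ring_idx = 1;	/* rejected unless ring_idx < num_rings */

		return drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	}
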
@@ -226,6 +286,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_CROSS_DEVICE:
 		value = vgdev->has_resource_assign_uuid ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CONTEXT_INIT:
+		value = vgdev->has_context_init ? 1 : 0;
+		break;
+	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+		value = vgdev->capset_id_mask;
+		break;
 	default:
 		return -EINVAL;
 	}
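
The new getparam lets userspace fetch the capset mask before creating a context. A sketch of the query (the destination is zero-initialized in case the kernel writes fewer than eight bytes; headers per libdrm as above):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <virtgpu_drm.h>

	/* Returns the bitmask of capset ids the device supports, 0 on error. */
	static uint64_t query_capset_mask(int fd)
	{
		uint64_t mask = 0;
		struct drm_virtgpu_getparam gp;

		memset(&gp, 0, sizeof(gp));
		gp.param = VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs;
		gp.value = (uintptr_t)&mask;

		if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp))
			return 0;
		return mask;
	}
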
@@ -278,7 +344,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	if (params.size == 0)
 		params.size = PAGE_SIZE;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence)
 		return -ENOMEM;
 	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -357,7 +423,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret != 0)
 		goto err_put_free;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence) {
 		ret = -ENOMEM;
 		goto err_unlock;
@@ -417,7 +483,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 		goto err_put_free;
 
 	ret = -ENOMEM;
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+				       0);
 	if (!fence)
 		goto err_unlock;
@@ -662,6 +729,113 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
 	return 0;
 }
 
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+					 void *data, struct drm_file *file)
+{
+	int ret = 0;
+	uint32_t num_params, i, param, value;
+	uint64_t valid_ring_mask;
+	size_t len;
+	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_context_init *args = data;
+
+	num_params = args->num_params;
+	len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+		return -EINVAL;
+
+	/* Number of unique parameters supported at this time. */
+	if (num_params > 3)
+		return -EINVAL;
+
+	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+				     len);
+
+	if (IS_ERR(ctx_set_params))
+		return PTR_ERR(ctx_set_params);
+
+	mutex_lock(&vfpriv->context_lock);
+	if (vfpriv->context_created) {
+		ret = -EEXIST;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < num_params; i++) {
+		param = ctx_set_params[i].param;
+		value = ctx_set_params[i].value;
+
+		switch (param) {
+		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+			if (value > MAX_CAPSET_ID) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if ((vgdev->capset_id_mask & (1 << value)) == 0) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			/* Context capset ID already set */
+			if (vfpriv->context_init &
+			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->context_init |= value;
+			break;
+		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+			if (vfpriv->base_fence_ctx) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if (value > MAX_RINGS) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+			vfpriv->num_rings = value;
+			break;
+		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+			if (vfpriv->ring_idx_mask) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->ring_idx_mask = value;
+			break;
+		default:
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	if (vfpriv->ring_idx_mask) {
+		valid_ring_mask = 0;
+		for (i = 0; i < vfpriv->num_rings; i++)
+			valid_ring_mask |= 1 << i;
+
+		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	virtio_gpu_create_context_locked(vgdev, vfpriv);
+	virtio_gpu_notify(vgdev);
+
+out_unlock:
+	mutex_unlock(&vfpriv->context_lock);
+	kfree(ctx_set_params);
+	return ret;
+}
+
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -698,4 +872,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
 			  virtio_gpu_resource_create_blob_ioctl,
 			  DRM_RENDER_ALLOW),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+			  DRM_RENDER_ALLOW),
 };
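
The ioctl must run before anything else implicitly creates the default context; afterwards it fails with -EEXIST. A usage sketch initializing a two-ring context bound to one capset, polling ring 0 only (capset_id is a placeholder; headers per libdrm as above):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <virtgpu_drm.h>

	static int init_context(int fd, uint32_t capset_id)
	{
		struct drm_virtgpu_context_set_param params[3];
		struct drm_virtgpu_context_init init;

		memset(params, 0, sizeof(params));
		params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
		params[0].value = capset_id;
		params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
		params[1].value = 2;
		params[2].param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK;
		params[2].value = 1 << 0;	/* poll ring 0 only */

		memset(&init, 0, sizeof(init));
		init.num_params = 3;
		init.ctx_set_params = (uintptr_t)params;

		return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	}
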
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index f3379059f324..21f410901694 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -65,6 +65,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 				   int num_capsets)
 {
 	int i, ret;
+	bool invalid_capset_id = false;
 
 	vgdev->capsets = kcalloc(num_capsets,
 				 sizeof(struct virtio_gpu_drv_capset),
@@ -78,19 +79,34 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 		virtio_gpu_notify(vgdev);
 		ret = wait_event_timeout(vgdev->resp_wq,
 					 vgdev->capsets[i].id > 0, 5 * HZ);
-		if (ret == 0) {
+		/*
+		 * Capability ids are defined in the virtio-gpu spec and are
+		 * between 1 and 63, inclusive.
+		 */
+		if (!vgdev->capsets[i].id ||
+		    vgdev->capsets[i].id > MAX_CAPSET_ID)
+			invalid_capset_id = true;
+
+		if (ret == 0)
 			DRM_ERROR("timed out waiting for cap set %d\n", i);
+		else if (invalid_capset_id)
+			DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+		if (ret == 0 || invalid_capset_id) {
 			spin_lock(&vgdev->display_info_lock);
 			kfree(vgdev->capsets);
 			vgdev->capsets = NULL;
 			spin_unlock(&vgdev->display_info_lock);
 			return;
 		}
+
+		vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
 		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
 			 i, vgdev->capsets[i].id,
 			 vgdev->capsets[i].max_version,
 			 vgdev->capsets[i].max_size);
 	}
+
 	vgdev->num_capsets = num_capsets;
 }
@@ -175,13 +191,19 @@ int virtio_gpu_init(struct drm_device *dev)
 			 (unsigned long)vgdev->host_visible_region.addr,
 			 (unsigned long)vgdev->host_visible_region.len);
 	}
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+		vgdev->has_context_init = true;
+	}
 
-	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
 		 vgdev->has_virgl_3d ? '+' : '-',
 		 vgdev->has_edid ? '+' : '-',
 		 vgdev->has_resource_blob ? '+' : '-',
 		 vgdev->has_host_visible ? '+' : '-');
 
+	DRM_INFO("features: %ccontext_init\n",
+		 vgdev->has_context_init ? '+' : '-');
+
 	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
 	if (ret) {
 		DRM_ERROR("failed to find virt queues\n");
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a49fd9480381..6d3cc9e238a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -256,7 +256,8 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+						     0);
 		if (!vgfb->fence)
 			return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index e45dbf14b307..55d80b77d9b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 	return 0;
 }
 
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+			enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (virtio_gpu_is_vram(bo))
+		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+	return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+				      struct sg_table *sgt,
+				      enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (virtio_gpu_is_vram(bo)) {
+		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+		return;
+	}
+
+	drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
 	.ops = {
 		.cache_sgt_mapping = true,
 		.attach = virtio_dma_buf_attach,
 		.detach = drm_gem_map_detach,
-		.map_dma_buf = drm_gem_map_dma_buf,
-		.unmap_dma_buf = drm_gem_unmap_dma_buf,
+		.map_dma_buf = virtgpu_gem_map_dma_buf,
+		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
 		.release = drm_gem_dmabuf_release,
 		.mmap = drm_gem_dmabuf_mmap,
 		.vmap = drm_gem_dmabuf_vmap,
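
drm_gem_map_dma_buf() walks guest pages, which VRAM-backed (host-visible) objects do not have, hence the dispatch above. A minimal kernel-side sketch of the importer's view, assuming the standard dma-buf attachment flow (illustrative helper, not from the patch):

	#include <linux/dma-buf.h>
	#include <linux/scatterlist.h>

	/* For a mappable VRAM object the exporter hands back a single-entry
	 * table carrying the bus address of the mapped VRAM node. */
	static int importer_get_addr(struct dma_buf_attachment *attach,
				     dma_addr_t *addr)
	{
		struct sg_table *sgt;

		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		*addr = sg_dma_address(sgt->sgl);
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return 0;
	}
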
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 2e71e91278b4..7c052efe8836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -91,9 +91,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
-	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
-	if (!vbuf)
-		return ERR_PTR(-ENOMEM);
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
 	       size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -147,10 +145,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 
 	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
 				   resp_size, resp_buf, cb);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
 	*vbuffer_p = vbuf;
 	return (struct virtio_gpu_command *)vbuf->buf;
 }
@@ -205,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 	struct list_head reclaim_list;
 	struct virtio_gpu_vbuffer *entry, *tmp;
 	struct virtio_gpu_ctrl_hdr *resp;
-	u64 fence_id = 0;
+	u64 fence_id;
 
 	INIT_LIST_HEAD(&reclaim_list);
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -232,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 		}
 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
-			u64 f = le64_to_cpu(resp->fence_id);
-
-			if (fence_id > f) {
-				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
-					  __func__, fence_id, f);
-			} else {
-				fence_id = f;
-			}
+			fence_id = le64_to_cpu(resp->fence_id);
+			virtio_gpu_fence_event_process(vgdev, fence_id);
 		}
 		if (entry->resp_cb)
 			entry->resp_cb(vgdev, entry);
 	}
 	wake_up(&vgdev->ctrlq.ack_queue);
 
-	if (fence_id)
-		virtio_gpu_fence_event_process(vgdev, fence_id);
-
 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 		if (entry->objs)
 			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
@@ -917,7 +902,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
 }
 
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
-				   uint32_t nlen, const char *name)
+				   uint32_t context_init, uint32_t nlen,
+				   const char *name)
 {
 	struct virtio_gpu_ctx_create *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -928,6 +914,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
 	cmd_p->hdr.ctx_id = cpu_to_le32(id);
 	cmd_p->nlen = cpu_to_le32(nlen);
+	cmd_p->context_init = cpu_to_le32(context_init);
 	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
 	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
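
Dropping the max-fence-id heuristic is needed because fence ids now rise monotonically only within one ring; the merged completion stream can legally go backwards, so every fenced response is processed on its own. A standalone toy demonstrating where the old check would trip:

	#include <assert.h>
	#include <stdint.h>

	/* Ring A signals ids 1 then 4; ring B signals 2 then 3. Each ring is
	 * ordered, yet a legal merged arrival order is 2, 3, 1, 4. */
	int main(void)
	{
		uint64_t arrival[] = { 2, 3, 1, 4 };
		uint64_t max_seen = 0;
		int out_of_order = 0;

		for (int i = 0; i < 4; i++) {
			if (arrival[i] < max_seen)
				out_of_order = 1;  /* old max-id logic errors here */
			else
				max_seen = arrival[i];
		}
		assert(out_of_order);	/* id 1 arrives after id 3 */
		return 0;
	}
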
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..6b45b0429fef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "virtgpu_drv.h"
 
+#include <linux/dma-mapping.h>
+
 static void virtio_gpu_vram_free(struct drm_gem_object *obj)
 {
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 	return ret;
 }
 
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+					     struct device *dev,
+					     enum dma_data_direction dir)
+{
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+	struct sg_table *sgt;
+	dma_addr_t addr;
+	int ret;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+		// Virtio devices can access the dma-buf via its UUID. Return a stub
+		// sg_table so the dma-buf API still works.
+		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+			ret = -EIO;
+			goto out;
+		}
+		return sgt;
+	}
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	addr = dma_map_resource(dev, vram->vram_node.start,
+				vram->vram_node.size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
+	ret = dma_mapping_error(dev, addr);
+	if (ret)
+		goto out;
+
+	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+	sg_dma_address(sgt->sgl) = addr;
+	sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+	return sgt;
+out:
+	sg_free_table(sgt);
+	kfree(sgt);
+	return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+				   struct sg_table *sgt,
+				   enum dma_data_direction dir)
+{
+	if (sgt->nents) {
+		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+				   sg_dma_len(sgt->sgl), dir,
+				   DMA_ATTR_SKIP_CPU_SYNC);
+	}
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
 static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
 	.open = virtio_gpu_gem_object_open,
 	.close = virtio_gpu_gem_object_close,
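
virtio_gpu_vram_map_dma_buf() maps the contiguous VRAM node with dma_map_resource(), or returns an empty stub table when a sibling virtio device can reach the buffer by UUID instead. A hedged userspace sketch exporting such a blob over PRIME (blob_id and size are placeholders; the host-side allocation protocol is not shown):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <virtgpu_drm.h>

	/* Create a host-visible blob and export it as a dma-buf fd. */
	static int export_vram_blob(int fd, uint64_t size, uint64_t blob_id)
	{
		struct drm_virtgpu_resource_create_blob blob;
		int dmabuf_fd, ret;

		memset(&blob, 0, sizeof(blob));
		blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;	/* VRAM-backed */
		blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
		blob.size = size;
		blob.blob_id = blob_id;	/* assumed set up by prior context cmds */

		ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob);
		if (ret)
			return ret;

		ret = drmPrimeHandleToFD(fd, blob.bo_handle, DRM_CLOEXEC,
					 &dmabuf_fd);
		return ret ? ret : dmabuf_fd;
	}
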