| 1 | /* |
| 2 | * Copyright (C) 2015 Red Hat, Inc. |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining |
| 6 | * a copy of this software and associated documentation files (the |
| 7 | * "Software"), to deal in the Software without restriction, including |
| 8 | * without limitation the rights to use, copy, modify, merge, publish, |
| 9 | * distribute, sublicense, and/or sell copies of the Software, and to |
| 10 | * permit persons to whom the Software is furnished to do so, subject to |
| 11 | * the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice (including the |
| 14 | * next paragraph) shall be included in all copies or substantial |
| 15 | * portions of the Software. |
| 16 | * |
| 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| 20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE |
| 21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
| 22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
| 23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 24 | */ |
| 25 | |
| 26 | #include <linux/virtio.h> |
| 27 | #include <linux/virtio_config.h> |
| 28 | #include <linux/virtio_ring.h> |
| 29 | |
| 30 | #include <drm/drm_file.h> |
| 31 | #include <drm/drm_managed.h> |
| 32 | #include <drm/drm_print.h> |
| 33 | |
| 34 | #include "virtgpu_drv.h" |
| 35 | |
| 36 | static void virtio_gpu_config_changed_work_func(struct work_struct *work) |
| 37 | { |
| 38 | struct virtio_gpu_device *vgdev = |
| 39 | container_of(work, struct virtio_gpu_device, |
| 40 | config_changed_work); |
| 41 | u32 events_read, events_clear = 0; |
| 42 | |
| 43 | /* read the config space */ |
| 44 | virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, |
| 45 | events_read, &events_read); |
| 46 | if (events_read & VIRTIO_GPU_EVENT_DISPLAY) { |
| 47 | if (vgdev->num_scanouts) { |
| 48 | if (vgdev->has_edid) |
| 49 | virtio_gpu_cmd_get_edids(vgdev); |
| 50 | virtio_gpu_cmd_get_display_info(vgdev); |
| 51 | virtio_gpu_notify(vgdev); |
| 52 | drm_helper_hpd_irq_event(dev: vgdev->ddev); |
| 53 | } |
| 54 | events_clear |= VIRTIO_GPU_EVENT_DISPLAY; |
| 55 | } |
| 56 | virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config, |
| 57 | events_clear, &events_clear); |
| 58 | } |
| 59 | |
| 60 | static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, |
| 61 | void (*work_func)(struct work_struct *work)) |
| 62 | { |
| 63 | spin_lock_init(&vgvq->qlock); |
| 64 | init_waitqueue_head(&vgvq->ack_queue); |
| 65 | INIT_WORK(&vgvq->dequeue_work, work_func); |
| 66 | } |
| 67 | |
| 68 | static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, |
| 69 | int num_capsets) |
| 70 | { |
| 71 | int i, ret; |
| 72 | bool invalid_capset_id = false; |
| 73 | struct drm_device *drm = vgdev->ddev; |
| 74 | |
| 75 | vgdev->capsets = drmm_kcalloc(dev: drm, n: num_capsets, |
| 76 | size: sizeof(struct virtio_gpu_drv_capset), |
| 77 | GFP_KERNEL); |
| 78 | if (!vgdev->capsets) { |
| 79 | DRM_ERROR("failed to allocate cap sets\n" ); |
| 80 | return; |
| 81 | } |
| 82 | for (i = 0; i < num_capsets; i++) { |
| 83 | virtio_gpu_cmd_get_capset_info(vgdev, idx: i); |
| 84 | virtio_gpu_notify(vgdev); |
| 85 | ret = wait_event_timeout(vgdev->resp_wq, |
| 86 | vgdev->capsets[i].id > 0, 5 * HZ); |
| 87 | /* |
| 88 | * Capability ids are defined in the virtio-gpu spec and are |
| 89 | * between 1 to 63, inclusive. |
| 90 | */ |
| 91 | if (!vgdev->capsets[i].id || |
| 92 | vgdev->capsets[i].id > MAX_CAPSET_ID) |
| 93 | invalid_capset_id = true; |
| 94 | |
| 95 | if (ret == 0) |
| 96 | DRM_ERROR("timed out waiting for cap set %d\n" , i); |
| 97 | else if (invalid_capset_id) |
| 98 | DRM_ERROR("invalid capset id %u" , vgdev->capsets[i].id); |
| 99 | |
| 100 | if (ret == 0 || invalid_capset_id) { |
| 101 | spin_lock(lock: &vgdev->display_info_lock); |
| 102 | drmm_kfree(dev: drm, data: vgdev->capsets); |
| 103 | vgdev->capsets = NULL; |
| 104 | spin_unlock(lock: &vgdev->display_info_lock); |
| 105 | return; |
| 106 | } |
| 107 | |
| 108 | vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id; |
| 109 | DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n" , |
| 110 | i, vgdev->capsets[i].id, |
| 111 | vgdev->capsets[i].max_version, |
| 112 | vgdev->capsets[i].max_size); |
| 113 | } |
| 114 | |
| 115 | vgdev->num_capsets = num_capsets; |
| 116 | } |
| 117 | |
/*
 * Device probe/bring-up: negotiate features, create the control and
 * cursor virtqueues, read the device configuration, initialize modeset
 * and fire off the initial display/capset queries.
 *
 * Returns 0 on success or a negative errno; on failure dev->dev_private
 * is reset to NULL (vgdev itself is DRM-managed and freed with @dev).
 */
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
	struct virtqueue_info vqs_info[] = {
		{ "control", virtio_gpu_ctrl_ack },
		{ "cursor", virtio_gpu_cursor_ack },
	};
	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	/* only modern (VIRTIO 1.0+) devices are supported */
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	/* DRM-managed: freed automatically when the drm_device goes away */
	vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = vdev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

	/* virgl 3d is only enabled on little-endian builds */
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
		vgdev->has_edid = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		vgdev->has_indirect = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
		vgdev->has_resource_assign_uuid = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
		vgdev->has_resource_blob = true;

	/* optional host-visible shared memory window for blob resources */
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		/* address allocator for carving up the window */
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
		vgdev->has_context_init = true;

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	/* clamp to what the driver can actually expose */
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);

	if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
		/* headless: strip the modeset capabilities from the driver */
		DRM_INFO("KMS disabled\n");
		vgdev->num_scanouts = 0;
		vgdev->has_edid = false;
		dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
	} else {
		DRM_INFO("number of scanouts: %d\n", num_scanouts);
	}

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	/* from here on the device may process queued requests */
	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->num_scanouts) {
		/* prime the display state before the first modeset */
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
				   5 * HZ);
	}
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	return ret;
}
| 270 | |
| 271 | static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev) |
| 272 | { |
| 273 | struct virtio_gpu_drv_cap_cache *cache_ent, *tmp; |
| 274 | |
| 275 | list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) { |
| 276 | kfree(objp: cache_ent->caps_cache); |
| 277 | kfree(objp: cache_ent); |
| 278 | } |
| 279 | } |
| 280 | |
/*
 * Device teardown, counterpart of virtio_gpu_init(): quiesce all
 * deferred work, then reset the device and delete its virtqueues.
 */
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	/* make sure no worker is still touching the virtqueues */
	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	/* reset before del_vqs so the device stops using the rings */
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}
| 292 | |
/*
 * Final release of the drm_device: free driver-side state.  Runs after
 * virtio_gpu_deinit(), when no more requests can be in flight.  vgdev
 * may already be NULL if init failed (init clears dev_private on error).
 */
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);
}
| 307 | |
| 308 | int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file) |
| 309 | { |
| 310 | struct virtio_gpu_device *vgdev = dev->dev_private; |
| 311 | struct virtio_gpu_fpriv *vfpriv; |
| 312 | int handle; |
| 313 | |
| 314 | /* can't create contexts without 3d renderer */ |
| 315 | if (!vgdev->has_virgl_3d) |
| 316 | return 0; |
| 317 | |
| 318 | /* allocate a virt GPU context for this opener */ |
| 319 | vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL); |
| 320 | if (!vfpriv) |
| 321 | return -ENOMEM; |
| 322 | |
| 323 | mutex_init(&vfpriv->context_lock); |
| 324 | |
| 325 | handle = ida_alloc(ida: &vgdev->ctx_id_ida, GFP_KERNEL); |
| 326 | if (handle < 0) { |
| 327 | kfree(objp: vfpriv); |
| 328 | return handle; |
| 329 | } |
| 330 | |
| 331 | vfpriv->ctx_id = handle + 1; |
| 332 | file->driver_priv = vfpriv; |
| 333 | return 0; |
| 334 | } |
| 335 | |
| 336 | void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) |
| 337 | { |
| 338 | struct virtio_gpu_device *vgdev = dev->dev_private; |
| 339 | struct virtio_gpu_fpriv *vfpriv = file->driver_priv; |
| 340 | |
| 341 | if (!vgdev->has_virgl_3d) |
| 342 | return; |
| 343 | |
| 344 | if (vfpriv->context_created) { |
| 345 | virtio_gpu_cmd_context_destroy(vgdev, id: vfpriv->ctx_id); |
| 346 | virtio_gpu_notify(vgdev); |
| 347 | } |
| 348 | |
| 349 | ida_free(&vgdev->ctx_id_ida, id: vfpriv->ctx_id - 1); |
| 350 | mutex_destroy(lock: &vfpriv->context_lock); |
| 351 | kfree(objp: vfpriv); |
| 352 | file->driver_priv = NULL; |
| 353 | } |
| 354 | |