/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio_dma_buf.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
        DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
        DRM_FORMAT_HOST_ARGB8888,
};

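/*
 * Translate a DRM fourcc into the matching VIRTIO_GPU_FORMAT_* value.
 * DRM fourccs name the channels in little-endian packed order while the
 * virtio-gpu enums are named by byte order, hence the apparent swap.
 */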
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
        uint32_t format;

        switch (drm_fourcc) {
        case DRM_FORMAT_XRGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
                break;
        case DRM_FORMAT_ARGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
                break;
        case DRM_FORMAT_BGRX8888:
                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_BGRA8888:
                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
                break;
        default:
                /*
                 * This should not happen, we handle everything listed
                 * in virtio_gpu_formats[].
                 */
                format = 0;
                break;
        }
        WARN_ON(format == 0);
        return format;
}

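/* Allocate a driver-private plane state that wraps struct drm_plane_state. */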
static struct
drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
        struct virtio_gpu_plane_state *new;

        if (WARN_ON(!plane->state))
                return NULL;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        __drm_atomic_helper_plane_duplicate_state(plane, &new->base);

        return &new->base;
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                  plane);
        struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
                                                                                  plane);
        bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
        struct drm_crtc_state *crtc_state;
        int ret;

        if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
                return 0;

        /*
         * Ignore damage clips if the framebuffer attached to the plane's state
         * has changed since the last plane update (page-flip). In this case, a
         * full plane update should happen because uploads are done per-buffer.
         */
        if (old_plane_state->fb != new_plane_state->fb)
                new_plane_state->ignore_damage_clips = true;

        crtc_state = drm_atomic_get_crtc_state(state,
                                               new_plane_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  is_cursor, true);
        return ret;
}

/* For drm_panic */
static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                           struct drm_plane_state *state,
                                           struct drm_rect *rect)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(state->fb->obj[0]);
        struct virtio_gpu_object_array *objs;
        uint32_t w = rect->x2 - rect->x1;
        uint32_t h = rect->y2 - rect->y1;
        uint32_t x = rect->x1;
        uint32_t y = rect->y1;
        uint32_t off = x * state->fb->format->cpp[0] +
                y * state->fb->pitches[0];

        objs = virtio_gpu_panic_array_alloc();
        if (!objs)
                return -ENOMEM;
        virtio_gpu_array_add_obj(objs, &bo->base.base);

        return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
                                                        objs);
}

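/*
 * Upload the damaged rectangle of a dumb (guest-backed) BO to the host
 * resource. The transfer is queued without a fence; the host is notified
 * when the plane is flushed.
 */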
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                      struct drm_plane_state *state,
                                      struct drm_rect *rect)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(state->fb->obj[0]);
        struct virtio_gpu_object_array *objs;
        uint32_t w = rect->x2 - rect->x1;
        uint32_t h = rect->y2 - rect->y1;
        uint32_t x = rect->x1;
        uint32_t y = rect->y1;
        uint32_t off = x * state->fb->format->cpp[0] +
                y * state->fb->pitches[0];

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, &bo->base.base);

        virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
                                           objs, NULL);
}

/* For drm_panic */
static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
                                            uint32_t x, uint32_t y,
                                            uint32_t width, uint32_t height)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

        virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                            width, height);
        virtio_gpu_panic_notify(vgdev);
}

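/*
 * Send a RESOURCE_FLUSH for the given rectangle. If the plane state carries
 * a fence, the flush is fenced and we wait up to 50ms for the host to
 * consume the buffer before returning.
 */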
static void virtio_gpu_resource_flush(struct drm_plane *plane,
                                      uint32_t x, uint32_t y,
                                      uint32_t width, uint32_t height)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_plane_state *vgplane_st;
        struct virtio_gpu_object *bo;

        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
        vgplane_st = to_virtio_gpu_plane_state(plane->state);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        if (vgplane_st->fence) {
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, objs,
                                              vgplane_st->fence);
                virtio_gpu_notify(vgdev);
                dma_fence_wait_timeout(&vgplane_st->fence->f, true,
                                       msecs_to_jiffies(50));
        } else {
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, NULL, NULL);
                virtio_gpu_notify(vgdev);
        }
}

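/*
 * Atomic update for the primary plane: upload damage for dumb BOs, set the
 * scanout when the framebuffer or source rectangle changed, then flush the
 * merged damage rectangle.
 */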
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                                            struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_object *bo;
        struct drm_rect rect;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (!plane->state->fb || !output->crtc.state->active) {
                DRM_DEBUG("nofb\n");
                virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
                                           plane->state->src_w >> 16,
                                           plane->state->src_h >> 16,
                                           0, 0);
                virtio_gpu_notify(vgdev);
                return;
        }

        if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
                return;

        bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
        if (bo->dumb)
                virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

        if (plane->state->fb != old_state->fb ||
            plane->state->src_w != old_state->src_w ||
            plane->state->src_h != old_state->src_h ||
            plane->state->src_x != old_state->src_x ||
            plane->state->src_y != old_state->src_y ||
            output->needs_modeset) {
                output->needs_modeset = false;
                DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
                          bo->hw_res_handle,
                          plane->state->crtc_w, plane->state->crtc_h,
                          plane->state->crtc_x, plane->state->crtc_y,
                          plane->state->src_w >> 16,
                          plane->state->src_h >> 16,
                          plane->state->src_x >> 16,
                          plane->state->src_y >> 16);

                if (bo->host3d_blob || bo->guest_blob) {
                        virtio_gpu_cmd_set_scanout_blob
                                (vgdev, output->index, bo,
                                 plane->state->fb,
                                 plane->state->src_w >> 16,
                                 plane->state->src_h >> 16,
                                 plane->state->src_x >> 16,
                                 plane->state->src_y >> 16);
                } else {
                        virtio_gpu_cmd_set_scanout(vgdev, output->index,
                                                   bo->hw_res_handle,
                                                   plane->state->src_w >> 16,
                                                   plane->state->src_h >> 16,
                                                   plane->state->src_x >> 16,
                                                   plane->state->src_y >> 16);
                }
        }

        virtio_gpu_resource_flush(plane,
                                  rect.x1,
                                  rect.y1,
                                  rect.x2 - rect.x1,
                                  rect.y2 - rect.y1);
}

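/*
 * Pin an imported dma-buf for scanout and, on first use, import its sg
 * table and attach the backing pages to the virtio resource.
 */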
static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
                                           struct drm_plane_state *new_state,
                                           struct drm_gem_object *obj)
{
        struct virtio_gpu_device *vgdev = plane->dev->dev_private;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct dma_buf_attachment *attach = obj->import_attach;
        struct dma_resv *resv = attach->dmabuf->resv;
        struct virtio_gpu_mem_entry *ents = NULL;
        unsigned int nents;
        int ret;

        dma_resv_lock(resv, NULL);

        ret = dma_buf_pin(attach);
        if (ret) {
                dma_resv_unlock(resv);
                return ret;
        }

        if (!bo->sgt) {
                ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
                                                 bo, attach);
                if (ret)
                        goto err;

                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        }

        dma_resv_unlock(resv);
        return 0;

err:
        dma_buf_unpin(attach);
        dma_resv_unlock(resv);
        return ret;
}

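/*
 * prepare_fb: hook up the GEM implicit-fence helpers and allocate a virtio
 * fence for dumb or imported buffers so plane updates can be synchronized
 * with the host.
 */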
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
                                       struct drm_plane_state *new_state)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_plane_state *vgplane_st;
        struct virtio_gpu_object *bo;
        struct drm_gem_object *obj;
        int ret;

        if (!new_state->fb)
                return 0;

        vgfb = to_virtio_gpu_framebuffer(new_state->fb);
        vgplane_st = to_virtio_gpu_plane_state(new_state);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

        drm_gem_plane_helper_prepare_fb(plane, new_state);

        if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
                return 0;

        obj = new_state->fb->obj[0];
        if (bo->dumb || drm_gem_is_imported(obj)) {
                vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
                                                           vgdev->fence_drv.context,
                                                           0);
                if (!vgplane_st->fence)
                        return -ENOMEM;
        }

        if (drm_gem_is_imported(obj)) {
                ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
                if (ret)
                        goto err_fence;
        }

        return 0;

err_fence:
        if (vgplane_st->fence) {
                dma_fence_put(&vgplane_st->fence->f);
                vgplane_st->fence = NULL;
        }

        return ret;
}

static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
{
        struct dma_buf_attachment *attach = obj->import_attach;
        struct dma_resv *resv = attach->dmabuf->resv;

        dma_resv_lock(resv, NULL);
        dma_buf_unpin(attach);
        dma_resv_unlock(resv);
}

static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
                                        struct drm_plane_state *state)
{
        struct virtio_gpu_plane_state *vgplane_st;
        struct drm_gem_object *obj;

        if (!state->fb)
                return;

        vgplane_st = to_virtio_gpu_plane_state(state);
        if (vgplane_st->fence) {
                dma_fence_put(&vgplane_st->fence->f);
                vgplane_st->fence = NULL;
        }

        obj = state->fb->obj[0];
        if (drm_gem_is_imported(obj))
                virtio_gpu_cleanup_imported_obj(obj);
}

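/*
 * Atomic update for the cursor plane: upload a new cursor image
 * synchronously, then send an UPDATE_CURSOR or MOVE_CURSOR command to the
 * host.
 */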
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                                           struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_plane_state *vgplane_st;
        struct virtio_gpu_object *bo = NULL;
        uint32_t handle;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                vgplane_st = to_virtio_gpu_plane_state(plane->state);
                bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                handle = 0;
        }

        if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
                /* new cursor -- update & wait */
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
                         plane->state->crtc_h,
                         0, 0, objs, vgplane_st->fence);
                virtio_gpu_notify(vgdev);
                dma_fence_wait(&vgplane_st->fence->f, true);
        }

        if (plane->state->fb != old_state->fb) {
                DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
                          plane->state->crtc_x,
                          plane->state->crtc_y,
                          plane->state->hotspot_x,
                          plane->state->hotspot_y);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
                output->cursor.resource_id = cpu_to_le32(handle);
                if (plane->state->fb) {
                        output->cursor.hot_x =
                                cpu_to_le32(plane->state->hotspot_x);
                        output->cursor.hot_y =
                                cpu_to_le32(plane->state->hotspot_y);
                } else {
                        output->cursor.hot_x = cpu_to_le32(0);
                        output->cursor.hot_y = cpu_to_le32(0);
                }
        } else {
                DRM_DEBUG("move +%d+%d\n",
                          plane->state->crtc_x,
                          plane->state->crtc_y);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
        }
        output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
        output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
        virtio_gpu_cursor_ping(vgdev, output);
}

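/*
 * drm_panic hook: describe the buffer currently being scanned out, if it is
 * CPU accessible.
 */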
static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
                                         struct drm_scanout_buffer *sb)
{
        struct virtio_gpu_object *bo;

        if (!plane->state || !plane->state->fb || !plane->state->visible)
                return -ENODEV;

        bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

        if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
                return -ENODEV;

        if (bo->base.vaddr) {
                iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
        } else {
                struct drm_gem_shmem_object *shmem = &bo->base;

                if (!shmem->pages)
                        return -ENODEV;
                /* map scanout buffer later */
                sb->pages = shmem->pages;
        }

        sb->format = plane->state->fb->format;
        sb->height = plane->state->fb->height;
        sb->width = plane->state->fb->width;
        sb->pitch[0] = plane->state->fb->pitches[0];
        return 0;
}

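/*
 * drm_panic hook: push the whole framebuffer to the host after the panic
 * message has been drawn.
 */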
static void virtio_panic_flush(struct drm_plane *plane)
{
        struct virtio_gpu_object *bo;
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_rect rect;

        rect.x1 = 0;
        rect.y1 = 0;
        rect.x2 = plane->state->fb->width;
        rect.y2 = plane->state->fb->height;

        bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

        if (bo->dumb) {
                if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
                                                    &rect))
                        return;
        }

        virtio_gpu_panic_resource_flush(plane,
                                        plane->state->src_x >> 16,
                                        plane->state->src_y >> 16,
                                        plane->state->src_w >> 16,
                                        plane->state->src_h >> 16);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
        .prepare_fb = virtio_gpu_plane_prepare_fb,
        .cleanup_fb = virtio_gpu_plane_cleanup_fb,
        .atomic_check = virtio_gpu_plane_atomic_check,
        .atomic_update = virtio_gpu_primary_plane_update,
        .get_scanout_buffer = virtio_drm_get_scanout_buffer,
        .panic_flush = virtio_panic_flush,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
        .prepare_fb = virtio_gpu_plane_prepare_fb,
        .cleanup_fb = virtio_gpu_plane_cleanup_fb,
        .atomic_check = virtio_gpu_plane_atomic_check,
        .atomic_update = virtio_gpu_cursor_plane_update,
};

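/* Create a primary or cursor plane for the given scanout index. */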
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
                                        enum drm_plane_type type,
                                        int index)
{
        struct drm_device *dev = vgdev->ddev;
        const struct drm_plane_helper_funcs *funcs;
        struct drm_plane *plane;
        const uint32_t *formats;
        int nformats;

        if (type == DRM_PLANE_TYPE_CURSOR) {
                formats = virtio_gpu_cursor_formats;
                nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
                funcs = &virtio_gpu_cursor_helper_funcs;
        } else {
                formats = virtio_gpu_formats;
                nformats = ARRAY_SIZE(virtio_gpu_formats);
                funcs = &virtio_gpu_primary_helper_funcs;
        }

        plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
                                           1 << index, &virtio_gpu_plane_funcs,
                                           formats, nformats, NULL, type, NULL);
        if (IS_ERR(plane))
                return plane;

        drm_plane_helper_add(plane, funcs);

        if (type == DRM_PLANE_TYPE_PRIMARY)
                drm_plane_enable_fb_damage_clips(plane);

        return plane;
}