// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_vkms.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include "vmw_surface_cache.h"

#include <drm/drm_crtc.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include <linux/crc32.h>
#include <linux/delay.h>

#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable"

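/*
 * vmw_surface_sync - Make the surface's backing store safe to read
 *
 * Cleans the surface resource, fences the outstanding command stream and
 * waits for the fence, so the guest-backed buffer object holds up-to-date
 * contents before the CRC code maps and reads it.
 */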
static int
vmw_surface_sync(struct vmw_private *vmw,
		 struct vmw_surface *surf)
{
	int ret;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_bo *bo = surf->res.guest_memory_bo;

	vmw_resource_clean(&surf->res);

	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
		goto done;
	}

	ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
		ttm_bo_unreserve(&bo->tbo);
		goto done;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&bo->tbo);
done:
	return ret;
}

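/*
 * compute_crc - CRC32 the contents of a surface
 *
 * Maps the surface's backing buffer object and accumulates a CRC32 over
 * every row of blocks, using the block pitch derived from the surface
 * format.
 */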
static void
compute_crc(struct drm_crtc *crtc,
	    struct vmw_surface *surf,
	    u32 *crc)
{
	u8 *mapped_surface;
	struct vmw_bo *bo = surf->res.guest_memory_bo;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(surf->metadata.format);
	u32 row_pitch_bytes;
	SVGA3dSize blocks;
	u32 y;

	*crc = 0;

	vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
	row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
	WARN_ON(!bo);
	mapped_surface = vmw_bo_map_and_cache(bo);

	for (y = 0; y < blocks.height; y++) {
		*crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
		mapped_surface += row_pitch_bytes;
	}

	vmw_bo_unmap(bo);
}

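/*
 * crc_generate_worker - Deferred CRC computation for the vblank simulator
 *
 * Runs outside of atomic context: syncs the currently set surface, computes
 * its CRC and adds one CRC entry for every frame spanned by the pending
 * [frame_start, frame_end] range recorded by the vblank hrtimer.
 */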
static void
crc_generate_worker(struct work_struct *work)
{
	struct vmw_display_unit *du =
		container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	bool crc_pending;
	u64 frame_start, frame_end;
	u32 crc32 = 0;
	struct vmw_surface *surf = NULL;

	spin_lock_irq(&du->vkms.crc_state_lock);
	crc_pending = du->vkms.crc_pending;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	spin_lock_irq(&du->vkms.crc_state_lock);
	surf = vmw_surface_reference(du->vkms.surface);
	spin_unlock_irq(&du->vkms.crc_state_lock);

	if (surf) {
		if (vmw_surface_sync(vmw, surf)) {
			drm_warn(crtc->dev,
				 "CRC worker wasn't able to sync the crc surface!\n");
			return;
		}

		compute_crc(crtc, surf, &crc32);
		vmw_surface_unreference(&surf);
	}

	spin_lock_irq(&du->vkms.crc_state_lock);
	frame_start = du->vkms.frame_start;
	frame_end = du->vkms.frame_end;
	du->vkms.frame_start = 0;
	du->vkms.frame_end = 0;
	du->vkms.crc_pending = false;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

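/*
 * vmw_vkms_vblank_simulate - hrtimer callback emulating a vblank interrupt
 *
 * Forwards the timer by one frame period, signals the DRM vblank core and,
 * when CRC generation is enabled and a surface is set, records the frame
 * range and queues crc_generate_worker().
 */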
static enum hrtimer_restart
vmw_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	bool has_surface = false;
	u64 ret_overrun;
	bool locked, ret;

	ret_overrun = hrtimer_forward_now(&du->vkms.timer,
					  du->vkms.period_ns);
	if (ret_overrun != 1)
		drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
			       ret_overrun - 1);

	locked = vmw_vkms_vblank_trylock(crtc);
	ret = drm_crtc_handle_vblank(crtc);
	WARN_ON(!ret);
	if (!locked)
		return HRTIMER_RESTART;
	has_surface = du->vkms.surface != NULL;
	vmw_vkms_unlock(crtc);

	if (du->vkms.crc_enabled && has_surface) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		spin_lock(&du->vkms.crc_state_lock);
		if (!du->vkms.crc_pending)
			du->vkms.frame_start = frame;
		else
			drm_dbg_driver(crtc->dev,
				       "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
				       du->vkms.frame_start, frame);
		du->vkms.frame_end = frame;
		du->vkms.crc_pending = true;
		spin_unlock(&du->vkms.crc_state_lock);

		ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
		if (!ret)
			drm_dbg_driver(crtc->dev, "Composer worker already queued\n");
	}

	return HRTIMER_RESTART;
}

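/*
 * VKMS is opt-in: it is enabled only when the GUESTINFO_VBLANK guestinfo key
 * parses as true. On success drm_vblank_init() is called and an ordered
 * workqueue is allocated for the CRC generator; failure to allocate the
 * workqueue disables VKMS again.
 */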
void
vmw_vkms_init(struct vmw_private *vmw)
{
	char buffer[64];
	const size_t max_buf_len = sizeof(buffer) - 1;
	size_t buf_len = max_buf_len;
	int ret;

	vmw->vkms_enabled = false;

	ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
	if (ret || buf_len > max_buf_len)
		return;
	buffer[buf_len] = '\0';

	ret = kstrtobool(buffer, &vmw->vkms_enabled);
	if (!ret && vmw->vkms_enabled) {
		ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
		vmw->vkms_enabled = (ret == 0);
	}

	vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
	if (!vmw->crc_workq) {
		drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.");
		vmw->vkms_enabled = false;
	}
	if (vmw->vkms_enabled)
		drm_info(&vmw->drm, "VKMS enabled\n");
}

void
vmw_vkms_cleanup(struct vmw_private *vmw)
{
	destroy_workqueue(vmw->crc_workq);
}

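/*
 * Reports the timestamp of the last simulated vblank. While vblanks are
 * enabled this is the hrtimer expiry corrected by one frame period (see the
 * comment in the body); otherwise the current time is returned.
 */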
bool
vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
			      int *max_error,
			      ktime_t *vblank_time,
			      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	if (!vmw->vkms_enabled)
		return false;

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(du->vkms.timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= du->vkms.period_ns;

	return true;
}

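/*
 * Starts the per-CRTC hrtimer driving the simulated vblanks, with a period
 * taken from the current mode's frame duration.
 */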
int
vmw_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	if (!vmw->vkms_enabled)
		return -EINVAL;

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);

	return 0;
}

void
vmw_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	hrtimer_cancel(&du->vkms.timer);
	du->vkms.surface = NULL;
	du->vkms.period_ns = ktime_set(0, 0);
}

enum vmw_vkms_lock_state {
	VMW_VKMS_LOCK_UNLOCKED = 0,
	VMW_VKMS_LOCK_MODESET = 1,
	VMW_VKMS_LOCK_VBLANK = 2
};
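
/*
 * du->vkms.atomic_lock holds one of the states above. The modeset path takes
 * it via vmw_vkms_modeset_lock()/vmw_vkms_modeset_lock_relaxed() (spinning
 * until the vblank path releases it), the vblank hrtimer takes it
 * opportunistically via vmw_vkms_vblank_trylock(), and both sides release it
 * with vmw_vkms_unlock(). See the kernel-doc on vmw_vkms_modeset_lock() for
 * why a plain spinlock can't be used here.
 */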

void
vmw_vkms_crtc_init(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
	spin_lock_init(&du->vkms.crc_state_lock);

	INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
	du->vkms.surface = NULL;
}

void
vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	if (du->vkms.surface)
		vmw_surface_unreference(&du->vkms.surface);
	WARN_ON(work_pending(&du->vkms.crc_generator_work));
	hrtimer_cancel(&du->vkms.timer);
}

void
vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		vmw_vkms_modeset_lock(crtc);
}

void
vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	unsigned long flags;
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}

	vmw_vkms_unlock(crtc);
}

void
vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_on(crtc);
}

void
vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_off(crtc);
}

static bool
is_crc_supported(struct drm_crtc *crtc)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return false;

	if (vmw->active_display_unit != vmw_du_screen_target)
		return false;

	return true;
}

static const char * const pipe_crc_sources[] = {"auto"};

static int
crc_parse_source(const char *src_name,
		 bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

const char *const *
vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
			 size_t *count)
{
	*count = 0;
	if (!is_crc_supported(crtc))
		return NULL;

	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

int
vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
			   const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	if (crc_parse_source(src_name, &enabled) < 0) {
		drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

int
vmw_vkms_set_crc_source(struct drm_crtc *crtc,
			const char *src_name)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool enabled, prev_enabled, locked;
	int ret;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	ret = crc_parse_source(src_name, &enabled);

	if (enabled)
		drm_crtc_vblank_get(crtc);

	locked = vmw_vkms_modeset_lock_relaxed(crtc);
	prev_enabled = du->vkms.crc_enabled;
	du->vkms.crc_enabled = enabled;
	if (locked)
		vmw_vkms_unlock(crtc);

	if (prev_enabled)
		drm_crtc_vblank_put(crtc);

	return ret;
}

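/*
 * vmw_vkms_set_crc_surface - Set the surface sampled by the CRC worker
 *
 * Swaps the per-CRTC surface reference used by crc_generate_worker(). The
 * caller is expected to hold the vkms modeset lock, which the WARN_ON below
 * checks.
 */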
void
vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
			 struct vmw_surface *surf)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled && du->vkms.surface != surf) {
		WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
		if (du->vkms.surface)
			vmw_surface_unreference(&du->vkms.surface);
		if (surf)
			du->vkms.surface = vmw_surface_reference(surf);
	}
}

/**
 * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
 * @du: The vmw_display_unit from which to grab the vblank timings
 *
 * Returns the maximum wait time used to acquire the vkms lock: the
 * duration of a single frame, or 1/60th of a second if vblank has not
 * been initialized for the display unit.
 */
static inline u64
vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
{
	s64 nsecs = ktime_to_ns(du->vkms.period_ns);

	return (nsecs > 0) ? nsecs : 16666666;
}

/**
 * vmw_vkms_modeset_lock - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * This function prevents the VKMS timers/callbacks from being called
 * while a modeset operation is in process. We don't want the callbacks
 * e.g. the vblank simulator to be trying to access incomplete state
 * so we need to make sure they execute only when the modeset has
 * finished.
 *
 * Normally this would have been done with a spinlock but locking the
 * entire atomic modeset with vmwgfx is impossible because kms prepare
 * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
 * guard various bits of state). This means we need to synchronize the
 * atomic context (the vblank handler) with the non-atomic entirety
 * of kms, so we use an atomic_t to track which part of vkms has access
 * to the basic vkms state.
 */
void
vmw_vkms_modeset_lock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
			 total_delay, ret, atomic_read(&du->vkms.atomic_lock));
	}
}

/**
 * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * Much like vmw_vkms_modeset_lock except that when the crtc is currently
 * in a modeset it will return immediately.
 *
 * Returns true if actually locked vkms to modeset or false otherwise.
 */
bool
vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED ||
		    ret == VMW_VKMS_LOCK_MODESET ||
		    total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
		return false;
	}

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

/**
 * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
 * @crtc: The crtc to lock for vkms
 *
 * Tries to lock vkms for vblank, returns immediately.
 *
 * Returns true if locked vkms to vblank or false otherwise.
 */
bool
vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	u32 ret;

	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_VBLANK);

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

void
vmw_vkms_unlock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	/* Release flag; mark it as unlocked. */
	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
}