// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};

/*
 * A scan that finds no dirty pages triggers a transition to the _MKWRITE
 * method; similarly, a scan where more than a certain percentage of the
 * pages are dirty triggers a transition to the _PAGETABLE method. How many
 * consecutive triggers should we wait for before changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10

/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @ref_count: Reference count for this structure. Must be first member!
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	struct kref ref_count;
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned long bitmap_size;
	unsigned long bitmap[];
};

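/**
 * vmw_bo_is_dirty - Check whether a buffer object has pending dirty pages
 * @vbo: The buffer object
 *
 * Return: true if dirty tracking is enabled and the tracked dirty span
 * is non-empty, false otherwise.
 */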
bool vmw_bo_is_dirty(struct vmw_bo *vbo)
{
	return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
}

/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits, clears those bits, and updates the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;

	num_marked = clean_record_shared_mapping_range(mapping, offset,
						       dirty->bitmap_size,
						       offset, &dirty->bitmap[0],
						       &dirty->start,
						       &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
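		/*
		 * Switch to mkwrite tracking: write-protect the whole
		 * range so that future writes fault into mkwrite(), and
		 * record any pages dirtied before the write-protection
		 * took effect.
		 */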
		wp_shared_mapping_range(mapping, offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping, offset,
						  dirty->bitmap_size, offset,
						  &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}

/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;

	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(mapping,
					     dirty->start + offset,
					     dirty->end - dirty->start);

	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
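		/*
		 * Clean any outstanding PTE dirty bits, then reset the
		 * bitmap to conservatively mark the whole tracked
		 * [start, end) span as dirty.
		 */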
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}

/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}

/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}

/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	vmw_bo_dirty_pre_unmap(vbo, start, end);
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}

/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user to a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */
int vmw_bo_dirty_add(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
	size_t size;

	if (dirty) {
		kref_get(&dirty->ref_count);
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty)
		return -ENOMEM;

	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	kref_init(&dirty->ref_count);
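	/*
	 * Small bos are covered by roughly a single page-table page,
	 * making a pagetable scan cheap. Larger bos start out using
	 * the mkwrite method instead.
	 */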
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;

		/* Write-protect and then pick up already dirty bits */
		wp_shared_mapping_range(mapping, offset, num_pages);
		clean_record_shared_mapping_range(mapping, offset, num_pages,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}

	vbo->dirty = dirty;

	return 0;
}

/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, the dirty-tracking object is
 * freed and the pointer to it is cleared.
 */
void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

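	/*
	 * kvfree() is cast to the kref release function; this relies on
	 * ref_count being the first member of struct vmw_bo_dirty.
	 */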
	if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
		vbo->dirty = NULL;
}

/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backing mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_bo *vbo = res->guest_memory_bo;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->guest_memory_offset;
	unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

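	/* Trim the tracked dirty span where the resource covered its ends. */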
	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

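/**
 * vmw_bo_dirty_clear - Clear the dirty tracking of a buffer object
 * @vbo: The buffer object
 *
 * This function clears all dirty bits covering the whole buffer object
 * and resets the tracked dirty span accordingly.
 */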
void vmw_bo_dirty_clear(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = 0;
	unsigned long res_end = vbo->tbo.base.size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
	}

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will clear all dirty ranges affecting the resource from
 * its backing mob's dirty tracking.
 */
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->guest_memory_offset;
	unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
	struct vmw_bo *vbo = res->guest_memory_bo;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

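/**
 * vmw_bo_vm_mkwrite - The struct vm_operations_struct page_mkwrite() callback
 * @vmf: The fault information
 *
 * This function records a write access in the dirty bitmap when the
 * _MKWRITE tracking method is used.
 *
 * Return: Zero on success, VM_FAULT_SIGBUS if the page offset is out of
 * range, or the return value of ttm_bo_vm_reserve() on contention.
 */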
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

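	/* Record the write access when using the _MKWRITE tracking method. */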
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

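/**
 * vmw_bo_vm_fault - The struct vm_operations_struct fault() callback
 * @vmf: The fault information
 *
 * This function validates the fault offset, limits prefaulting based on
 * dirty tracking, selects a suitable page protection and hands off to
 * the TTM fault helpers.
 *
 * Return: VM_FAULT_SIGBUS on an out-of-range access, otherwise the
 * vm_fault_t result of the TTM fault helpers.
 */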
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

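	/*
	 * With dirty tracking enabled, refuse faults beyond the buffer
	 * and clamp prefaulting to what the intersecting resources allow.
	 */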
	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= PFN_UP(bo->resource->size) ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		num_prefault = min(num_prefault, allowed_prefault);
	}

	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}