// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright © 2024-2025 Intel Corporation
 */

#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <drm/drm_drv.h>
#include <drm/drm_pagemap.h>

/**
 * DOC: Overview
 *
 * The DRM pagemap layer is intended to augment the dev_pagemap functionality
 * by providing a way to populate a struct mm_struct virtual range with device
 * private pages, and by providing helpers to abstract device memory
 * allocations, to migrate memory back and forth between device memory and
 * system RAM, and to handle access (and in the future migration) between
 * devices implementing a fast interconnect that is not necessarily visible
 * to the rest of the system.
 *
 * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
 * instances to populate struct mm_struct virtual ranges with memory. The
 * migration is best effort only and may thus fail. The implementation should
 * also handle device unbinding by blocking (returning an -ENODEV error) new
 * population requests and, after that, migrating all device pages back to
 * system RAM.
 */

/**
 * DOC: Migration
 *
 * Migration granularity typically follows the GPU SVM range requests, but
 * if there are clashes, due to races or due to the fact that multiple GPU
 * SVM instances have different views of the ranges used, parts of a requested
 * range may already be present in the requested device memory. In that case
 * the implementation has a variety of options: It can fail, it can populate
 * only the part of the range that isn't already in device memory, or it can
 * evict the range to system memory before trying to migrate. Ideally an
 * implementation would just try to migrate the missing part of the range and
 * allocate just enough memory to do so.
 *
 * When migrating to system memory as a response to a CPU fault or a device
 * memory eviction request, currently a full device memory allocation is
 * migrated back to system memory. Moving forward this might need improvement
 * for situations where a single page needs bouncing between system memory and
 * device memory due to, for example, atomic operations.
 *
 * Key DRM pagemap components:
 *
 * - Device Memory Allocations:
 *	Embedded structure containing enough information for the drm_pagemap to
 *	migrate to / from device memory.
 *
 * - Device Memory Operations:
 *	Define the interface for driver-specific device memory operations:
 *	release memory, populate pfns, and copy to / from device memory.
 */

/**
 * struct drm_pagemap_zdd - GPU SVM zone device data
 *
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @device_private_page_owner: Device private pages owner
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */
struct drm_pagemap_zdd {
	struct kref refcount;
	struct drm_pagemap_devmem *devmem_allocation;
	void *device_private_page_owner;
};

/**
 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
 * @device_private_page_owner: Device private pages owner
 *
 * This function allocates and initializes a new zdd structure. It sets up the
 * reference count and the device private page owner.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */
static struct drm_pagemap_zdd *
drm_pagemap_zdd_alloc(void *device_private_page_owner)
{
	struct drm_pagemap_zdd *zdd;

	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
	if (!zdd)
		return NULL;

	kref_init(&zdd->refcount);
	zdd->devmem_allocation = NULL;
	zdd->device_private_page_owner = device_private_page_owner;

	return zdd;
}

/**
 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * This function increments the reference count of the provided zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */
static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
{
	kref_get(&zdd->refcount);
	return zdd;
}

/**
 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the reference count structure.
 *
 * This function releases the device memory allocation associated with the
 * zdd, if any, and frees the zdd.
 */
static void drm_pagemap_zdd_destroy(struct kref *ref)
{
	struct drm_pagemap_zdd *zdd =
		container_of(ref, struct drm_pagemap_zdd, refcount);
	struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;

	if (devmem) {
		complete_all(&devmem->detached);
		if (devmem->ops->devmem_release)
			devmem->ops->devmem_release(devmem);
	}
	kfree(zdd);
}

/**
 * drm_pagemap_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 *
 * This function decrements the reference count of the provided zdd structure
 * and destroys it if the count drops to zero.
 */
static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
{
	kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
}

/**
 * drm_pagemap_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * This function unlocks and puts a page.
 */
static void drm_pagemap_migration_unlock_put_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}

/**
 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * This function unlocks and puts an array of pages.
 */
static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
						   unsigned long *migrate_pfn)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!migrate_pfn[i])
			continue;

		page = migrate_pfn_to_page(migrate_pfn[i]);
		drm_pagemap_migration_unlock_put_page(page);
		migrate_pfn[i] = 0;
	}
}

/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_pagemap_get_devmem_page(struct page *page,
					struct drm_pagemap_zdd *zdd)
{
	page->zone_device_data = drm_pagemap_zdd_get(zdd);
	zone_device_page_init(page, page_pgmap(page), 0);
}

/**
 * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
 * @dev: The device for which the pages are being mapped
 * @pagemap_addr: Array to store DMA information corresponding to mapped pages
 * @migrate_pfn: Array of migrate page frame numbers to map
 * @npages: Number of pages to map
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function maps pages of memory for migration usage in GPU SVM. It
 * iterates over each page frame number provided in @migrate_pfn, maps the
 * corresponding page, and stores the DMA information in the provided
 * @pagemap_addr array.
 *
 * Returns: 0 on success, -EFAULT if an error occurs during mapping.
 */
static int drm_pagemap_migrate_map_pages(struct device *dev,
					 struct drm_pagemap_addr *pagemap_addr,
					 unsigned long *migrate_pfn,
					 unsigned long npages,
					 enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
		dma_addr_t dma_addr;
		struct folio *folio;
		unsigned int order = 0;

		if (!page)
			goto next;

		if (WARN_ON_ONCE(is_zone_device_page(page)))
			return -EFAULT;

		folio = page_folio(page);
		order = folio_order(folio);

		dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
		if (dma_mapping_error(dev, dma_addr))
			return -EFAULT;

		pagemap_addr[i] =
			drm_pagemap_addr_encode(dma_addr,
						DRM_INTERCONNECT_SYSTEM,
						order, dir);

next:
		i += NR_PAGES(order);
	}

	return 0;
}

/**
 * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
 * @dev: The device for which the pages were mapped
 * @pagemap_addr: Array of DMA information corresponding to mapped pages
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function unmaps previously mapped pages of memory for GPU Shared
 * Virtual Memory (SVM). It iterates over each DMA address provided in
 * @pagemap_addr, checks if it's valid and not already unmapped, and unmaps
 * the corresponding page.
 */
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
					    struct drm_pagemap_addr *pagemap_addr,
					    unsigned long npages,
					    enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages;) {
		if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
			goto next;

		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);

next:
		i += NR_PAGES(pagemap_addr[i].order);
	}
}

static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/**
 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
 * @devmem_allocation: The device memory allocation to migrate to.
 * The caller should hold a reference to the device memory allocation,
 * and the reference is consumed by this function unless it returns with
 * an error.
 * @mm: Pointer to the struct mm_struct.
 * @start: Start of the virtual address range to migrate.
 * @end: End of the virtual address range to migrate.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 * @pgmap_owner: Not used currently, since only system memory is considered.
 *
 * This function migrates the specified virtual address range to device memory.
 * It performs the necessary setup and invokes the driver-specific operations for
 * migration to device memory. Expected to be called while holding the mmap lock in
 * at least read mode.
 *
 * Note: The @timeslice_ms parameter can typically be used to force data to
 * remain in pagemap pages long enough for a GPU to perform a task and to prevent
 * a migration livelock. One alternative would be for the GPU driver to block
 * in a mmu_notifier for the specified amount of time, but adding the
 * functionality to the pagemap is likely nicer to the system as a whole.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	struct migrate_vma migrate = {
		.start = start,
		.end = end,
		.pgmap_owner = pgmap_owner,
		.flags = MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i, npages = npages_in_range(start, end);
	struct vm_area_struct *vas;
	struct drm_pagemap_zdd *zdd = NULL;
	struct page **pages;
	struct drm_pagemap_addr *pagemap_addr;
	void *buf;
	int err;

	mmap_assert_locked(mm);

	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
	    !ops->copy_to_ram)
		return -EOPNOTSUPP;

	vas = vma_lookup(mm, start);
	if (!vas) {
		err = -ENOENT;
		goto err_out;
	}

	if (end > vas->vm_end || start < vas->vm_start) {
		err = -EINVAL;
		goto err_out;
	}

	if (!vma_is_anonymous(vas)) {
		err = -EBUSY;
		goto err_out;
	}

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;

	zdd = drm_pagemap_zdd_alloc(pgmap_owner);
	if (!zdd) {
		err = -ENOMEM;
		goto err_free;
	}

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	if (!migrate.cpages) {
		err = -EFAULT;
		goto err_free;
	}

	if (migrate.cpages != npages) {
		err = -EBUSY;
		goto err_finalize;
	}

	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
					    migrate.src, npages, DMA_TO_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i) {
		struct page *page = pfn_to_page(migrate.dst[i]);

		pages[i] = page;
		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
		drm_pagemap_get_devmem_page(page, zdd);
	}

	err = ops->copy_to_devmem(pages, pagemap_addr, npages,
				  devmem_allocation->pre_migrate_fence);
	if (err)
		goto err_finalize;

	dma_fence_put(devmem_allocation->pre_migrate_fence);
	devmem_allocation->pre_migrate_fence = NULL;

	/* Upon success bind devmem allocation to range and zdd */
	devmem_allocation->timeslice_expiration = get_jiffies_64() +
		msecs_to_jiffies(timeslice_ms);
	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
					DMA_TO_DEVICE);
err_free:
	if (zdd)
		drm_pagemap_zdd_put(zdd);
	kvfree(buf);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);

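/*
 * Example: migrating a CPU virtual range to device memory from a driver's
 * drm_pagemap_ops::populate_mm() callback. This is an illustrative sketch
 * only; struct my_devmem, my_devmem_alloc(), my_devmem_free(), my_devmem_ops,
 * my_dev and my_owner are hypothetical driver-side names, not part of this
 * file. Note that drm_pagemap_populate_mm() already holds the mmap read lock
 * when this callback runs, which is what drm_pagemap_migrate_to_devmem()
 * expects.
 *
 *	static int my_populate_mm(struct drm_pagemap *dpagemap,
 *				  unsigned long start, unsigned long end,
 *				  struct mm_struct *mm,
 *				  unsigned long timeslice_ms)
 *	{
 *		struct my_devmem *alloc;
 *		int err;
 *
 *		// Driver-specific allocation embedding struct drm_pagemap_devmem.
 *		alloc = my_devmem_alloc(end - start);
 *		if (IS_ERR(alloc))
 *			return PTR_ERR(alloc);
 *
 *		drm_pagemap_devmem_init(&alloc->base, my_dev, mm, &my_devmem_ops,
 *					dpagemap, end - start, NULL);
 *
 *		// Consumes the devmem reference unless it returns an error.
 *		err = drm_pagemap_migrate_to_devmem(&alloc->base, mm, start, end,
 *						    timeslice_ms, my_owner);
 *		if (err)
 *			my_devmem_free(alloc);
 *
 *		return err;
 *	}
 */
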
/**
 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages to migrate
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * This function populates the RAM migrate page frame numbers (PFNs) for the
 * specified VM area structure. It allocates and locks pages in the VM area for
 * RAM usage. If @vas is non-NULL, vma_alloc_folio() is used for allocation;
 * if it is NULL, folio_alloc() is used.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
						struct page *fault_page,
						unsigned long npages,
						unsigned long *mpages,
						unsigned long *src_mpfn,
						unsigned long *mpfn,
						unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < npages;) {
		struct page *page = NULL, *src_page;
		struct folio *folio;
		unsigned int order = 0;

		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
			goto next;

		src_page = migrate_pfn_to_page(src_mpfn[i]);
		if (!src_page)
			goto next;

		if (fault_page) {
			if (src_page->zone_device_data !=
			    fault_page->zone_device_data)
				goto next;
		}

		order = folio_order(page_folio(src_page));

		/* TODO: Support fallback to single pages if THP allocation fails */
		if (vas)
			folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
		else
			folio = folio_alloc(GFP_HIGHUSER, order);

		if (!folio)
			goto free_pages;

		page = folio_page(folio, 0);
		mpfn[i] = migrate_pfn(page_to_pfn(page));

next:
		if (page)
			addr += page_size(page);
		else
			addr += PAGE_SIZE;

		i += NR_PAGES(order);
	}

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = 0;

		if (!page)
			goto next_lock;

		WARN_ON_ONCE(!folio_trylock(page_folio(page)));

		order = folio_order(page_folio(page));
		*mpages += NR_PAGES(order);

next_lock:
		i += NR_PAGES(order);
	}

	return 0;

free_pages:
	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = 0;

		if (!page)
			goto next_put;

		order = folio_order(page_folio(page));

		put_page(page);
		mpfn[i] = 0;

next_put:
		i += NR_PAGES(order);
	}
	return -ENOMEM;
}

/**
 * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
 * lock; migration is done via the migrate_device_* functions.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	unsigned long npages, mpages = 0;
	struct page **pages;
	unsigned long *src, *dst;
	struct drm_pagemap_addr *pagemap_addr;
	void *buf;
	int i, err = 0;
	unsigned int retry_count = 2;

	npages = devmem_allocation->size >> PAGE_SHIFT;

retry:
	if (!mmget_not_zero(devmem_allocation->mm))
		return -EFAULT;

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	src = buf;
	dst = buf + (sizeof(*src) * npages);
	pagemap_addr = buf + (2 * sizeof(*src) * npages);
	pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;

	err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
	if (err)
		goto err_free;

	err = migrate_device_pfns(src, npages);
	if (err)
		goto err_free;

	err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
						   src, dst, 0);
	if (err || !mpages)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
					    dst, npages, DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(src[i]);

	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, dst);
	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
					DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:
	mmput_async(devmem_allocation->mm);

	if (completion_done(&devmem_allocation->detached))
		return 0;

	if (retry_count--) {
		cond_resched();
		goto retry;
	}

	return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);

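/*
 * Example: evicting a device memory allocation back to system RAM, for
 * instance from a driver's memory-pressure or unbind path. Illustrative
 * sketch only; struct my_devmem and its embedded drm_pagemap_devmem base
 * member are hypothetical.
 *
 *	static int my_evict(struct my_devmem *alloc)
 *	{
 *		int err;
 *
 *		// Best effort and retried internally; may still return -EBUSY
 *		// if CPU page references keep pages pinned.
 *		err = drm_pagemap_evict_to_ram(&alloc->base);
 *		if (err)
 *			return err;
 *
 *		// A zero return means the allocation has been detached from
 *		// its zone-device pages; the driver may now recycle the
 *		// underlying device memory.
 *		return 0;
 *	}
 */
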
/**
 * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @device_private_page_owner: Device private pages owner
 * @page: Pointer to the page for fault handling (can be NULL)
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * This internal function performs the migration of the specified GPU SVM range
 * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and
 * invokes the driver-specific operations for migration to RAM.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
					void *device_private_page_owner,
					struct page *page,
					unsigned long fault_addr,
					unsigned long size)
{
	struct migrate_vma migrate = {
		.vma = vas,
		.pgmap_owner = device_private_page_owner,
		.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
			MIGRATE_VMA_SELECT_DEVICE_COHERENT,
		.fault_page = page,
	};
	struct drm_pagemap_zdd *zdd;
	const struct drm_pagemap_devmem_ops *ops;
	struct device *dev = NULL;
	unsigned long npages, mpages = 0;
	struct page **pages;
	struct drm_pagemap_addr *pagemap_addr;
	unsigned long start, end;
	void *buf;
	int i, err = 0;

	if (page) {
		zdd = page->zone_device_data;
		if (time_before64(get_jiffies_64(),
				  zdd->devmem_allocation->timeslice_expiration))
			return 0;
	}

	start = ALIGN_DOWN(fault_addr, size);
	end = ALIGN(fault_addr + 1, size);

	/* Corner case where the VMA has been partially unmapped */
	if (start < vas->vm_start)
		start = vas->vm_start;
	if (end > vas->vm_end)
		end = vas->vm_end;

	migrate.start = start;
	migrate.end = end;
	npages = npages_in_range(start, end);

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	/* Raced with another CPU fault, nothing to do */
	if (!migrate.cpages)
		goto err_free;

	if (!page) {
		for (i = 0; i < npages; ++i) {
			if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
				continue;

			page = migrate_pfn_to_page(migrate.src[i]);
			break;
		}

		if (!page)
			goto err_finalize;
	}
	zdd = page->zone_device_data;
	ops = zdd->devmem_allocation->ops;
	dev = zdd->devmem_allocation->dev;

	err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
						   migrate.src, migrate.dst,
						   start);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
					    DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(migrate.src[i]);

	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	if (dev)
		drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
						DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:
	return err;
}

/**
 * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
 * @folio: Pointer to the folio
 *
 * This function is a callback used to put the GPU SVM zone device data
 * associated with a folio when it is being released.
 */
static void drm_pagemap_folio_free(struct folio *folio)
{
	drm_pagemap_zdd_put(folio->page.zone_device_data);
}

/**
 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * This function is a page fault handler used to migrate a virtual range
 * to RAM. The device memory allocation in which the device page is found is
 * migrated in its entirety.
 *
 * Returns:
 * VM_FAULT_SIGBUS on failure, 0 on success.
 */
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
	int err;

	err = __drm_pagemap_migrate_to_ram(vmf->vma,
					   zdd->device_private_page_owner,
					   vmf->page, vmf->address,
					   zdd->devmem_allocation->size);

	return err ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
	.folio_free = drm_pagemap_folio_free,
	.migrate_to_ram = drm_pagemap_migrate_to_ram,
};

/**
 * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
 *
 * Returns:
 * Pointer to the GPU SVM device page map operations structure.
 */
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
{
	return &drm_pagemap_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);

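/*
 * Example: hooking the shared dev_pagemap_ops up to a driver-owned
 * struct dev_pagemap covering a device memory region, so that CPU faults on
 * device private pages reach drm_pagemap_migrate_to_ram(). Illustrative
 * sketch only; the resource setup (@res) and @owner cookie are assumptions
 * made for the example.
 *
 *	static int my_pagemap_create(struct device *dev, struct resource *res,
 *				     void *owner, struct dev_pagemap *pagemap)
 *	{
 *		void *addr;
 *
 *		pagemap->type = MEMORY_DEVICE_PRIVATE;
 *		pagemap->range.start = res->start;
 *		pagemap->range.end = res->end;
 *		pagemap->nr_range = 1;
 *		pagemap->ops = drm_pagemap_pagemap_ops_get();
 *		pagemap->owner = owner;
 *
 *		// Creates struct pages for the device memory region.
 *		addr = devm_memremap_pages(dev, pagemap);
 *		return PTR_ERR_OR_ZERO(addr);
 *	}
 */
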
/**
 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
 *
 * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap we're allocating from.
 * @size: Size of device memory allocation
 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
 * (May be NULL).
 */
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size,
			     struct dma_fence *pre_migrate_fence)
{
	init_completion(&devmem_allocation->detached);
	devmem_allocation->dev = dev;
	devmem_allocation->mm = mm;
	devmem_allocation->ops = ops;
	devmem_allocation->dpagemap = dpagemap;
	devmem_allocation->size = size;
	devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);

/**
 * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
 * @page: The struct page.
 *
 * Return: A pointer to the struct drm_pagemap of a device private page that
 * was populated from the struct drm_pagemap. If the page was *not* populated
 * from a struct drm_pagemap, the result is undefined and the function call
 * may result in dereferencing an invalid address.
 */
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	return zdd->devmem_allocation->dpagemap;
}
EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);

/**
 * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
 * @dpagemap: Pointer to the drm_pagemap managing the device memory
 * @start: Start of the virtual range to populate.
 * @end: End of the virtual range to populate.
 * @mm: Pointer to the virtual address space.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 *
 * Attempt to populate a virtual range with device memory pages,
 * clearing them or migrating data from the existing pages if necessary.
 * The function is best effort only, and implementations may vary
 * in how hard they try to satisfy the request.
 *
 * Return: %0 on success, negative error code on error. If the hardware
 * device was removed / unbound the function will return %-ENODEV.
 */
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms)
{
	int err;

	if (!mmget_not_zero(mm))
		return -EFAULT;
	mmap_read_lock(mm);
	err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
					 timeslice_ms);
	mmap_read_unlock(mm);
	mmput(mm);

	return err;
}
EXPORT_SYMBOL(drm_pagemap_populate_mm);

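/*
 * Example: requesting device memory population for a faulting range from a
 * GPU page-fault handler, falling back to system memory on failure.
 * Illustrative sketch only; my_bind_range(), the surrounding fault handler
 * and the 5 ms timeslice are hypothetical driver choices.
 *
 *	static int my_handle_gpu_fault(struct drm_pagemap *dpagemap,
 *				       struct mm_struct *mm,
 *				       unsigned long start, unsigned long end)
 *	{
 *		int err;
 *
 *		// Best effort: on failures other than -ENODEV the range can
 *		// still be mapped from system memory.
 *		err = drm_pagemap_populate_mm(dpagemap, start, end, mm, 5);
 *		if (err == -ENODEV)
 *			return err;	// Device removed / unbound.
 *
 *		return my_bind_range(mm, start, end);
 *	}
 */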