/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"
#include "ttm_pool_internal.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
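
/*
 * Both limits default to the values passed to ttm_tt_mgr_init() (see
 * below) when left at zero. Since they are parameters of the "ttm"
 * module, they can presumably also be set at boot time, e.g. with
 * "ttm.pages_limit=1048576" on the kernel command line (the value is
 * hypothetical, shown only as an illustration).
 */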

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption the
	 * mapped TT pages need to be decrypted or otherwise the drivers
	 * will end up sending encrypted mem to the gpu.
	 */
	if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
	    cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
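
/*
 * A minimal sketch of the driver callback invoked above (names are
 * hypothetical and not part of TTM; many drivers instead embed struct
 * ttm_tt in a driver-private tt structure). It uses ttm_tt_init(),
 * defined further down in this file:
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */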

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
	ttm->restore = NULL;
	ttm->backup = NULL;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm_tt_is_backed_up(ttm))
		ttm_pool_drop_backed_up(ttm);
	if (ttm->backup) {
		ttm_backup_fini(ttm->backup);
		ttm->backup = NULL;
	}

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
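
/*
 * The matching teardown in a driver's ->ttm_tt_destroy() hook would,
 * under the same assumptions as the creation sketch above, look like:
 *
 *	static void my_ttm_tt_destroy(struct ttm_device *bdev,
 *				      struct ttm_tt *tt)
 *	{
 *		ttm_tt_fini(tt);
 *		kfree(tt);
 *	}
 */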

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_backup() - Helper to back up a struct ttm_tt.
 * @bdev: The TTM device.
 * @tt: The struct ttm_tt.
 * @flags: Flags that govern the backup behaviour.
 *
 * Update the page accounting and call ttm_pool_backup() to free pages
 * or back them up.
 *
 * Return: Number of pages freed or swapped out, or negative error code on
 * error.
 */
long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_backup_flags flags)
{
	long ret;

	if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
		return 0;

	ret = ttm_pool_backup(&bdev->pool, tt, &flags);
	if (ret > 0) {
		tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
		tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
	}

	return ret;
}

int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_operation_ctx *ctx)
{
	int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);

	if (ret)
		return ret;

	tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_restore);

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (ttm_pool_uses_dma32(&bdev->pool))
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
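
/*
 * Note that ttm_tt_populate() and ttm_tt_unpopulate() are normally
 * driven by TTM core code (bo validation, move and swapout paths)
 * rather than called directly by drivers; drivers typically hook
 * ->ttm_tt_populate() / ->ttm_tt_unpopulate() instead, to add
 * device-side work around the default pool allocation.
 */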

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
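
/*
 * A minimal usage sketch. In practice the iterator is driven by TTM's
 * move helpers (e.g. ttm_move_memcpy()); direct use as below is shown
 * only to illustrate the ops, and assumes tt is populated:
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, 0);
 *	... access the first page through map.vaddr ...
 *	iter->ops->unmap_local(iter, &map);
 */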

unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);

/**
 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
 * @tt: The ttm_tt for which to allocate and assign a backup structure.
 *
 * Assign a backup structure to be used for tt backup. This should
 * typically be done at bo creation, to avoid allocations at shrinking
 * time.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
	struct file *backup =
		ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);

	if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
		return -EINVAL;

	if (IS_ERR(backup))
		return PTR_ERR(backup);

	if (tt->backup)
		ttm_backup_fini(tt->backup);

	tt->backup = backup;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_setup_backup);
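
/*
 * A minimal call-site sketch (hypothetical driver code). The tt must
 * have been created with TTM_TT_FLAG_EXTERNAL_MAPPABLE set, as checked
 * above; typically this is done right after the tt is initialized in
 * the driver's ->ttm_tt_create() hook:
 *
 *	err = ttm_tt_setup_backup(tt);
 *	if (err) {
 *		ttm_tt_fini(tt);
 *		kfree(tt);
 *		return NULL;
 *	}
 */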