/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/export.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	long err = 0;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		drm_gem_object_get(&bo->base);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void)dma_resv_wait_timeout(bo->base.resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		drm_gem_object_put(&bo->base);
		return VM_FAULT_RETRY;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (unlikely(err < 0)) {
		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
	}

	return 0;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_device *bdev = bo->bdev;

	if (bdev->funcs->io_mem_pfn)
		return bdev->funcs->io_mem_pfn(bo, page_offset);

	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be
 * dropped during long waits, and after the wait the callback will be
 * restarted. This is to allow other threads using the same virtual memory
 * space concurrent access to map() and unmap() completely unrelated buffer
 * objects. TTM buffer object reservations sometimes wait for GPU and should
 * therefore be considered long waits. This function reserves the buffer
 * object interruptibly, taking this into account. Starvation is avoided by
 * the vm system not allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *  0 on success and the bo was reserved.
 *  VM_FAULT_RETRY if blocking wait.
 *  VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				drm_gem_object_get(&bo->base);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				drm_gem_object_put(&bo->base);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
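
/*
 * A minimal sketch of the customized fault() handler pattern the
 * kernel-doc above describes, assuming a driver that is happy with the
 * default page protection and prefault count. The function name is
 * illustrative only and not part of TTM; compare ttm_bo_vm_fault()
 * further down for the in-tree equivalent.
 */
static vm_fault_t __maybe_unused ttm_bo_vm_example_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* Trylock / retry dance; may drop the mmap_lock and ask for a retry. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Driver-private work under the reservation would go here. */

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}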

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *  VM_FAULT_NOPAGE on success or pending signal
 *  VM_FAULT_SIGBUS on unspecified error
 *  VM_FAULT_OOM on out-of-memory
 *  VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_reserve(bdev, bo->resource);
	if (unlikely(err != 0))
		return VM_FAULT_SIGBUS;

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
		return VM_FAULT_SIGBUS;

	prot = ttm_io_prot(bo, bo->resource, prot);
	if (!bo->resource->bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false,
		};

		ttm = bo->ttm;
		err = ttm_bo_populate(bo, &ctx);
		if (err) {
			if (err == -EINTR || err == -ERESTARTSYS ||
			    err == -EAGAIN)
				return VM_FAULT_NOPAGE;

			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
			return VM_FAULT_SIGBUS;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->resource->bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				return VM_FAULT_OOM;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_pfn_prot() for a discussion.
		 */
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				return VM_FAULT_NOPAGE;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

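/**
 * ttm_bo_vm_dummy_page - Back a VMA with a single zeroed dummy page
 * @vmf: The fault structure handed to the callback
 * @prot: The page protection used for the mapping
 *
 * Used once the underlying device has gone away: the whole VMA is mapped
 * to one zeroed page so that faulting processes can keep running. The
 * page is freed by a drm-managed release action when the bo's drm_device
 * is finally released.
 *
 * Return: VM_FAULT_NOPAGE on success, VM_FAULT_OOM on allocation failure.
 */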
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address;
	unsigned long pfn;
	struct page *page;

	/* Allocate new dummy page to map all the VA range in this VMA to it */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	/* Set the page to be freed using drmm release action */
	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

	/* Prefault the entire VMA range right away to avoid further faults */
	for (address = vma->vm_start; address < vma->vm_end;
	     address += PAGE_SIZE)
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

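/**
 * ttm_bo_vm_fault - Default TTM fault handler
 * @vmf: The fault structure handed to the callback
 *
 * The .fault callback installed by ttm_bo_mmap_obj(): reserves the
 * buffer object, inserts PTEs via ttm_bo_vm_fault_reserved() while the
 * device is alive, and falls back to the dummy page once the device has
 * been unplugged.
 *
 * Return: A vm_fault_t code, see ttm_bo_vm_fault_reserved().
 */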
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	drm_gem_object_get(&bo->base);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	drm_gem_object_put(&bo->base);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

/**
 * ttm_bo_access - Helper to access a buffer object
 *
 * @bo: ttm buffer object
 * @offset: access offset into buffer object
 * @buf: pointer to caller memory to read into or write from
 * @len: length of access
 * @write: write access
 *
 * Utility function to access a buffer object. Useful when buffer object cannot
 * be easily mapped (non-contiguous, non-visible, etc...). Should not directly
 * be exported to user space via a peek / poke interface.
 *
 * Returns:
 * @len if successful, negative error code on failure.
 */
int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
		  void *buf, int len, int write)
{
	int ret;

	if (len < 1 || (offset + len) > bo->base.size)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	if (!bo->resource) {
		ret = -ENODATA;
		goto unlock;
	}

	switch (bo->resource->mem_type) {
	case TTM_PL_SYSTEM:
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->funcs->access_memory)
			ret = bo->bdev->funcs->access_memory
				(bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

unlock:
	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_access);
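
/*
 * A hedged usage sketch for ttm_bo_access(): copy the first @len bytes
 * of a buffer object into caller memory, e.g. from a driver-internal
 * debug path. The helper name is illustrative only and not part of TTM.
 */
static int __maybe_unused ttm_bo_example_peek(struct ttm_buffer_object *bo,
					      void *out, int len)
{
	int ret = ttm_bo_access(bo, 0, out, len, false);

	/* ttm_bo_access() returns the number of bytes copied on success. */
	return ret < 0 ? ret : 0;
}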
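
/**
 * ttm_bo_vm_access - vm_operations_struct .access callback for TTM BOs
 * @vma: The vma backed by a TTM buffer object
 * @addr: Virtual address of the access
 * @buf: Pointer to caller memory to read into or write from
 * @len: Length of the access
 * @write: Whether this is a write access
 *
 * Translates the virtual address into an offset into the buffer object
 * and forwards the access to ttm_bo_access(). This is what e.g. ptrace()
 * uses to reach TTM-backed mappings.
 *
 * Return: @len on success, negative error code on failure.
 */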
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);

	return ttm_bo_access(bo, offset, buf, len, write);
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

/**
 * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space.
 *
 * Maps a buffer object.
 */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* Enforce no COW since would have really strange behavior with it. */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	drm_gem_object_get(&bo->base);

	/*
	 * Drivers may want to override the vm_ops field. Otherwise we
	 * use TTM's default callbacks.
	 */
	if (!vma->vm_ops)
		vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
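
/*
 * A hedged sketch of a GEM-based mmap path built on ttm_bo_mmap_obj(),
 * mirroring the drm_gem_ttm helper pattern: ttm_bo_mmap_obj() takes its
 * own reference via vma->vm_private_data, so the reference handed in by
 * the GEM lookup is dropped afterwards. The function name is
 * illustrative only and not part of TTM.
 */
static int __maybe_unused ttm_bo_example_gem_mmap(struct drm_gem_object *gem,
						  struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
		container_of(gem, struct ttm_buffer_object, base);
	int ret;

	ret = ttm_bo_mmap_obj(vma, bo);
	if (ret)
		return ret;

	/* Drop the lookup reference; the vma now holds its own. */
	drm_gem_object_put(gem);
	return 0;
}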