/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"
#include "radeon_ttm.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

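/**
 * radeon_ttm_placement_from_domain - build TTM placements from a domain mask
 * @rbo: BO whose placement array is filled in
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM/GTT/CPU bits
 *
 * Translates the radeon domain mask into rbo->placements, preferring the
 * CPU-invisible part of VRAM for BOs flagged RADEON_GEM_NO_CPU_ACCESS and
 * falling back to system memory when no domain bit is set.
 */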
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = 0;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_TT;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}

	rbo->placement.num_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

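/**
 * radeon_bo_create - allocate and validate a new radeon buffer object
 * @rdev: radeon device
 * @size: requested size in bytes, rounded up to a whole page
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel-internal allocations
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* creation flags such as RADEON_GEM_GTT_WC
 * @sg: scatter/gather table for dma-buf imports, or NULL
 * @resv: reservation object to share, or NULL to allocate a private one
 * @bo_ptr: where the new BO is stored on success
 *
 * Illustrative sketch of a caller (not taken from this file): allocating
 * a 64 KiB, page-aligned kernel BO in VRAM might look like
 *
 *	struct radeon_bo *vram_bo;
 *	int r = radeon_bo_create(rdev, 64 * 1024, PAGE_SIZE, true,
 *				 RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
 *				 &vram_bo);
 *
 * Returns 0 on success or a negative error code.
 */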
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
	bo->tbo.base.funcs = &radeon_gem_object_funcs;
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, page_align, !kernel, sg, resv,
				 &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0))
		return r;
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

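/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 * @bo: BO to map
 * @ptr: optional location to store the kernel virtual address
 *
 * Waits for the kernel fences on the BO, then maps the whole object
 * with ttm_bo_kmap(). The mapping is cached in bo->kptr, so repeated
 * calls just return the existing pointer. Returns 0 on success or a
 * negative error code.
 */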
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

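/**
 * radeon_bo_kunmap - tear down the kernel mapping created by radeon_bo_kmap()
 * @bo: BO to unmap; a no-op if the BO is not currently mapped
 */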
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

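/**
 * radeon_bo_ref - take an additional reference on a buffer object
 * @bo: BO to reference, may be NULL
 *
 * Returns @bo for convenience, or NULL when @bo is NULL.
 */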
struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

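/**
 * radeon_bo_unref - drop a reference on a buffer object
 * @bo: address of the BO pointer; cleared to NULL afterwards
 */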
void radeon_bo_unref(struct radeon_bo **bo)
{
	if ((*bo) == NULL)
		return;
	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

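/**
 * radeon_bo_pin_restricted - pin a BO, optionally within an address range
 * @bo: BO to pin
 * @domain: RADEON_GEM_DOMAIN_* domain to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr: optional location to store the pinned GPU address
 *
 * Validates the BO into @domain, clamped to @max_offset when given, and
 * raises the TTM pin count. Userptr BOs cannot be pinned, and dma-buf
 * shared BOs cannot be pinned into VRAM. Returns 0 on success or a
 * negative error code.
 */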
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

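/**
 * radeon_bo_unpin - drop one pin reference on a buffer object
 * @bo: BO to unpin
 *
 * Updates the VRAM/GART pin accounting when the pin count reaches zero.
 */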
void radeon_bo_unpin(struct radeon_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (!bo->tbo.pin_count) {
		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	}
}

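/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 * @rdev: radeon device
 *
 * Used during suspend and teardown. On IGP chips without sideport
 * memory the eviction is skipped (unless hibernation support is built
 * in), since their "VRAM" is carved out of system RAM anyway.
 */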
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	struct ttm_device *bdev = &rdev->mman.bdev;
	struct ttm_resource_manager *man;

	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (!man)
		return 0;
	return ttm_resource_manager_evict_all(bdev, man);
}

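/**
 * radeon_bo_force_delete - clean up GEM objects leaked by userspace
 * @rdev: radeon device
 *
 * Called at teardown; warns about and drops any BO still on the GEM
 * object list.
 */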
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

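/**
 * radeon_bo_init - initialize memory manager related state
 * @rdev: radeon device
 *
 * Reserves the VRAM aperture for write-combined CPU access and brings
 * up TTM. Returns 0 on success or a negative error code.
 */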
int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

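/**
 * radeon_bo_fini - tear down the memory manager
 * @rdev: radeon device
 *
 * Shuts down TTM and releases the write-combining reservations taken
 * in radeon_bo_init().
 */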
void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	struct ttm_resource_manager *man =
		ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	u64 vram_usage = ttm_resource_manager_usage(man);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *          __________________
	 * 1/4 of  -|\               |
	 * VRAM     | \              |
	 *          |  \             |
	 *          |   \            |
	 *          |    \           |
	 *          |     \          |
	 *          |      \         |
	 *          |       \________|1 MB
	 *          |----------------|
	 *     VRAM 0 %         100 %
	 *          used        used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
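	 *
	 * Illustrative numbers (derived from the formula below, not part of
	 * the original comment): with 2 GiB of VRAM and 512 MiB in use,
	 * half_vram is 1 GiB and half_free_vram is 512 MiB, so the threshold
	 * for this IB is 256 MiB. Once usage reaches half of VRAM the
	 * computed value drops to zero and the 1 MB floor takes over.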
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

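/**
 * radeon_bo_list_validate - lock and place all BOs of a command submission
 * @rdev: radeon device
 * @exec: drm_exec context used to lock the BOs
 * @head: list of radeon_bo_list entries to validate
 * @ring: index of the ring the submission targets
 *
 * Locks every BO on @head, then validates each one into its preferred
 * domain, falling back to the allowed domains on failure. The amount of
 * data moved per IB is capped by radeon_bo_get_threshold_for_moves().
 * Returns 0 on success or a negative error code.
 */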
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct drm_exec *exec,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
	int r;

	drm_exec_until_all_locked(exec) {
		list_for_each_entry(lobj, head, list) {
			r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
						 1);
			drm_exec_retry_on_contention(exec);
			if (unlikely(r && r != -EALREADY))
				return r;
		}
	}

	list_for_each_entry(lobj, head, list) {
		struct radeon_bo *bo = lobj->robj;

		if (!bo->tbo.pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	return 0;
}

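/**
 * radeon_bo_get_surface_reg - assign a hardware surface register to a tiled BO
 * @bo: BO that needs its tiling parameters programmed
 *
 * Picks a free surface register slot, stealing one from an unpinned BO
 * if all slots are taken, and programs it with the BO's tiling flags
 * and pitch. Returns 0 on success or -ENOMEM if no slot can be stolen.
 */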
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->tbo.pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.resource->start << PAGE_SHIFT,
			       bo->tbo.base.size);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

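/**
 * radeon_bo_set_tiling_flags - validate and store tiling parameters
 * @bo: BO to modify
 * @tiling_flags: RADEON_TILING_* flags, including the encoded EG fields
 * @pitch: surface pitch
 *
 * On evergreen (CEDAR) and newer, the bank width/height, macro tile
 * aspect and (stencil) tile split fields are range-checked before the
 * flags are stored. Returns 0 on success or a negative error code.
 */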
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6)
			return -EINVAL;
		if (stilesplit > 6)
			return -EINVAL;
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

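/**
 * radeon_bo_check_tiling - reconcile a BO's surface register with its placement
 * @bo: BO to check
 * @has_moved: true if the BO has just been moved
 * @force_drop: true to drop the surface register unconditionally
 *
 * BOs with RADEON_TILING_SURFACE set need a surface register while they
 * live in VRAM and must give it up when they leave it.
 */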
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->resource->size;
	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (likely(!r)) {
		offset = bo->resource->start << PAGE_SHIFT;
		/* this should never happen */
		if ((offset + size) > rdev->mc.visible_vram_size)
			return VM_FAULT_SIGBUS;
	}

	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(&fence->base, false);
		return;
	}

	dma_resv_add_fence(resv, &fence->base, shared ?
			   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}