/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/list_sort.h>
#include <linux/pci.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
        struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

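/**
 * radeon_cs_buckets_init() - initialize all priority buckets
 * @b: bucket array to initialize
 *
 * Sets up an empty list head for every priority level.
 **/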
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
        unsigned i;

        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
                INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
                                  struct list_head *item, unsigned priority)
{
        /* Since buffers which appear sooner in the relocation list are
         * likely to be used more often than buffers which appear later
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
        list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

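/**
 * radeon_cs_buckets_get_list() - splice the buckets into one sorted list
 * @b: bucket array to drain
 * @out_list: resulting list, ordered by descending priority
 **/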
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
                                       struct list_head *out_list)
{
        unsigned i;

        /* Connect the sorted buckets in the output list. */
        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
                list_splice(&b->bucket[i], out_list);
        }
}

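/**
 * radeon_cs_parser_relocs() - parse the relocation chunk
 * @p: parser structure holding parsing context.
 *
 * Looks up the GEM object behind each relocation entry, decides which
 * memory domains it may live in, sorts the buffers by priority and
 * validates the whole list. Returns 0 on success, negative error code
 * on failure.
 **/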
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
        unsigned i;
        bool need_mmap_lock = false;
        int r;

        if (p->chunk_relocs == NULL) {
                return 0;
        }
        chunk = p->chunk_relocs;
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs = kvcalloc(p->nrelocs, sizeof(struct radeon_bo_list),
                             GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }

        radeon_cs_buckets_init(&buckets);

        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
                struct drm_gem_object *gobj;
                unsigned priority;

                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                gobj = drm_gem_object_lookup(p->filp, r->handle);
                if (gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
                p->relocs[i].robj = gem_to_radeon_bo(gobj);

                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
                 * Also, the buffers used for write have a higher priority than
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
                           + !!r->write_domain;

                /* The first reloc of a UVD job is the msg and that must be in
                 * VRAM, the second reloc is the DPB and for WMV that must be in
                 * VRAM as well. Also put everything into VRAM on AGP cards and older
                 * IGP chips to avoid image corruption.
                 */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
                     p->rdev->family == CHIP_RS780 ||
                     p->rdev->family == CHIP_RS880)) {

                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].preferred_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        p->relocs[i].allowed_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        /* prioritize this over any other relocation */
                        priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;

                        if (domain & RADEON_GEM_DOMAIN_CPU) {
                                DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
                                          "for command submission\n");
                                return -EINVAL;
                        }

                        p->relocs[i].preferred_domains = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].allowed_domains = domain;
                }

                if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {
                        uint32_t domain = p->relocs[i].preferred_domains;
                        if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
                                DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
                                          "allowed for userptr BOs\n");
                                return -EINVAL;
                        }
                        need_mmap_lock = true;
                        domain = RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].preferred_domains = domain;
                        p->relocs[i].allowed_domains = domain;
                }

                /* Objects shared as dma-bufs cannot be moved to VRAM */
                if (p->relocs[i].robj->prime_shared_count) {
                        p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
                        if (!p->relocs[i].allowed_domains) {
                                DRM_ERROR("BO associated with dma-buf cannot "
                                          "be moved to VRAM\n");
                                return -EINVAL;
                        }
                }

                p->relocs[i].shared = !r->write_domain;
                radeon_cs_buckets_add(&buckets, &p->relocs[i].list, priority);
        }

        radeon_cs_buckets_get_list(&buckets, &p->validated);

        if (p->cs_flags & RADEON_CS_USE_VM)
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);
        if (need_mmap_lock)
                mmap_read_lock(current->mm);

        r = radeon_bo_list_validate(p->rdev, &p->exec, &p->validated, p->ring);

        if (need_mmap_lock)
                mmap_read_unlock(current->mm);

        return r;
}

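/**
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 * @p: parser structure holding parsing context.
 * @ring: RADEON_CS_RING_* value from the CS flags chunk
 * @priority: requested submission priority
 *
 * Returns 0 on success, -EINVAL if the ring id is unknown or the ring
 * is not supported on this chip.
 **/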
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
        p->priority = priority;

        switch (ring) {
        default:
                DRM_ERROR("unknown ring id: %d\n", ring);
                return -EINVAL;
        case RADEON_CS_RING_GFX:
                p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_COMPUTE:
                if (p->rdev->family >= CHIP_TAHITI) {
                        if (p->priority > 0)
                                p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_DMA:
                if (p->rdev->family >= CHIP_CAYMAN) {
                        if (p->priority > 0)
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
                } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
                }
                break;
        case RADEON_CS_RING_UVD:
                p->ring = R600_RING_TYPE_UVD_INDEX;
                break;
        case RADEON_CS_RING_VCE:
                /* TODO: only use the low priority ring for now */
                p->ring = TN_RING_TYPE_VCE1_INDEX;
                break;
        }
        return 0;
}

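/**
 * radeon_cs_sync_rings() - make the IB wait for all validated BOs
 * @p: parser structure holding parsing context.
 *
 * Adds the fences of every reservation object in the validated list to
 * the IB's sync object, so the submission waits for any other ring that
 * still uses these buffers.
 **/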
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
        struct radeon_bo_list *reloc;
        int r;

        list_for_each_entry(reloc, &p->validated, list) {
                struct dma_resv *resv;

                resv = reloc->robj->tbo.base.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->shared);
                if (r)
                        return r;
        }
        return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        u64 size;
        unsigned i;
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;

        INIT_LIST_HEAD(&p->validated);
        drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

        if (!cs->num_chunks) {
                return 0;
        }

        /* get chunks */
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->const_ib.sa_bo = NULL;
        p->chunk_ib = NULL;
        p->chunk_relocs = NULL;
        p->chunk_flags = NULL;
        p->chunk_const_ib = NULL;
        p->chunks_array = kvmalloc_array(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (copy_from_user(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs = &p->chunks[i];
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib = &p->chunks[i];
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
                        p->chunk_const_ib = &p->chunks[i];
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->chunk_flags = &p->chunks[i];
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;

                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
                        if (p->chunks[i].length_dw > 2)
                                priority = (s32)p->chunks[i].kdata[2];
                }
        }

        /* these are KMS only */
        if (p->rdev) {
                if ((p->cs_flags & RADEON_CS_USE_VM) &&
                    !p->rdev->vm_manager.enabled) {
                        DRM_ERROR("VM not active on asic!\n");
                        return -EINVAL;
                }

                if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;

                /* we only support VM on some SI+ rings */
                if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
                        if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
                                DRM_ERROR("Ring %d requires VM!\n", p->ring);
                                return -EINVAL;
                        }
                } else {
                        if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
                                DRM_ERROR("VM not supported on ring %d!\n",
                                          p->ring);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

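/**
 * cmp_size_smaller_first() - list_sort() comparator for BO sizes
 * @priv: unused private data pointer from list_sort()
 * @a: first list entry
 * @b: second list entry
 *
 * Sorts smaller buffers before larger ones; see the comment in
 * radeon_cs_parser_fini() for the rationale.
 **/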
static int cmp_size_smaller_first(void *priv, const struct list_head *a,
                                  const struct list_head *b)
{
        struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, list);
        struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, list);

        /* Sort A before B if A is smaller. */
        if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
                return 1;
        if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
                return -1;
        return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, the buffers are unvalidated; otherwise this just
 * frees the memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
        unsigned i;

        if (!error) {
                struct radeon_bo_list *reloc;

                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);
                list_for_each_entry(reloc, &parser->validated, list) {
                        dma_resv_add_fence(reloc->robj->tbo.base.resv,
                                           &parser->ib.fence->base,
                                           reloc->shared ?
                                           DMA_RESV_USAGE_READ :
                                           DMA_RESV_USAGE_WRITE);
                }
        }

        drm_exec_fini(&parser->exec);

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        struct radeon_bo *bo = parser->relocs[i].robj;
                        if (bo == NULL)
                                continue;

                        drm_gem_object_put(&bo->tbo.base);
                }
        }
        kfree(parser->track);
        kvfree(parser->relocs);
        kvfree(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kvfree(parser->chunks);
        kvfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}

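/**
 * radeon_cs_ib_chunk() - parse and submit an IB without VM
 * @rdev: radeon device
 * @parser: parser structure holding parsing context.
 *
 * Runs the per-ring command stream checker on the IB, syncs with the
 * other rings and schedules the IB. Only used when the CS does not use
 * a VM.
 **/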
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                              struct radeon_cs_parser *parser)
{
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM)
                return 0;

        r = radeon_cs_parse(rdev, parser->ring, parser);
        if (r || parser->parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                return r;
        }

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
        else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
                 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
                radeon_vce_note_usage(rdev);

        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        return r;
}

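/**
 * radeon_bo_vm_update_pte() - bring the VM page tables up to date
 * @p: parser structure holding parsing context.
 * @vm: VM the command submission runs in
 *
 * Updates the page directory, clears freed mappings and writes the page
 * table entries for every BO referenced by the CS, syncing the IB with
 * the page table updates.
 **/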
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
{
        struct radeon_device *rdev = p->rdev;
        struct radeon_bo_va *bo_va;
        int i, r;

        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;

        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;

        if (vm->ib_bo_va == NULL) {
                DRM_ERROR("Tmp BO not in VM!\n");
                return -EINVAL;
        }

        r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                rdev->ring_tmp_bo.bo->tbo.resource);
        if (r)
                return r;

        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;

                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
                if (r)
                        return r;

                radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);

                r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
                if (r)
                        return r;
        }

        return radeon_vm_clear_invalids(rdev, vm);
}

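/**
 * radeon_cs_ib_vm_chunk() - parse and submit an IB running in a VM
 * @rdev: radeon device
 * @parser: parser structure holding parsing context.
 *
 * Parses the IB (and the const IB on SI+), updates the VM page tables,
 * syncs with the other rings and schedules the IB. Only used when the
 * CS uses a VM.
 **/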
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                                 struct radeon_cs_parser *parser)
{
        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;

        if (parser->const_ib.length_dw) {
                r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
                if (r) {
                        return r;
                }
        }

        r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
        if (r) {
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);

        mutex_lock(&vm->mutex);
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
        }

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                goto out;
        }

        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib != NULL)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }

out:
        mutex_unlock(&vm->mutex);
        return r;
}

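/**
 * radeon_cs_handle_lockup() - translate a CS error into a reset if needed
 * @rdev: radeon device
 * @r: error code returned by the CS path
 *
 * -EDEADLK means the GPU is hung; reset it and, if the reset succeeds,
 * return -EAGAIN so userspace resubmits the CS. All other errors are
 * passed through unchanged.
 **/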
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

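/**
 * radeon_cs_ib_fill() - allocate the IB(s) and copy in the command stream
 * @rdev: radeon device
 * @parser: parser structure holding parsing context.
 *
 * Allocates the IB (and the const IB for SI+ VM submissions), checks the
 * size limits and copies the command stream from the chunk data or
 * directly from userspace.
 **/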
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_vm *vm = NULL;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM) {
                struct radeon_fpriv *fpriv = parser->filp->driver_priv;
                vm = &fpriv->vm;

                if ((rdev->family >= CHIP_TAHITI) &&
                    (parser->chunk_const_ib != NULL)) {
                        ib_chunk = parser->chunk_const_ib;
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
                        }
                        r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
                                          vm, ib_chunk->length_dw * 4);
                        if (r) {
                                DRM_ERROR("Failed to get const ib !\n");
                                return r;
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
                        if (copy_from_user(parser->const_ib.ptr,
                                           ib_chunk->user_ptr,
                                           ib_chunk->length_dw * 4))
                                return -EFAULT;
                }

                ib_chunk = parser->chunk_ib;
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
        ib_chunk = parser->chunk_ib;

        r = radeon_ib_get(rdev, parser->ring, &parser->ib,
                          vm, ib_chunk->length_dw * 4);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                return r;
        }
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
        else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;
        return 0;
}

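/**
 * radeon_cs_ioctl() - DRM_RADEON_CS ioctl entry point
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_radeon_cs from userspace
 * @filp: DRM file the submission comes from
 *
 * Drives the whole command submission: initializes the parser, fills
 * the IB(s), validates the relocations and hands the IB(s) to the ring,
 * handling GPU lockups along the way.
 **/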
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        int r;

        down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
                up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        if (rdev->in_reset) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                return r;
        }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        r = radeon_cs_ib_fill(rdev, &parser);
        if (!r) {
                r = radeon_cs_parser_relocs(&parser);
                if (r && r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
        }

        if (r) {
                radeon_cs_parser_fini(&parser, r);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        trace_radeon_cs(&parser);

        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
        r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
out:
        radeon_cs_parser_fini(&parser, r);
        up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: packet index
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining IB size or if the packet type is
 * unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
        int ret = 0, i;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
        pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case RADEON_PACKET_TYPE0:
                if (rdev->family < CHIP_R600) {
                        pkt->reg = R100_CP_PACKET0_GET_REG(header);
                        pkt->one_reg_wr =
                                RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
                } else
                        pkt->reg = R600_CP_PACKET0_GET_REG(header);
                break;
        case RADEON_PACKET_TYPE3:
                pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
                break;
        case RADEON_PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                ret = -EINVAL;
                goto dump_ib;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                ret = -EINVAL;
                goto dump_ib;
        }
        return 0;

dump_ib:
        for (i = 0; i < ib_chunk->length_dw; i++) {
                if (i == idx)
                        printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
                else
                        printk("\t0x%08x\n", radeon_get_ib_value(p, i));
        }
        return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return false;
        if (p3reloc.type != RADEON_PACKET_TYPE3)
                return false;
        if (p3reloc.opcode != RADEON_PACKET3_NOP)
                return false;
        return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt)
{
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib.ptr;
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++)
                dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: reloc information
 * @nomm: no memory management for debugging
 *
 * Check if the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                struct radeon_bo_list **cs_reloc,
                                int nomm)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = p->chunk_relocs;
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != RADEON_PACKET_TYPE3 ||
            p3reloc.opcode != RADEON_PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
                (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = &p->relocs[(idx / 4)];
        return 0;
}