/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include "msm_mmu.h"
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/drm_exec.h"
#include "drm/drm_gpuvm.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x) WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000 /* use IOMMU_PRIV when mapping */

/**
 * struct msm_gem_vm_log_entry - An entry in the VM log
 *
 * For userspace managed VMs, a log of recent VM updates is tracked and
 * captured in GPU devcore dumps, to aid debugging issues caused by (for
 * example) incorrectly synchronized VM updates
 */
struct msm_gem_vm_log_entry {
	const char *op;
	uint64_t iova;
	uint64_t range;
	int queue_id;
};

/**
 * struct msm_gem_vm - VM object
 *
 * A VM object representing a GPU (or display or GMU or ...) virtual address
 * space.
 *
 * In the case of GPU, if per-process address spaces are supported, the address
 * space is split into two VMs, which map to TTBR0 and TTBR1 in the SMMU. TTBR0
 * is used for userspace objects, and is unique per msm_context/drm_file, while
 * TTBR1 is the same for all processes. (The kernel controlled ringbuffer and
 * a few other kernel controlled buffers live in TTBR1.)
 *
 * The GPU TTBR0 VM can be managed by userspace or by the kernel, depending on
 * whether userspace supports VM_BIND. All other VMs are managed by the kernel.
 * (Managed by the kernel means the kernel is responsible for VA allocation.)
 *
 * Note that because VM_BIND allows a given BO to be mapped multiple times in
 * a VM, and therefore have multiple VMAs in a VM, there is an extra object
 * provided by drm_gpuvm infrastructure.. the drm_gpuvm_bo, which is not
 * embedded in any larger driver structure. The GEM object holds a list of
 * drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
 * holds a reference to the vm_bo, and drops it when the vma is unlinked.
 * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
 * existing vm_bo, or create a new one. Once the vma is linked, the ref
 * to the vm_bo can be dropped (since the vma is holding one).
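 *
 * A minimal sketch of that pattern (illustrative only; error handling is
 * elided, and "vma" stands for a drm_gpuva that has already been created
 * for the object in this VM):
 *
 *    struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *
 *    drm_gpuva_link(vma, vm_bo);
 *    drm_gpuvm_bo_put(vm_bo);
 *
 * After drm_gpuva_link(), the vma holds its own vm_bo reference, so the
 * reference returned by drm_gpuvm_bo_obtain() can be dropped right away.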
 */
struct msm_gem_vm {
	/** @base: Inherit from drm_gpuvm. */
	struct drm_gpuvm base;

	/**
	 * @sched: Scheduler used for asynchronous VM_BIND requests.
	 *
	 * Unused for kernel managed VMs (where all operations are synchronous).
	 */
	struct drm_gpu_scheduler sched;

	/**
	 * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre-
	 * allocated memory is in flight.
	 *
	 * Because we have to pre-allocate pgtable pages for the worst case
	 * (ie. new mappings do not share any PTEs with existing mappings)
	 * we could end up consuming a lot of resources transiently. The
	 * prealloc_throttle puts an upper bound on that.
	 */
	struct {
		/** @wait: Notified when preallocated resources are released */
		wait_queue_head_t wait;

		/**
		 * @in_flight: The # of preallocated pgtable pages in-flight
		 * for queued VM_BIND jobs.
		 */
		atomic_t in_flight;
	} prealloc_throttle;
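
	/*
	 * Roughly, the throttle works like the following sketch (illustrative
	 * only; the actual limit and call sites live in the VM_BIND submit
	 * path, and "LIMIT"/"npages"/"vm" are placeholders):
	 *
	 *    wait_event_interruptible(vm->prealloc_throttle.wait,
	 *        atomic_read(&vm->prealloc_throttle.in_flight) <= LIMIT);
	 *    atomic_add(npages, &vm->prealloc_throttle.in_flight);
	 *    ... queue the VM_BIND job ...
	 *    atomic_sub(npages, &vm->prealloc_throttle.in_flight);
	 *    wake_up(&vm->prealloc_throttle.wait);
	 */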

	/**
	 * @mm: Memory management for kernel managed VA allocations
	 *
	 * Only used for kernel managed VMs, unused for user managed VMs.
	 *
	 * Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
	 */
	struct drm_mm mm;

	/** @mmu: The mmu object which manages the pgtables */
	struct msm_mmu *mmu;

	/** @mmu_lock: Protects access to the mmu */
	struct mutex mmu_lock;

	/**
	 * @pid: For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/** @last_fence: Fence for last pending work scheduled on the VM */
	struct dma_fence *last_fence;

	/** @log: A log of recent VM updates */
	struct msm_gem_vm_log_entry *log;

	/** @log_shift: length of @log is (1 << @log_shift) */
	uint32_t log_shift;

	/** @log_idx: index of next @log entry to write */
	uint32_t log_idx;

	/** @faults: the number of GPU hangs associated with this address space */
	int faults;

	/** @managed: is this a kernel managed VM? */
	bool managed;

	/**
	 * @unusable: True if the VM has turned unusable because something
	 * bad happened during an asynchronous request.
	 *
	 * We don't try to recover from such failures, because this implies
	 * informing userspace about the specific operation that failed, and
	 * hoping the userspace driver can replay things from there. This all
	 * sounds very complicated for little gain.
	 *
	 * Instead, we should just flag the VM as unusable, and fail any
	 * further request targeting this VM.
	 *
	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
	 * situation, where the logical device needs to be re-created.
	 */
	bool unusable;
};
#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base)

struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
		  u64 va_start, u64 va_size, bool managed);

void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
void msm_gem_vm_unusable(struct drm_gpuvm *gpuvm);
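
/*
 * Illustrative use of the VM constructor (a hedged sketch; the name and VA
 * range below are example values, not the driver's actual configuration):
 *
 *    struct drm_gpuvm *vm;
 *
 *    vm = msm_gem_vm_create(drm, mmu, "gpu", SZ_16M, SZ_16G - SZ_16M, true);
 *    if (IS_ERR(vm))
 *        return PTR_ERR(vm);
 *
 * A VM that hits an unrecoverable failure on an asynchronous request is
 * flagged with msm_gem_vm_unusable(), and msm_gem_vm_close() tears down any
 * remaining mappings before the last VM reference is dropped.
 */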

struct msm_fence_context;

#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0)

/**
 * struct msm_gem_vma - a VMA mapping
 *
 * Represents a combination of a GEM object plus a VM.
 */
struct msm_gem_vma {
	/** @base: inherit from drm_gpuva */
	struct drm_gpuva base;

	/**
	 * @node: mm node for VA allocation
	 *
	 * Only used by kernel managed VMs
	 */
	struct drm_mm_node node;

	/** @mapped: Is this VMA mapped? */
	bool mapped;
};
#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base)

struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
		u64 offset, u64 range_start, u64 range_end);
void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason);
int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
void msm_gem_vma_close(struct drm_gpuva *vma);
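
/*
 * Sketch of a VMA's lifecycle (illustrative only; error handling is
 * abbreviated and the required VM/obj locks are assumed to be held):
 *
 *    struct drm_gpuva *vma;
 *    int ret;
 *
 *    vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
 *    if (IS_ERR(vma))
 *        return PTR_ERR(vma);
 *
 *    ret = msm_gem_vma_map(vma, prot, msm_obj->sgt);
 *    ...
 *    msm_gem_vma_unmap(vma, "example");
 *    msm_gem_vma_close(vma);
 */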

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * madv: are the backing pages purgeable?
	 *
	 * Protected by obj lock and LRU lock
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	char name[32]; /* Identifier to print for the debugfs files */

	/* userspace metadata backchannel */
	void *metadata;
	u32 metadata_size;

	/**
	 * pin_count: Number of times the pages are pinned
	 *
	 * Protected by LRU lock.
	 */
	int pin_count;

	/**
	 * @vma_ref: Reference count of VMA users.
	 *
	 * With the vm_bo/vma holding a reference to the GEM object, we'd
	 * otherwise have to actively tear down a VMA when, for example,
	 * a buffer is unpinned for scanout, vs. the pre-drm_gpuvm approach
	 * where a VMA did not hold a reference to the BO, but instead was
	 * implicitly torn down when the BO was freed.
	 *
	 * To regain the lazy VMA teardown, we use the @vma_ref. It is
	 * incremented for any of the following:
	 *
	 * 1) the BO is exported as a dma_buf
	 * 2) the BO has an open userspace handle
	 *
	 * All of those conditions will hold a reference to the BO,
	 * preventing it from being freed. So lazily keeping around the
	 * VMA will not prevent the BO from being freed. (Or rather, the
	 * reference loop is harmless in this case.)
	 *
	 * When the @vma_ref drops to zero, the kms->vm VMA will be
	 * torn down.
	 */
	atomic_t vma_ref;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

void msm_gem_vma_get(struct drm_gem_object *obj);
void msm_gem_vma_put(struct drm_gem_object *obj);
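
/*
 * A minimal sketch of how the @vma_ref counting described above is used
 * (illustrative; the real call sites are the handle open/close and dma-buf
 * export/release paths):
 *
 *    msm_gem_vma_get(obj);
 *    ... a handle or dma-buf exists, so VMAs may be kept around lazily ...
 *    msm_gem_vma_put(obj);
 */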

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_prot(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
void msm_gem_unpin_active(struct drm_gem_object *obj);
struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					 struct drm_gpuvm *vm);
int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		     uint64_t *iova);
int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		     uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
				   struct drm_gpuvm *vm, uint64_t *iova,
				   u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
			     uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
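
/*
 * Typical CPU-mapping usage, as a hedged sketch ("data"/"size" are
 * placeholders; msm_gem_get_vaddr() pins and vmaps the backing pages):
 *
 *    void *ptr = msm_gem_get_vaddr(obj);
 *
 *    if (IS_ERR(ptr))
 *        return PTR_ERR(ptr);
 *    memcpy(ptr, data, size);
 *    msm_gem_put_vaddr(obj);
 */
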
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		       size_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
				   size_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
			 struct drm_gpuvm *vm, struct drm_gem_object **bo,
			 uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
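
/*
 * Sketch of a kernel internal allocation (illustrative; the size and flags
 * below are example values):
 *
 *    struct drm_gem_object *bo;
 *    uint64_t iova;
 *    void *vaddr;
 *
 *    vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, vm, &bo, &iova);
 *    if (IS_ERR(vaddr))
 *        return PTR_ERR(vaddr);
 *    ...
 *    msm_gem_kernel_put(bo, vm);
 */
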
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
				      struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		      struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

/**
 * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
 * @exec: the exec context helper which will be initialized
 * @obj: the GEM object to lock
 * @vm: the VM to lock
 *
 * Operations which modify a VM frequently need to lock both the VM and
 * the object being mapped/unmapped/etc. This helper uses drm_exec to
 * acquire both locks, dealing with potential deadlock/backoff scenarios
 * which arise when multiple locks are involved.
 */
static inline int
msm_gem_lock_vm_and_obj(struct drm_exec *exec,
			struct drm_gem_object *obj,
			struct drm_gpuvm *vm)
{
	int ret = 0;

	drm_exec_init(exec, 0, 2);
	drm_exec_until_all_locked (exec) {
		ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
		if (!ret && (obj->resv != drm_gpuvm_resv(vm)))
			ret = drm_exec_lock_obj(exec, obj);
		drm_exec_retry_on_contention(exec);
		if (GEM_WARN_ON(ret))
			break;
	}

	return ret;
}
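
/*
 * A minimal usage sketch for the helper above; the caller owns the drm_exec
 * context and must always call drm_exec_fini(), even on error:
 *
 *    struct drm_exec exec;
 *    int ret;
 *
 *    ret = msm_gem_lock_vm_and_obj(&exec, obj, vm);
 *    if (!ret) {
 *        ... both the VM resv and the obj resv are held here ...
 *    }
 *    drm_exec_fini(&exec);
 */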

static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
	/*
	 * Destroying the object is a special case.. msm_gem_free_object()
	 * calls many things that WARN_ON if the obj lock is not held. But
	 * acquiring the obj lock in msm_gem_free_object() can cause a
	 * locking order inversion between reservation_ww_class_mutex and
	 * fs_reclaim.
	 *
	 * This deadlock is not actually possible, because no one should
	 * be already holding the lock when msm_gem_free_object() is called.
	 * Unfortunately lockdep is not aware of this detail. So when the
	 * refcount drops to zero, we pretend it is already locked.
	 */
	lockdep_assert_once(
		(kref_read(&obj->refcount) == 0) ||
		(lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
	);
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	msm_gem_assert_locked(&msm_obj->base);
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct drm_gpuvm *vm;
	struct list_head node;  /* node in ring submit list */
	struct drm_exec exec;
	uint32_t seqno;         /* Sequence number of the submit on the ring */

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;           /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;        /* submitting process */
	bool bos_pinned : 1;
	bool fault_dumped : 1;  /* Limit devcoredump dumping to one per submit */
	bool in_rb : 1;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;              /* An "identifier" for the submit for logging */
	struct {
		uint32_t type;
		uint32_t size;   /* in dwords */
		uint64_t iova;
		uint32_t offset; /* in dwords */
		uint32_t idx;    /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct drm_gem_object *obj;
			uint32_t handle;
		};
		struct drm_gpuvm_bo *vm_bo;
		uint64_t iova;
	} bos[];
};

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}

void msm_submit_retire(struct msm_gem_submit *submit);

#endif /* __MSM_GEM_H__ */