/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

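/*
 * Publish the hardware fence as the scheduler fence's parent and, if a
 * deadline was already requested on the finished fence, forward it to
 * the hardware fence.
 */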
static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				       struct dma_fence *fence)
{
	/*
	 * smp_store_release() to ensure another thread racing us
	 * in drm_sched_fence_set_deadline_finished() sees the
	 * fence's parent set before test_bit()
	 */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);
}

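/**
 * drm_sched_fence_scheduled - signal the scheduled fence
 *
 * @fence: the scheduler fence whose scheduled part should be signaled
 * @parent: the hardware fence the job produced, typically the fence
 *          returned from the driver's run_job callback; may be NULL or
 *          an ERR_PTR when no hardware fence is available
 *
 * Records @parent (when valid) and then signals the scheduled fence,
 * announcing that the job has been pushed to the hardware.
 */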
void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent)
{
	/* Set the parent before signaling the scheduled fence, such that
	 * any waiter expecting the parent to be filled after the job has
	 * been scheduled (which is the case for drivers delegating waits
	 * to some firmware) doesn't have to busy wait for parent to show
	 * up.
	 */
	if (!IS_ERR_OR_NULL(parent))
		drm_sched_fence_set_parent(fence, parent);

	dma_fence_signal(&fence->scheduled);
}

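/**
 * drm_sched_fence_finished - signal the finished fence
 *
 * @fence: the scheduler fence to signal
 * @result: the job's completion status, 0 on success or a negative error code
 *
 * Stores @result as the fence error (when non-zero) and signals the
 * finished fence, announcing that the job has completed.
 */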
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
	if (result)
		dma_fence_set_error(&fence->finished, result);
	dma_fence_signal(&fence->finished);
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

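/*
 * &dma_fence_ops.set_deadline implementation for the finished fence:
 * remember the earliest deadline requested so far and, once a parent
 * fence exists, forward the deadline to it.
 */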
static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
						  ktime_t deadline)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	struct dma_fence *parent;
	unsigned long flags;

	spin_lock_irqsave(&fence->lock, flags);

	/* If we already have an earlier deadline, keep it: */
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
	    ktime_before(fence->deadline, deadline)) {
		spin_unlock_irqrestore(&fence->lock, flags);
		return;
	}

	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

	spin_unlock_irqrestore(&fence->lock, flags);

	/*
	 * smp_load_acquire() to ensure that if we are racing another
	 * thread calling drm_sched_fence_set_parent(), we see the
	 * parent set before it calls test_bit(HAS_DEADLINE_BIT)
	 */
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
	.set_deadline = drm_sched_fence_set_deadline_finished,
};

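/**
 * to_drm_sched_fence - cast a dma_fence back to its scheduler fence
 *
 * @f: fence to cast
 *
 * Returns the &struct drm_sched_fence embedding @f if @f is a scheduler
 * scheduled or finished fence, NULL otherwise.
 */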
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

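/**
 * drm_sched_fence_alloc - allocate a scheduler fence
 *
 * @entity: the entity the fence is created for
 * @owner: opaque pointer identifying the owner of the job
 * @drm_client_id: the DRM client id the job originates from
 *
 * Allocates the fence from the slab cache and fills in the fields known at
 * job creation time; the embedded fences are set up later by
 * drm_sched_fence_init(). Returns NULL on allocation failure.
 */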
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner,
					      u64 drm_client_id)
{
	struct drm_sched_fence *fence;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	fence->drm_client_id = drm_client_id;
	spin_lock_init(&fence->lock);

	return fence;
}

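/**
 * drm_sched_fence_init - initialize the embedded fences
 *
 * @fence: the scheduler fence to initialize
 * @entity: the entity the job was pushed to
 *
 * Binds the fence to @entity's scheduler and initializes the scheduled and
 * finished fences with the same seqno on the entity's two consecutive fence
 * contexts.
 */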
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");