/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling the driver registers an MMU notifier to be
 * informed about updates to the page tables of a process.
 *
 * When somebody tries to invalidate the page tables we block the update until
 * all operations on the pages in question are completed, then those pages are
 * marked as accessed and also as dirty if the access was not read only.
 *
 * New command submissions using the userptrs in question are delayed until all
 * page table invalidations are completed and we once more see a coherent
 * process address space.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_hmm.h"

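/* Maximum size walked in a single hmm_range_fault() call: 2 GiB */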
#define MAX_WALK_BYTE	(2UL << 30)

/**
 * amdgpu_hmm_invalidate_gfx - callback to notify about mm change
 *
 * @mni: notifier for the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

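	/*
	 * Update the notifier sequence and wait for pending work on the BO
	 * under adev->notifier_lock, so that new command submissions using
	 * this userptr are delayed until the invalidation is handled.
	 */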
	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				  false, MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = {
	.invalidate = amdgpu_hmm_invalidate_gfx,
};

/**
 * amdgpu_hmm_invalidate_hsa - callback to notify about mm change
 *
 * @mni: notifier for the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * We temporarily evict the BO attached to this range. This necessitates
 * evicting all user-mode queues of the process.
 */
static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

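	/* KFD evicts the userptr BO, which also evicts the user-mode queues */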
	amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);

	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
	.invalidate = amdgpu_hmm_invalidate_hsa,
};

/**
 * amdgpu_hmm_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers a mmu_notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
	int r;

	if (bo->kfd_bo)
		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
						 addr, amdgpu_bo_size(bo),
						 &amdgpu_hmm_hsa_ops);
	else
		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
						 amdgpu_bo_size(bo),
						 &amdgpu_hmm_gfx_ops);
	if (r)
		/*
		 * Make sure amdgpu_hmm_unregister() doesn't call
		 * mmu_interval_notifier_remove() when the notifier isn't properly
		 * initialized.
		 */
		bo->notifier.mm = NULL;

	return r;
}

/**
 * amdgpu_hmm_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of mmu notifier updates from the buffer object.
 */
void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
{
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}

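/**
 * amdgpu_hmm_range_get_pages - fault in the pages backing a userptr range
 *
 * @notifier: mmu_interval_notifier covering the range
 * @start: start address of the range
 * @npages: number of pages in the range
 * @readonly: true if the pages are only read, false if they may be written
 * @owner: dev_private_owner passed to hmm_range_fault()
 * @range: amdgpu_hmm_range whose embedded hmm_range is filled in
 *
 * Walks the range with hmm_range_fault() and stores the resulting PFNs in a
 * newly allocated array referenced by @range->hmm_range.
 *
 * Returns 0 on success, -EAGAIN if the walk needs to be restarted, or a
 * negative error code otherwise.
 */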
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       uint64_t start, uint64_t npages, bool readonly,
			       void *owner,
			       struct amdgpu_hmm_range *range)
{
	unsigned long end;
	unsigned long timeout;
	unsigned long *pfns;
	int r = 0;
	struct hmm_range *hmm_range = &range->hmm_range;

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns)) {
		r = -ENOMEM;
		goto out_free_range;
	}

	hmm_range->notifier = notifier;
	hmm_range->default_flags = HMM_PFN_REQ_FAULT;
	if (!readonly)
		hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
	hmm_range->hmm_pfns = pfns;
	hmm_range->start = start;
	end = start + npages * PAGE_SIZE;
	hmm_range->dev_private_owner = owner;

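	/*
	 * Walk the range in chunks of at most MAX_WALK_BYTE, retrying a
	 * chunk while hmm_range_fault() returns -EBUSY and the timeout has
	 * not expired yet.
	 */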
	do {
		hmm_range->end = min(hmm_range->start + MAX_WALK_BYTE, end);

		pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
			 hmm_range->start, hmm_range->end);

		timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
		hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
		r = hmm_range_fault(hmm_range);
		if (unlikely(r)) {
			if (r == -EBUSY && !time_after(jiffies, timeout))
				goto retry;
			goto out_free_pfns;
		}

		if (hmm_range->end == end)
			break;
		hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
		hmm_range->start = hmm_range->end;
	} while (hmm_range->end < end);

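	/* Restore the original start address and PFN array for the caller */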
	hmm_range->start = start;
	hmm_range->hmm_pfns = pfns;

	return 0;

out_free_pfns:
	kvfree(pfns);
	hmm_range->hmm_pfns = NULL;
out_free_range:
	if (r == -EBUSY)
		r = -EAGAIN;
	return r;
}

/**
 * amdgpu_hmm_range_valid - check if an HMM range is still valid
 * @range: pointer to the &struct amdgpu_hmm_range to validate
 *
 * Determines whether the given HMM range @range is still valid by
 * checking for invalidations via the MMU notifier sequence. This is
 * typically used to verify that the range has not been invalidated
 * by concurrent address space updates before it is accessed.
 *
 * Return:
 * * true if @range is valid and can be used safely
 * * false if @range is NULL or has been invalidated
 */
bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
	if (!range)
		return false;

	return !mmu_interval_read_retry(range->hmm_range.notifier,
					range->hmm_range.notifier_seq);
}

/**
 * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
 * @bo: optional buffer object to associate with this HMM range
 *
 * Allocates a struct amdgpu_hmm_range and associates it with @bo, taking a
 * reference on the BO if one is passed.
 *
 * Return:
 * Pointer to a newly allocated struct amdgpu_hmm_range on success,
 * or NULL if memory allocation fails.
 */
struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
{
	struct amdgpu_hmm_range *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return NULL;

	range->bo = amdgpu_bo_ref(bo);
	return range;
}

/**
 * amdgpu_hmm_range_free - release an AMDGPU HMM range
 * @range: pointer to the range object to free
 *
 * Releases all resources held by @range, freeing the hmm_pfns array and
 * dropping the reference on the associated BO, if any.
 *
 * Return: void
 */
void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
{
	if (!range)
		return;

	kvfree(range->hmm_range.hmm_pfns);
	amdgpu_bo_unref(&range->bo);
	kfree(range);
}
