/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
#define SVM_ADEV_PGMAP_OWNER(adev) \
        ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
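
/*
 * Illustrative sketch (not new API): the owner token is assumed to match
 * pgmap->owner set when device memory is registered, so an HMM range walk
 * can recognize device-private pages belonging to this GPU, or to any GPU
 * in the same XGMI hive:
 *
 *      range.dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev);
 *      ret = hmm_range_fault(&range);
 */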

struct svm_range_bo {
        struct amdgpu_bo *bo;
        struct kref kref;
        struct list_head range_list; /* all svm ranges sharing this bo */
        spinlock_t list_lock;
        struct amdgpu_amdkfd_fence *eviction_fence;
        struct work_struct eviction_work;
        uint32_t evicting;
        struct work_struct release_work;
        struct kfd_node *node;
};
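
/*
 * Lifetime sketch (illustrative): each svm_range sharing the BO holds one
 * kref and is linked on @range_list; the final unref hands teardown off to
 * @release_work (see svm_range_bo_unref_async() below):
 *
 *      prange->svm_bo = svm_range_bo_ref(svm_bo);
 *      ...
 *      svm_range_bo_unref_async(prange->svm_bo);
 */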

enum svm_work_list_ops {
        SVM_OP_NULL,
        SVM_OP_UNMAP_RANGE,
        SVM_OP_UPDATE_RANGE_NOTIFIER,
        SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
        SVM_OP_ADD_RANGE,
        SVM_OP_ADD_RANGE_AND_MAP
};

struct svm_work_list_item {
        enum svm_work_list_ops op;
        struct mm_struct *mm;
};
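
/*
 * Usage sketch (illustrative, matching the deferred_list field docs below):
 * range updates that cannot be performed directly in an MMU interval
 * notifier callback are queued as a work list item and handled later from
 * a worker:
 *
 *      svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
 *      schedule_deferred_list_work(svms);
 */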

/**
 * struct svm_range - shared virtual memory range
 *
 * @svms: list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start: range start address in pages
 * @last: range last address in pages
 * @it_node: node [start, last] stored in the interval tree; start and last
 *           are page numbers, so the range covers (last - start + 1) pages
 * @list: linked list node, used to scan all ranges of svms
 * @update_list: linked list node used to add the range to an update_list
 * @npages: number of pages
 * @vram_pages: number of vram pages in this svm_range
 * @dma_addr: per-GPU dma mapping addresses for system memory physical pages
 * @ttm_res: vram ttm resource map
 * @offset: range start offset within the backing buffer object
 * @svm_bo: struct to manage the split amdgpu_bo
 * @svm_bo_list: linked list node, to scan all ranges which share the same
 *               svm_bo
 * @lock: protect prange start, last, child_list, svm_bo_list
 * @saved_flags: saved memory allocation flags, restored by svm_range_unlock()
 * @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: this svm_range location. 0: all pages are from sys ram;
 *              GPU id: this svm_range may include vram pages from GPU with
 *              id actual_loc.
 * @granularity: migration granularity, log2 of the number of pages
 * @invalid: non-zero means the cpu page table has been invalidated
 * @validate_timestamp: system timestamp when the range was validated
 * @notifier: mmu interval notifier registered for this range
 * @work_item: deferred work item information
 * @deferred_list: list node used to add the range to the deferred list
 * @child_list: list of split child ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
 * @mapped_to_gpu: flag to indicate if the range is mapped to at least one GPU
 * @queue_refcount: number of queues referencing this range
 *
 * Data structure for a virtual memory range shared by CPU and GPUs. It can
 * be backed by system memory ram or device vram, and migrated from ram to
 * vram or from vram to ram.
 */
struct svm_range {
        struct svm_range_list *svms;
        struct mutex migrate_mutex;
        unsigned long start;
        unsigned long last;
        struct interval_tree_node it_node;
        struct list_head list;
        struct list_head update_list;
        uint64_t npages;
        uint64_t vram_pages;
        dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
        struct ttm_resource *ttm_res;
        uint64_t offset;
        struct svm_range_bo *svm_bo;
        struct list_head svm_bo_list;
        struct mutex lock;
        unsigned int saved_flags;
        uint32_t flags;
        uint32_t preferred_loc;
        uint32_t prefetch_loc;
        uint32_t actual_loc;
        uint8_t granularity;
        atomic_t invalid;
        ktime_t validate_timestamp;
        struct mmu_interval_notifier notifier;
        struct svm_work_list_item work_item;
        struct list_head deferred_list;
        struct list_head child_list;
        DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
        DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
        bool mapped_to_gpu;
        atomic_t queue_refcount;
};
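
/*
 * Lookup sketch (illustrative; svm_range_from_addr() below is the real
 * interface and also resolves child ranges): ranges are indexed by page
 * number in an interval tree, assumed here to be svms->objects:
 *
 *      struct interval_tree_node *node;
 *
 *      node = interval_tree_iter_first(&svms->objects, addr, addr);
 *      if (node)
 *              prange = container_of(node, struct svm_range, it_node);
 */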

static inline void svm_range_lock(struct svm_range *prange)
{
        mutex_lock(&prange->lock);
        prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
        memalloc_noreclaim_restore(prange->saved_flags);
        mutex_unlock(&prange->lock);
}
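
/*
 * Locking sketch (illustrative, based on the field docs above): callers
 * serialize migration/validation with prange->migrate_mutex, then take
 * prange->lock via svm_range_lock() around updates to start, last,
 * child_list and svm_bo_list. The memalloc_noreclaim_save() is assumed to
 * keep allocations made under prange->lock from entering reclaim, which
 * could recurse into the MMU notifier and deadlock on the same lock:
 *
 *      mutex_lock(&prange->migrate_mutex);
 *      svm_range_lock(prange);
 *      ...update prange->start, prange->last...
 *      svm_range_unlock(prange);
 *      mutex_unlock(&prange->migrate_mutex);
 */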

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
        if (svm_bo)
                kref_get(&svm_bo->kref);

        return svm_bo;
}

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
              uint64_t size, uint32_t nattrs,
              struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
                                      unsigned long addr,
                                      struct svm_range **parent);
struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
                                          uint32_t gpu_id);
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
                            bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
                            uint32_t vmid, uint32_t node_id, uint64_t addr,
                            uint64_t ts, bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
                             struct svm_range *prange, struct mm_struct *mm,
                             enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
                             unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
                        uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
                            uint8_t __user *user_priv_data,
                            uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
                         uint8_t __user *user_priv_ptr,
                         uint64_t *priv_data_offset,
                         uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
                                        struct mm_struct *mm);

/*
 * The SVM API and HMM page migration work together: the device memory type
 * is set to a non-zero value when page migration registers device memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 || \
                                        ((adev)->apu_prefer_gtt))
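
/*
 * e.g. (illustrative): callers can gate reporting of SVM capability on
 * this check:
 *
 *      if (KFD_IS_SVM_API_SUPPORTED(node->adev))
 *              ...advertise SVM support for the node...
 */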

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);
int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
        return 0;
}
static inline void svm_range_list_fini(struct kfd_process *p)
{
        /* empty */
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
                                          unsigned int pasid,
                                          uint32_t vmid, uint32_t node_id,
                                          uint64_t addr, uint64_t ts,
                                          bool write_fault)
{
        return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
                struct amdgpu_amdkfd_fence *fence)
{
        WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
        return -EINVAL;
}

static inline void svm_range_get_info(struct kfd_process *p,
                                      uint32_t *num_svm_ranges,
                                      uint64_t *svm_priv_data_size)
{
        *num_svm_ranges = 0;
        *svm_priv_data_size = 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
                                          uint8_t __user *user_priv_data,
                                          uint64_t *priv_offset)
{
        return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
                                       uint8_t __user *user_priv_ptr,
                                       uint64_t *priv_data_offset,
                                       uint64_t max_priv_data_size)
{
        return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
        return 0;
}

static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
{
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */