/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

struct xe_device;

/**
 * xe_svm_devm_owner() - Return the owner of device private memory
 * @xe: The xe device.
 *
 * Return: The owner of this device's device private memory to use in
 * hmm_range_fault().
 */
static inline void *xe_svm_devm_owner(struct xe_device *xe)
{
        return xe;
}
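
/*
 * Illustrative sketch (not part of the API): the value returned by
 * xe_svm_devm_owner() is intended to be used as the dev_private_owner of a
 * struct hmm_range, so that hmm_range_fault() treats this device's
 * device-private pages as locally accessible. The surrounding variables
 * below are hypothetical:
 *
 *      struct hmm_range hrange = {
 *              .notifier = &notifier,          // caller-provided notifier
 *              .start = start,
 *              .end = end,
 *              .hmm_pfns = pfns,               // caller-provided PFN array
 *              .dev_private_owner = xe_svm_devm_owner(xe),
 *      };
 *      err = hmm_range_fault(&hrange);
 */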

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
        /** @base: base drm_gpusvm_range */
        struct drm_gpusvm_range base;
        /**
         * @garbage_collector_link: Link into VM's garbage collect SVM range
         * list. Protected by VM's garbage collect lock.
         */
        struct list_head garbage_collector_link;
        /**
         * @tile_present: Tile mask indicating on which tiles a binding is
         * present for this range. Protected by GPU SVM notifier lock.
         */
        u8 tile_present;
        /**
         * @tile_invalidated: Tile mask indicating on which tiles the binding
         * has been invalidated for this range. Protected by GPU SVM notifier
         * lock.
         */
        u8 tile_invalidated;
};
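
/*
 * Illustrative note: the two tile masks are typically combined to decide
 * whether a range still has a usable GPU mapping on a given set of tiles.
 * A sketch, assuming a caller-provided tile_mask:
 *
 *      bool valid = (range->tile_present & ~range->tile_invalidated &
 *                    tile_mask) == tile_mask;
 *
 * Both masks are protected by the GPU SVM notifier lock, so such a check
 * must be made with that lock held.
 */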

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
        return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            struct xe_gt *gt, u64 fault_addr,
                            bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
                      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
                                                 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
                           struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
                                        bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
                           struct xe_svm_range *range,
                           u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
        lockdep_assert_held(&range->base.gpusvm->notifier_lock);
        return range->base.pages.flags.has_dma_mapping;
}
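
/*
 * Illustrative sketch (hypothetical caller): code outside the notifier
 * callbacks that wants to test xe_svm_range_has_dma_mapping() takes the
 * notifier lock around the check, satisfying the lockdep assertion above:
 *
 *      xe_svm_notifier_lock(vm);
 *      if (xe_svm_range_has_dma_mapping(range))
 *              use_the_mapping(range);         // hypothetical helper
 *      xe_svm_notifier_unlock(vm);
 *
 * xe_svm_notifier_lock()/xe_svm_notifier_unlock() are defined further down
 * in this header.
 */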

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
        return container_of(r, struct xe_svm_range, base);
}

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
        return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
        return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
        return drm_gpusvm_range_size(&range->base);
}

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>
#include "xe_vm.h"

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
        struct {
                struct interval_tree_node itree;
                struct {
                        const struct drm_pagemap_addr *dma_addr;
                } pages;
        } base;
        u32 tile_present;
        u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
        return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
        return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
        return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
                               NULL, 0, 0, 0, NULL, NULL, 0);
#else
        return 0;
#endif
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
        xe_assert(vm->xe, xe_vm_is_closed(vm));
        drm_gpusvm_fini(&vm->svm.gpusvm);
#endif
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            struct xe_gt *gt, u64 fault_addr,
                            bool atomic)
{
        return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
        return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
        return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
                  const struct drm_gpusvm_ctx *ctx)
{
        return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
                                                 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
        return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
                           struct drm_gpusvm_ctx *ctx)
{
        return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
        return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
        return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
        return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
        return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
                                        bool preferred_region_is_vram)
{
        return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
                           struct xe_svm_range *range,
                           u8 tile_mask, bool devmem_preferred)
{
        return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
        return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
        return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
        return NULL;
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#define xe_svm_range_has_dma_mapping(...) false
#endif /* CONFIG_DRM_XE_GPUSVM */

#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
#define xe_svm_assert_in_notifier(vm__) \
        lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_assert_held_read(vm__) \
        lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
        drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_lock_interruptible(vm__) \
        down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_unlock(vm__) \
        drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
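
/*
 * Illustrative sketch (hypothetical caller): xe_svm_notifier_lock_interruptible()
 * follows the down_read_interruptible() convention of returning 0 on success
 * and a negative error code if interrupted by a signal:
 *
 *      err = xe_svm_notifier_lock_interruptible(vm);
 *      if (err)
 *              return err;
 *      ... read-side access to SVM range state ...
 *      xe_svm_notifier_unlock(vm);
 */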

#else
#define xe_svm_assert_in_notifier(...) do {} while (0)

static inline void xe_svm_assert_held_read(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
        return 0;
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_GPUSVM */

#endif