| 1 | /* SPDX-License-Identifier: MIT */ |
| 2 | /* |
| 3 | * Copyright © 2020 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #ifndef _XE_MIGRATE_ |
| 7 | #define _XE_MIGRATE_ |
| 8 | |
| 9 | #include <linux/types.h> |
| 10 | |
struct dma_fence;
struct drm_pagemap_addr;
struct iosys_map;
struct ttm_resource;

struct xe_bo;
struct xe_exec_queue;
struct xe_gt;
struct xe_lrc;
struct xe_migrate;
struct xe_migrate_pt_update;
struct xe_pt;
struct xe_sched_job;
struct xe_sync_entry;
struct xe_tile;
struct xe_tlb_inval_job;
struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;
struct xe_vma_ops;

enum xe_sriov_vf_ccs_rw_ctxs;
| 30 | |
/**
 * enum xe_migrate_copy_dir - Direction of a VRAM <-> system memory copy.
 * @XE_MIGRATE_COPY_TO_VRAM: Copy into VRAM.
 * @XE_MIGRATE_COPY_TO_SRAM: Copy into system memory.
 *
 * Used by xe_migrate_vram_copy_chunk() to select the copy direction.
 */
enum xe_migrate_copy_dir {
	XE_MIGRATE_COPY_TO_VRAM,
	XE_MIGRATE_COPY_TO_SRAM,
};
| 35 | |
| 36 | /** |
| 37 | * struct xe_migrate_pt_update_ops - Callbacks for the |
| 38 | * xe_migrate_update_pgtables() function. |
| 39 | */ |
| 40 | struct xe_migrate_pt_update_ops { |
| 41 | /** |
| 42 | * @populate: Populate a command buffer or page-table with ptes. |
| 43 | * @pt_update: Embeddable callback argument. |
| 44 | * @tile: The tile for the current operation. |
| 45 | * @map: struct iosys_map into the memory to be populated. |
| 46 | * @pos: If @map is NULL, map into the memory to be populated. |
| 47 | * @ofs: qword offset into @map, unused if @map is NULL. |
| 48 | * @num_qwords: Number of qwords to write. |
| 49 | * @update: Information about the PTEs to be inserted. |
| 50 | * |
| 51 | * This interface is intended to be used as a callback into the |
| 52 | * page-table system to populate command buffers or shared |
| 53 | * page-tables with PTEs. |
| 54 | */ |
| 55 | void (*populate)(struct xe_migrate_pt_update *pt_update, |
| 56 | struct xe_tile *tile, struct iosys_map *map, |
| 57 | void *pos, u32 ofs, u32 num_qwords, |
| 58 | const struct xe_vm_pgtable_update *update); |
| 59 | /** |
| 60 | * @clear: Clear a command buffer or page-table with ptes. |
| 61 | * @pt_update: Embeddable callback argument. |
| 62 | * @tile: The tile for the current operation. |
| 63 | * @map: struct iosys_map into the memory to be populated. |
| 64 | * @pos: If @map is NULL, map into the memory to be populated. |
| 65 | * @ofs: qword offset into @map, unused if @map is NULL. |
| 66 | * @num_qwords: Number of qwords to write. |
| 67 | * @update: Information about the PTEs to be inserted. |
| 68 | * |
| 69 | * This interface is intended to be used as a callback into the |
| 70 | * page-table system to populate command buffers or shared |
| 71 | * page-tables with PTEs. |
| 72 | */ |
| 73 | void (*clear)(struct xe_migrate_pt_update *pt_update, |
| 74 | struct xe_tile *tile, struct iosys_map *map, |
| 75 | void *pos, u32 ofs, u32 num_qwords, |
| 76 | const struct xe_vm_pgtable_update *update); |
| 77 | |
| 78 | /** |
| 79 | * @pre_commit: Callback to be called just before arming the |
| 80 | * sched_job. |
| 81 | * @pt_update: Pointer to embeddable callback argument. |
| 82 | * |
| 83 | * Return: 0 on success, negative error code on error. |
| 84 | */ |
| 85 | int (*pre_commit)(struct xe_migrate_pt_update *pt_update); |
| 86 | }; |
| 87 | |
| 88 | /** |
| 89 | * struct xe_migrate_pt_update - Argument to the |
| 90 | * struct xe_migrate_pt_update_ops callbacks. |
| 91 | * |
| 92 | * Intended to be subclassed to support additional arguments if necessary. |
| 93 | */ |
| 94 | struct xe_migrate_pt_update { |
| 95 | /** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */ |
| 96 | const struct xe_migrate_pt_update_ops *ops; |
| 97 | /** @vops: VMA operations */ |
| 98 | struct xe_vma_ops *vops; |
| 99 | /** @job: The job if a GPU page-table update. NULL otherwise */ |
| 100 | struct xe_sched_job *job; |
| 101 | /** |
| 102 | * @ijob: The TLB invalidation job for primary GT. NULL otherwise |
| 103 | */ |
| 104 | struct xe_tlb_inval_job *ijob; |
| 105 | /** |
| 106 | * @mjob: The TLB invalidation job for media GT. NULL otherwise |
| 107 | */ |
| 108 | struct xe_tlb_inval_job *mjob; |
| 109 | /** @tile_id: Tile ID of the update */ |
| 110 | u8 tile_id; |
| 111 | }; |
| 112 | |
/* Allocation and one-time setup of a tile's migrate context. */
struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile);
int xe_migrate_init(struct xe_migrate *m);

/*
 * Migrate @npages between system pages described by a drm_pagemap_addr
 * array and a device address (presumably VRAM, given the names — see the
 * definitions in xe_migrate.c). The returned fence signals completion;
 * @deps orders the copy after a prior fence.
 */
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
				     unsigned long npages,
				     struct drm_pagemap_addr *src_addr,
				     u64 dst_addr,
				     struct dma_fence *deps);

struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
				       unsigned long npages,
				       u64 src_addr,
				       struct drm_pagemap_addr *dst_addr,
				       struct dma_fence *deps);

/*
 * Copy a BO between two TTM resources; @copy_only_ccs restricts the copy
 * to CCS (compression) metadata.
 */
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
				  struct xe_bo *src_bo,
				  struct xe_bo *dst_bo,
				  struct ttm_resource *src,
				  struct ttm_resource *dst,
				  bool copy_only_ccs);

/* SR-IOV VF CCS save/restore copy; direction selected by @read_write. */
int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
			   struct xe_bo *src_bo,
			   enum xe_sriov_vf_ccs_rw_ctxs read_write);

/* Accessors for the migrate context's LRC and exec queue. */
struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);

/* Copy @size bytes between a VRAM BO and a sysmem BO in direction @dir. */
struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
					     u64 size, enum xe_migrate_copy_dir dir);

/* Read (@write == 0) or write @len bytes of @bo's backing store at @offset. */
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
			     unsigned long offset, void *buf, int len,
			     int write);
| 147 | |
/* Flags selecting what xe_migrate_clear() clears: BO data, CCS metadata, or both. */
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
				    XE_MIGRATE_CLEAR_FLAG_CCS_DATA)
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
				   struct xe_bo *bo,
				   struct ttm_resource *dst,
				   u32 clear_flags);

struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);

/* Run the struct xe_migrate_pt_update_ops callbacks for a page-table update. */
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
			   struct xe_migrate_pt_update *pt_update);

void xe_migrate_wait(struct xe_migrate *m);
| 164 | |
/*
 * Lockdep-only assertion paired with xe_migrate_job_lock()/unlock();
 * compiles to an empty inline when CONFIG_PROVE_LOCKING is disabled.
 */
#if IS_ENABLED(CONFIG_PROVE_LOCKING)
void xe_migrate_job_lock_assert(struct xe_exec_queue *q);
#else
static inline void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
{
}
#endif

void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
| 175 | |
| 176 | #endif |
| 177 | |