| 1 | /* |
| 2 | * Copyright 2018 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | */ |
| 23 | |
| 24 | #ifndef __AMDGPU_SDMA_H__ |
| 25 | #define __AMDGPU_SDMA_H__ |
| 26 | #include "amdgpu_ras.h" |
| 27 | |
/* maximum number of SDMA IP instances */
| 29 | #define AMDGPU_MAX_SDMA_INSTANCES 16 |
| 30 | |
| 31 | enum amdgpu_sdma_irq { |
| 32 | AMDGPU_SDMA_IRQ_INSTANCE0 = 0, |
| 33 | AMDGPU_SDMA_IRQ_INSTANCE1, |
| 34 | AMDGPU_SDMA_IRQ_INSTANCE2, |
| 35 | AMDGPU_SDMA_IRQ_INSTANCE3, |
| 36 | AMDGPU_SDMA_IRQ_INSTANCE4, |
| 37 | AMDGPU_SDMA_IRQ_INSTANCE5, |
| 38 | AMDGPU_SDMA_IRQ_INSTANCE6, |
| 39 | AMDGPU_SDMA_IRQ_INSTANCE7, |
| 40 | AMDGPU_SDMA_IRQ_INSTANCE8, |
| 41 | AMDGPU_SDMA_IRQ_INSTANCE9, |
| 42 | AMDGPU_SDMA_IRQ_INSTANCE10, |
| 43 | AMDGPU_SDMA_IRQ_INSTANCE11, |
| 44 | AMDGPU_SDMA_IRQ_INSTANCE12, |
| 45 | AMDGPU_SDMA_IRQ_INSTANCE13, |
| 46 | AMDGPU_SDMA_IRQ_INSTANCE14, |
| 47 | AMDGPU_SDMA_IRQ_INSTANCE15, |
| 48 | AMDGPU_SDMA_IRQ_LAST |
| 49 | }; |
| 50 | |
| 51 | #define NUM_SDMA(x) hweight32(x) |
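
/*
 * NUM_SDMA() counts the bits set in an SDMA instance mask, i.e. the number
 * of enabled engines. Illustrative usage (a sketch, assuming a populated
 * struct amdgpu_device *adev):
 *
 *	int n = NUM_SDMA(adev->sdma.sdma_mask);
 */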
| 52 | |
| 53 | struct amdgpu_sdma_funcs { |
| 54 | int (*stop_kernel_queue)(struct amdgpu_ring *ring); |
| 55 | int (*start_kernel_queue)(struct amdgpu_ring *ring); |
| 56 | int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id); |
| 57 | }; |
| 58 | |
| 59 | struct amdgpu_sdma_instance { |
| 60 | /* SDMA firmware */ |
| 61 | const struct firmware *fw; |
| 62 | uint32_t fw_version; |
| 63 | uint32_t feature_version; |
| 64 | |
	struct amdgpu_ring ring;	/* kernel GFX queue */
	struct amdgpu_ring page;	/* optional page (migration) queue */
	bool burst_nop;			/* engine supports burst NOPs */
	uint32_t aid_id;		/* AID this instance belongs to */
| 69 | |
	/* firmware buffer object, its GPU address and CPU mapping */
	struct amdgpu_bo *sdma_fw_obj;
	uint64_t sdma_fw_gpu_addr;
	uint32_t *sdma_fw_ptr;
| 73 | struct mutex engine_reset_mutex; |
| 74 | /* track guilty state of GFX and PAGE queues */ |
| 75 | bool gfx_guilty; |
| 76 | bool page_guilty; |
| 77 | const struct amdgpu_sdma_funcs *funcs; |
| 78 | }; |
| 79 | |
| 80 | enum amdgpu_sdma_ras_memory_id { |
| 81 | AMDGPU_SDMA_MBANK_DATA_BUF0 = 1, |
| 82 | AMDGPU_SDMA_MBANK_DATA_BUF1 = 2, |
| 83 | AMDGPU_SDMA_MBANK_DATA_BUF2 = 3, |
| 84 | AMDGPU_SDMA_MBANK_DATA_BUF3 = 4, |
| 85 | AMDGPU_SDMA_MBANK_DATA_BUF4 = 5, |
| 86 | AMDGPU_SDMA_MBANK_DATA_BUF5 = 6, |
| 87 | AMDGPU_SDMA_MBANK_DATA_BUF6 = 7, |
| 88 | AMDGPU_SDMA_MBANK_DATA_BUF7 = 8, |
| 89 | AMDGPU_SDMA_MBANK_DATA_BUF8 = 9, |
| 90 | AMDGPU_SDMA_MBANK_DATA_BUF9 = 10, |
| 91 | AMDGPU_SDMA_MBANK_DATA_BUF10 = 11, |
| 92 | AMDGPU_SDMA_MBANK_DATA_BUF11 = 12, |
| 93 | AMDGPU_SDMA_MBANK_DATA_BUF12 = 13, |
| 94 | AMDGPU_SDMA_MBANK_DATA_BUF13 = 14, |
| 95 | AMDGPU_SDMA_MBANK_DATA_BUF14 = 15, |
| 96 | AMDGPU_SDMA_MBANK_DATA_BUF15 = 16, |
| 97 | AMDGPU_SDMA_UCODE_BUF = 17, |
| 98 | AMDGPU_SDMA_RB_CMD_BUF = 18, |
| 99 | AMDGPU_SDMA_IB_CMD_BUF = 19, |
| 100 | AMDGPU_SDMA_UTCL1_RD_FIFO = 20, |
| 101 | AMDGPU_SDMA_UTCL1_RDBST_FIFO = 21, |
| 102 | AMDGPU_SDMA_UTCL1_WR_FIFO = 22, |
| 103 | AMDGPU_SDMA_DATA_LUT_FIFO = 23, |
| 104 | AMDGPU_SDMA_SPLIT_DAT_BUF = 24, |
| 105 | AMDGPU_SDMA_MEMORY_BLOCK_LAST, |
| 106 | }; |
| 107 | |
| 108 | struct amdgpu_sdma_ras { |
| 109 | struct amdgpu_ras_block_object ras_block; |
| 110 | }; |
| 111 | |
| 112 | struct amdgpu_sdma { |
| 113 | struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; |
| 114 | struct amdgpu_irq_src trap_irq; |
| 115 | struct amdgpu_irq_src illegal_inst_irq; |
| 116 | struct amdgpu_irq_src fence_irq; |
| 117 | struct amdgpu_irq_src ecc_irq; |
| 118 | struct amdgpu_irq_src vm_hole_irq; |
| 119 | struct amdgpu_irq_src doorbell_invalid_irq; |
| 120 | struct amdgpu_irq_src pool_timeout_irq; |
| 121 | struct amdgpu_irq_src srbm_write_irq; |
| 122 | struct amdgpu_irq_src ctxt_empty_irq; |
| 123 | |
| 124 | int num_instances; |
| 125 | uint32_t sdma_mask; |
| 126 | int num_inst_per_aid; |
| 127 | uint32_t srbm_soft_reset; |
| 128 | bool has_page_queue; |
| 129 | struct ras_common_if *ras_if; |
| 130 | struct amdgpu_sdma_ras *ras; |
| 131 | uint32_t *ip_dump; |
| 132 | uint32_t supported_reset; |
| 133 | struct list_head reset_callback_list; |
| 134 | bool no_user_submission; |
| 135 | bool disable_uq; |
| 136 | }; |
| 137 | |
| 138 | /* |
| 139 | * Provided by hw blocks that can move/clear data. e.g., gfx or sdma |
| 140 | * But currently, we use sdma to move data. |
| 141 | */ |
| 142 | struct amdgpu_buffer_funcs { |
| 143 | /* maximum bytes in a single operation */ |
| 144 | uint32_t copy_max_bytes; |
| 145 | |
	/* number of dwords to reserve per operation */
	unsigned int copy_num_dw;
| 148 | |
| 149 | /* used for buffer migration */ |
| 150 | void (*emit_copy_buffer)(struct amdgpu_ib *ib, |
| 151 | /* src addr in bytes */ |
| 152 | uint64_t src_offset, |
| 153 | /* dst addr in bytes */ |
| 154 | uint64_t dst_offset, |
				 /* number of bytes to transfer */
| 156 | uint32_t byte_count, |
| 157 | uint32_t copy_flags); |
| 158 | |
| 159 | /* maximum bytes in a single operation */ |
| 160 | uint32_t fill_max_bytes; |
| 161 | |
	/* number of dwords to reserve per operation */
	unsigned int fill_num_dw;
| 164 | |
| 165 | /* used for buffer clearing */ |
| 166 | void (*emit_fill_buffer)(struct amdgpu_ib *ib, |
| 167 | /* value to write to memory */ |
| 168 | uint32_t src_data, |
| 169 | /* dst addr in bytes */ |
| 170 | uint64_t dst_offset, |
				 /* number of bytes to fill */
| 172 | uint32_t byte_count); |
| 173 | }; |
| 174 | |
| 175 | int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, |
| 176 | bool caller_handles_kernel_queues); |
| 177 | |
| 178 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t)) |
| 179 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
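
/*
 * Illustrative use of the wrappers above (a sketch; IB allocation, sizing
 * against copy_max_bytes/fill_max_bytes and fencing are omitted):
 *
 *	amdgpu_emit_copy_buffer(adev, &ib, src_addr, dst_addr, byte_count,
 *				copy_flags);
 *	amdgpu_emit_fill_buffer(adev, &ib, 0x0, dst_addr, byte_count);
 */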
| 180 | |
| 181 | struct amdgpu_sdma_instance * |
| 182 | amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); |
| 183 | int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); |
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned int vmid);
| 185 | int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, |
| 186 | struct ras_common_if *ras_block); |
| 187 | int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, |
| 188 | void *err_data, |
| 189 | struct amdgpu_iv_entry *entry); |
| 190 | int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, |
| 191 | struct amdgpu_irq_src *source, |
| 192 | struct amdgpu_iv_entry *entry); |
| 193 | int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, |
| 194 | bool duplicate); |
| 195 | void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, |
| 196 | bool duplicate); |
| 197 | int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); |
| 198 | void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev); |
| 199 | int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev); |
| 200 | void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev); |
| 201 | bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring); |
| 202 | struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, |
| 203 | struct amdgpu_ring *ring); |
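
/*
 * Typical lookup from a ring back to its SDMA instance, using the helpers
 * declared above (a sketch; error handling omitted):
 *
 *	struct amdgpu_sdma_instance *sdma =
 *		amdgpu_sdma_get_instance_from_ring(ring);
 *	uint32_t idx;
 *
 *	if (sdma && !amdgpu_sdma_get_index_from_ring(ring, &idx))
 *		...
 */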
#endif /* __AMDGPU_SDMA_H__ */
| 205 | |