| 1 | /* |
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | */ |
| 23 | |
| 24 | #include <linux/pci.h> |
| 25 | |
| 26 | #include "amdgpu.h" |
| 27 | #include "amdgpu_ih.h" |
| 28 | #include "sid.h" |
| 29 | #include "si_ih.h" |
| 30 | |
| 31 | #include "oss/oss_1_0_d.h" |
| 32 | #include "oss/oss_1_0_sh_mask.h" |
| 33 | |
| 34 | static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev); |
| 35 | |
/**
 * si_ih_enable_interrupts - enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the enable bits in IH_CNTL and IH_RB_CNTL via read-modify-write
 * and records the enabled state in the driver's IH ring structure.
 */
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}
| 47 | |
/**
 * si_ih_disable_interrupts - disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Clears the enable bits in IH_RB_CNTL and IH_CNTL, resets the
 * hardware read/write pointers to 0 and mirrors that state in the
 * driver's IH ring structure.
 */
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* reset both hardware ring pointers */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}
| 62 | |
| 63 | static int si_ih_irq_init(struct amdgpu_device *adev) |
| 64 | { |
| 65 | struct amdgpu_ih_ring *ih = &adev->irq.ih; |
| 66 | int rb_bufsz; |
| 67 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
| 68 | |
| 69 | si_ih_disable_interrupts(adev); |
| 70 | /* set dummy read address to dummy page address */ |
| 71 | WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); |
| 72 | interrupt_cntl = RREG32(INTERRUPT_CNTL); |
| 73 | interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; |
| 74 | interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; |
| 75 | WREG32(INTERRUPT_CNTL, interrupt_cntl); |
| 76 | |
| 77 | WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8); |
| 78 | rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); |
| 79 | |
| 80 | ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE | |
| 81 | IH_WPTR_OVERFLOW_CLEAR | |
| 82 | (rb_bufsz << 1) | |
| 83 | IH_WPTR_WRITEBACK_ENABLE; |
| 84 | |
| 85 | WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); |
| 86 | WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); |
| 87 | WREG32(IH_RB_CNTL, ih_rb_cntl); |
| 88 | WREG32(IH_RB_RPTR, 0); |
| 89 | WREG32(IH_RB_WPTR, 0); |
| 90 | |
| 91 | ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0); |
| 92 | if (adev->irq.msi_enabled) |
| 93 | ih_cntl |= RPTR_REARM; |
| 94 | WREG32(IH_CNTL, ih_cntl); |
| 95 | |
| 96 | pci_set_master(dev: adev->pdev); |
| 97 | si_ih_enable_interrupts(adev); |
| 98 | |
| 99 | if (adev->irq.ih_soft.ring_size) |
| 100 | adev->irq.ih_soft.enabled = true; |
| 101 | |
| 102 | return 0; |
| 103 | } |
| 104 | |
/**
 * si_ih_irq_disable - disable interrupts and let them drain
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the interrupt ring, then waits 1 ms so any in-flight
 * interrupt activity can settle before teardown proceeds.
 */
static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}
| 110 | |
/**
 * si_ih_get_wptr - fetch the current write pointer
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch the write pointer from
 *
 * Reads the write pointer from the writeback location.  On hardware
 * ring overflow: warns, advances the read pointer past the last
 * overwritten entry (wptr + 16) and pulses the WPTR_OVERFLOW_CLEAR
 * bit in IH_RB_CNTL so a subsequent overflow can be detected again.
 * The software ring (ih_soft) has no overflow handling.
 *
 * Returns the write pointer masked to the ring size.
 */
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	/* the software ring is filled by the driver; no overflow hardware */
	if (ih == &adev->irq.ih_soft)
		goto out;

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		/* resume parsing from the oldest entry not yet overwritten */
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}

out:
	return (wptr & ih->ptr_mask);
}
| 140 | |
| 141 | static void si_ih_decode_iv(struct amdgpu_device *adev, |
| 142 | struct amdgpu_ih_ring *ih, |
| 143 | struct amdgpu_iv_entry *entry) |
| 144 | { |
| 145 | u32 ring_index = ih->rptr >> 2; |
| 146 | uint32_t dw[4]; |
| 147 | |
| 148 | dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); |
| 149 | dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); |
| 150 | dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); |
| 151 | dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); |
| 152 | |
| 153 | entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; |
| 154 | entry->src_id = dw[0] & 0xff; |
| 155 | entry->src_data[0] = dw[1] & 0xfffffff; |
| 156 | entry->ring_id = dw[2] & 0xff; |
| 157 | entry->vmid = (dw[2] >> 8) & 0xff; |
| 158 | |
| 159 | ih->rptr += 16; |
| 160 | } |
| 161 | |
/**
 * si_ih_set_rptr - write the software read pointer back to hardware
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer whose rptr is published
 */
static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}
| 167 | |
| 168 | static int si_ih_early_init(struct amdgpu_ip_block *ip_block) |
| 169 | { |
| 170 | struct amdgpu_device *adev = ip_block->adev; |
| 171 | |
| 172 | si_ih_set_interrupt_funcs(adev); |
| 173 | |
| 174 | return 0; |
| 175 | } |
| 176 | |
| 177 | static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) |
| 178 | { |
| 179 | int r; |
| 180 | struct amdgpu_device *adev = ip_block->adev; |
| 181 | |
| 182 | r = amdgpu_ih_ring_init(adev, ih: &adev->irq.ih, ring_size: 64 * 1024, use_bus_addr: false); |
| 183 | if (r) |
| 184 | return r; |
| 185 | |
| 186 | r = amdgpu_ih_ring_init(adev, ih: &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr: true); |
| 187 | if (r) |
| 188 | return r; |
| 189 | |
| 190 | return amdgpu_irq_init(adev); |
| 191 | } |
| 192 | |
/**
 * si_ih_sw_fini - tear down software IRQ state
 *
 * @ip_block: the IP block this teardown is for
 *
 * Releases the common IRQ layer's software resources.  Always
 * returns 0.
 */
static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_fini_sw(adev);

	return 0;
}
| 201 | |
/**
 * si_ih_hw_init - program and enable the interrupt ring
 *
 * @ip_block: the IP block being brought up
 *
 * Returns the result of si_ih_irq_init().
 */
static int si_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return si_ih_irq_init(adev);
}
| 208 | |
| 209 | static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block) |
| 210 | { |
| 211 | si_ih_irq_disable(adev: ip_block->adev); |
| 212 | |
| 213 | return 0; |
| 214 | } |
| 215 | |
/* Suspend is identical to hw teardown: quiesce the IH block. */
static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
{
	return si_ih_hw_fini(ip_block);
}
| 220 | |
/* Resume is identical to hw bring-up: reprogram and enable the ring. */
static int si_ih_resume(struct amdgpu_ip_block *ip_block)
{
	return si_ih_hw_init(ip_block);
}
| 225 | |
| 226 | static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block) |
| 227 | { |
| 228 | struct amdgpu_device *adev = ip_block->adev; |
| 229 | u32 tmp = RREG32(mmSRBM_STATUS); |
| 230 | |
| 231 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) |
| 232 | return false; |
| 233 | |
| 234 | return true; |
| 235 | } |
| 236 | |
| 237 | static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) |
| 238 | { |
| 239 | unsigned i; |
| 240 | struct amdgpu_device *adev = ip_block->adev; |
| 241 | |
| 242 | for (i = 0; i < adev->usec_timeout; i++) { |
| 243 | if (si_ih_is_idle(ip_block)) |
| 244 | return 0; |
| 245 | udelay(usec: 1); |
| 246 | } |
| 247 | return -ETIMEDOUT; |
| 248 | } |
| 249 | |
| 250 | static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block) |
| 251 | { |
| 252 | struct amdgpu_device *adev = ip_block->adev; |
| 253 | |
| 254 | u32 srbm_soft_reset = 0; |
| 255 | u32 tmp = RREG32(mmSRBM_STATUS); |
| 256 | |
| 257 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) |
| 258 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; |
| 259 | |
| 260 | if (srbm_soft_reset) { |
| 261 | tmp = RREG32(mmSRBM_SOFT_RESET); |
| 262 | tmp |= srbm_soft_reset; |
| 263 | dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n" , tmp); |
| 264 | WREG32(mmSRBM_SOFT_RESET, tmp); |
| 265 | tmp = RREG32(mmSRBM_SOFT_RESET); |
| 266 | |
| 267 | udelay(usec: 50); |
| 268 | |
| 269 | tmp &= ~srbm_soft_reset; |
| 270 | WREG32(mmSRBM_SOFT_RESET, tmp); |
| 271 | tmp = RREG32(mmSRBM_SOFT_RESET); |
| 272 | |
| 273 | udelay(usec: 50); |
| 274 | } |
| 275 | |
| 276 | return 0; |
| 277 | } |
| 278 | |
/* Clockgating is not supported for SI IH; nothing to do. */
static int si_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				       enum amd_clockgating_state state)
{
	return 0;
}
| 284 | |
/* Powergating is not supported for SI IH; nothing to do. */
static int si_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
				       enum amd_powergating_state state)
{
	return 0;
}
| 290 | |
/* IP-block lifecycle callbacks for the SI interrupt handler */
static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};
| 306 | |
/* Ring-access callbacks used by the common amdgpu IH code */
static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};
| 312 | |
/* Install the SI-specific IH ring callbacks on the device. */
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}
| 317 | |
/* Exported IP block descriptor: SI interrupt handler v1.0.0 */
const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};
| 326 | |