/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/*
 * dc_helper.c
 *
 * Created on: Aug 30, 2016
 *	Author: agrodzov
 */

#include <linux/delay.h>
#include <linux/stdarg.h>

#include "dm_services.h"

#include "dc.h"
#include "dc_dmub_srv.h"
#include "reg_helper.h"

#define DC_LOGGER \
	ctx->logger

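/*
 * When register-sequence offload is active, register accesses are not
 * issued as MMIO directly; they are packed into DMUB command buffers
 * (read-modify-write, burst-write or reg-wait) and submitted to the
 * DMCUB firmware. The submit_dmub_*() helpers below flush one packed
 * command buffer and reset the gather state for the next sequence.
 */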
static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;
}

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}
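
/*
 * Worked example with hypothetical values: for a two-bit field with
 * shift = 4 and mask = 0x30, packing field value 0x2 gives
 * (mask & (0x2 << 4)) = 0x20, so 0x20 is merged into ->value and 0x30
 * is accumulated into ->mask for the eventual read-modify-write.
 */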

static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
}

static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
}

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

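	/*
	 * If recent writes kept hitting the same address, a burst write is
	 * cheaper than more read-modify-write entries; fall back to the
	 * normal sequence if the burst buffer rejects this address.
	 */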
	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}
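
/*
 * Illustrative call (register and field names are hypothetical): update
 * two fields of one register in a single read-modify-write:
 *
 *	generic_reg_update_ex(ctx, mmOTG0_CONTROL, 2,
 *			OTG_MASTER_EN__SHIFT, OTG_MASTER_EN_MASK, 1,
 *			OTG_DISABLE_POINT__SHIFT, OTG_DISABLE_POINT_MASK, 0);
 *
 * In practice callers go through the REG_SET/REG_UPDATE macros from
 * reg_helper.h, which expand the shift/mask pairs automatically.
 */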

uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}
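
/*
 * Illustrative read (field names are hypothetical); callers normally
 * use the REG_GET family of macros from reg_helper.h instead:
 *
 *	uint32_t enabled;
 *
 *	generic_reg_get(ctx, mmOTG0_CONTROL,
 *			OTG_MASTER_EN__SHIFT, OTG_MASTER_EN_MASK, &enabled);
 */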

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_args version of this is a bad idea: the output parameters are
 * passed by pointer, so the compiler cannot check that their sizes match,
 * which makes it prone to stack-corruption bugs.

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/

void generic_reg_wait(const struct dc_context *ctx,
		uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
		unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
		const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us / 1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000)
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	BREAK_TO_DEBUGGER();
}
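
/*
 * Illustrative poll (register and field names are hypothetical): wait up
 * to 100 tries x 10us for a busy bit to clear:
 *
 *	generic_reg_wait(ctx, mmOTG0_STATUS,
 *			OTG_BUSY__SHIFT, OTG_BUSY_MASK, 0,
 *			10, 100, __func__, __LINE__);
 *
 * Callers normally use the REG_WAIT macro, which supplies func_name and
 * line for the timeout diagnostics above.
 */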

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	/* Register reads cannot be offloaded; a read while a gather is in
	 * progress indicates a broken caller sequence.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}
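
/*
 * Indirect registers are reached through an index/data pair: the target
 * offset is first written to the index register, then the payload moves
 * through the data register. Illustrative use (addresses hypothetical):
 *
 *	generic_write_indirect_reg(ctx, mmIND_INDEX, mmIND_DATA,
 *			target_reg, value);
 */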

uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* If reg sequence offload is supported and enabled, set a flag so
	 * that the REG_SET/REG_UPDATE macros build a reg sequence command
	 * buffer rather than writing MMIO directly.
	 */
	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
				&ctx->dmub_srv->reg_helper_offload;

		/* Caller sequence mismatch: a gather is already in progress.
		 * Offload will not work; the caller needs debugging.
		 */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
	}
}
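
/*
 * Typical offload flow, sketched (driver call sites vary):
 *
 *	reg_sequence_start_gather(ctx);		// REG_* macros now pack commands
 *	// ... REG_SET / REG_UPDATE / REG_WAIT programming ...
 *	reg_sequence_start_execute(ctx);	// submit the packed buffer
 *	reg_sequence_wait_done(ctx);		// block until DMCUB is idle
 */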

char *dce_version_to_string(const int version)
{
	switch (version) {
	case DCE_VERSION_6_0:
		return "DCE 6.0";
	case DCE_VERSION_6_1:
		return "DCE 6.1";
	case DCE_VERSION_6_4:
		return "DCE 6.4";
	case DCE_VERSION_8_0:
		return "DCE 8.0";
	case DCE_VERSION_8_1:
		return "DCE 8.1";
	case DCE_VERSION_8_3:
		return "DCE 8.3";
	case DCE_VERSION_10_0:
		return "DCE 10.0";
	case DCE_VERSION_11_0:
		return "DCE 11.0";
	case DCE_VERSION_11_2:
		return "DCE 11.2";
	case DCE_VERSION_11_22:
		return "DCE 11.22";
	case DCE_VERSION_12_0:
		return "DCE 12.0";
	case DCE_VERSION_12_1:
		return "DCE 12.1";
	case DCN_VERSION_1_0:
		return "DCN 1.0";
	case DCN_VERSION_1_01:
		return "DCN 1.0.1";
	case DCN_VERSION_2_0:
		return "DCN 2.0";
	case DCN_VERSION_2_1:
		return "DCN 2.1";
	case DCN_VERSION_2_01:
		return "DCN 2.0.1";
	case DCN_VERSION_3_0:
		return "DCN 3.0";
	case DCN_VERSION_3_01:
		return "DCN 3.0.1";
	case DCN_VERSION_3_02:
		return "DCN 3.0.2";
	case DCN_VERSION_3_03:
		return "DCN 3.0.3";
	case DCN_VERSION_3_1:
		return "DCN 3.1.2";
	case DCN_VERSION_3_14:
		return "DCN 3.1.4";
	case DCN_VERSION_3_15:
		return "DCN 3.1.5";
	case DCN_VERSION_3_16:
		return "DCN 3.1.6";
	case DCN_VERSION_3_2:
		return "DCN 3.2";
	case DCN_VERSION_3_21:
		return "DCN 3.2.1";
	case DCN_VERSION_3_5:
		return "DCN 3.5";
	case DCN_VERSION_3_51:
		return "DCN 3.5.1";
	case DCN_VERSION_3_6:
		return "DCN 3.6";
	case DCN_VERSION_4_01:
		return "DCN 4.0.1";
	default:
		return "Unknown";
	}
}

bool dc_supports_vrr(const enum dce_version v)
{
	return v >= DCE_VERSION_8_0;
}