// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

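/*
 * One string per bit of a block's GLBL_ERR_CAUSE register;
 * hl_read_glbl_errors() below indexes this array by bit position when
 * decoding a reported violation.
 */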
static const char * const hl_glbl_error_cause[] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"Error due to read from unmapped reg",
	"Error due to un-priv write",
	"Error due to un-secure write",
	"Error due to write to unmapped reg",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"External I/F write sec violation",
	"External I/F write to un-mapped reg",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"Read to write only",
	"Write to read only"
};

/**
 * hl_get_pb_block - return the index of the relevant block within the block
 *                   array
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 * Return: block index on success, -EDOM if the address does not fall inside
 *         any block.
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}

/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset, converted to a bit offset within the pb block
 * @sgs_entry: pb array
 *
 * Return: 0 on success, -EINVAL if the offset is out of range or not 4-byte
 *         aligned.
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
		struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

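	/*
	 * Each protection bit covers one aligned 4-byte register, so the bit
	 * index is the register's offset within the block divided by 4.
	 */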
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			(reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}

/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        remove the corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

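/*
 * Usage sketch (hypothetical symbols): an ASIC security setup that wants user
 * space to access a single queue register clears just that bit in its local
 * pb copy before the copy is written to HW:
 *
 *	rc = hl_unsecure_register(hdev, mmMY_QM_CQ_PTR_LO, 0,
 *			my_pb_blocks, glbl_sec, ARRAY_SIZE(my_pb_blocks));
 *
 * mmMY_QM_CQ_PTR_LO, my_pb_blocks and glbl_sec are placeholders, not real
 * driver symbols.
 */
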
/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and remove the corresponding protection
 *                              bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

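	/*
	 * The whole range is expected to fall inside the block found above;
	 * walk it register by register, accumulating any failure into rc.
	 */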
	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
				&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and remove the corresponding
 *                               protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
				offset, pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - ack security violations
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
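			/*
			 * Write the cause bits back to ack them (assumed
			 * write-1-to-clear semantics).
			 */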
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to the given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

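	/*
	 * Emulation (pldm) platforms are far slower than real HW; pause
	 * briefly before the burst of config writes below (assumed rationale,
	 * not documented in the original code).
	 */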
	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
					sgs_array[i].sec_array[j]);
	}
}

/**
 * hl_secure_block - locally memset a pb array to 0, marking all registers
 *                   as secured
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

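	/*
	 * A cleared bit in the glbl_sec bitmap marks a register as secured,
	 * so zeroing the arrays secures every register in every block;
	 * UNSET_GLBL_SEC_BIT() is then used to unsecure selected registers.
	 */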
	for (i = 0 ; i < array_size ; i++)
		memset((char *)(sgs_array[i].sec_array), 0,
				HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
{
	int i, j, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
			0, pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_array,
			user_regs_array_size, ULLONG_MAX);
}

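/*
 * Usage sketch (hypothetical symbols): an ASIC's security init typically
 * secures everything and then punches holes only for the registers user
 * space may touch, e.g.:
 *
 *	rc = hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET,
 *			NUM_OF_EDMA_PER_DCORE, EDMA_OFFSET,
 *			my_edma_pb_blocks, ARRAY_SIZE(my_edma_pb_blocks),
 *			my_edma_user_regs, ARRAY_SIZE(my_edma_user_regs));
 *
 * All names above are placeholders; real callers pass their ASIC-specific
 * block and register tables.
 */
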
/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according
 *                               to given configuration, unsecuring register
 *                               ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size, u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration,
 *                     unsecuring register ranges instead of specific
 *                     registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_range_array,
			user_regs_range_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 *                           according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
			0, pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring
 *                                  register ranges instead of specific
 *                                  registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
	int i, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given
 *                       configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to;
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
 *                          according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}

static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 fw_block_base_address = block_info->base_addr +
			major * block_info->major_offset +
			minor * block_info->minor_offset +
			sub_minor * block_info->sub_minor_offset;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* The calculation above returns an address for FW use, and therefore
	 * must be adjusted (cfg base subtracted) for driver use.
	 */
	return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
}

static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
		int block_type)
{
	int i;

	/* Check if block type is listed in the exclusion list of block types */
	for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
		if (block_type == skip_blocks_cfg->block_types[i])
			return true;

	return false;
}

static bool hl_check_block_range_exclusion(struct hl_device *hdev,
		struct hl_skip_blocks_cfg *skip_blocks_cfg,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
	int i, j;

	block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
			major, minor, sub_minor);

	for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
		blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
				skip_blocks_cfg->block_ranges[i].start) /
				HL_BLOCK_SIZE + 1;
		for (j = 0 ; j < blocks_in_range ; j++) {
			block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
					j * HL_BLOCK_SIZE;
			if (block_base_addr == block_base_addr_in_range)
				return true;
		}
	}

	return false;
}

static int hl_read_glbl_errors(struct hl_device *hdev,
		u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_special_block_info *special_blocks = prop->special_blocks;
	struct hl_special_block_info *current_block = &special_blocks[blk_idx];
	u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
		base = current_block->base_addr - lower_32_bits(prop->cfg_base_address);
	int i;

	block_base = base + major * current_block->major_offset +
			minor * current_block->minor_offset +
			sub_minor * current_block->sub_minor_offset;

	glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
	cause_val = RREG32(glbl_err_cause);
	if (!cause_val)
		return 0;

	glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
	addr_val = RREG32(glbl_err_addr);

	for (i = 0 ; i <= prop->glbl_err_max_cause_num ; i++) {
		if (cause_val & BIT(i))
			dev_err_ratelimited(hdev->dev,
				"%s, addr %#llx\n",
				hl_glbl_error_cause[i],
				prop->cfg_base_address + block_base +
				FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
	}

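	/* Ack the logged causes (assumed write-1-to-clear semantics). */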
	WREG32(glbl_err_cause, cause_val);

	return 0;
}

void hl_check_for_glbl_errors(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_special_blocks_cfg special_blocks_cfg;
	struct iterate_special_ctx glbl_err_iter;
	int rc;

	memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
	special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;

	glbl_err_iter.fn = &hl_read_glbl_errors;
	glbl_err_iter.data = &special_blocks_cfg;

	rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
	if (rc)
		dev_err_ratelimited(hdev->dev,
			"Could not iterate special blocks, glbl error check failed\n");
}

int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
	struct hl_special_blocks_cfg *special_blocks_cfg =
			(struct hl_special_blocks_cfg *)ctx->data;
	struct hl_skip_blocks_cfg *skip_blocks_cfg =
			special_blocks_cfg->skip_blocks_cfg;
	u32 major, minor, sub_minor, blk_idx, num_blocks;
	struct hl_special_block_info *block_info_arr;
	int rc;

	block_info_arr = hdev->asic_prop.special_blocks;
	if (!block_info_arr)
		return -EINVAL;

	num_blocks = hdev->asic_prop.num_of_special_blocks;

	for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
		if (hl_check_block_type_exclusion(skip_blocks_cfg,
				block_info_arr->block_type))
			continue;

		for (major = 0 ; major < block_info_arr->major ; major++) {
			minor = 0;
			do {
				sub_minor = 0;
				do {
					if ((hl_check_block_range_exclusion(hdev,
							skip_blocks_cfg, block_info_arr,
							major, minor, sub_minor)) ||
							(skip_blocks_cfg->skip_block_hook &&
							skip_blocks_cfg->skip_block_hook(hdev,
								special_blocks_cfg,
								blk_idx, major, minor, sub_minor))) {
						sub_minor++;
						continue;
					}

					rc = ctx->fn(hdev, blk_idx, major, minor,
							sub_minor, ctx->data);
					if (rc)
						return rc;

					sub_minor++;
				} while (sub_minor < block_info_arr->sub_minor);

				minor++;
			} while (minor < block_info_arr->minor);
		}
	}

	return 0;
}