// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

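/* Translation tables between internal and uapi engine class values. */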
static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

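/* Payload size for the engines query: header plus one entry per exposed engine. */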
static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use the same logic as the perf subsystem to allow the user to
	 * select the reference clock id used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

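/*
 * Read the 64-bit engine timestamp from the two 32-bit RING_TIMESTAMP
 * registers, retrying if the upper dword rolls over between reads. The
 * CPU timestamp is sampled next to the lower-dword read and bounded by
 * a local_clock() delta so userspace can judge correlation accuracy.
 */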
static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

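/*
 * Correlate an engine timestamp with the CPU clock requested in
 * resp.clockid. Only the output fields of the user struct are written
 * back; the input fields (eci, clockid) are left untouched.
 */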
static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;
	unsigned int fw_ref;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	if (eci->gt_id >= xe->info.max_gt_per_tile)
		return -EINVAL;

	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -EIO;
	}

	hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
			   &resp.cpu_delta, cpu_clock);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of the user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

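/* Report all non-reserved hardware engines across every GT. */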
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

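/* One region for system memory plus one per initialized VRAM manager. */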
static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

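/*
 * Describe all usable memory regions: sysmem is always instance 0,
 * followed by one entry per present VRAM region.
 */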
static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			xe_ttm_vram_get_used(man,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].used,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].cpu_visible_used);

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

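/* Fill the device-wide configuration array: device/revision id, flags, minimum alignment, VA bits, max exec queue priority. */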
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

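/* Describe each GT: type, tile, reference clock, near/far memory regions and GMD IP version. */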
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

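/* Copy out the raw GuC hwconfig blob read from the root GT. */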
static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

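/* Per GT: geometry DSS, compute DSS and EU masks, plus L3 banks when reportable. */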
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

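/*
 * Write one topology record: the header followed by the raw mask bytes,
 * advancing the user pointer past both on success.
 */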
static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

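/* Stream the per-GT topology records directly into the user buffer. */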
static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

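/*
 * Report the microcontroller firmware version selected by resp.uc_type:
 * the GuC submission (compatibility) version, or the HuC release version
 * from the GT that owns the HuC.
 */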
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

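/*
 * Emit one variable-length drm_xe_oa_unit record per OA unit, each
 * followed by the engine instances attached to that unit; pdu tracks
 * the write position inside the flattened buffer.
 */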
static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

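/* Report PXP readiness and the session types the driver supports. */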
static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

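/* Report EU stall sampling capabilities and the supported sampling rates. */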
static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

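/* Indexed by the DRM_XE_DEVICE_QUERY_* values from uapi/drm/xe_drm.h. */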
static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

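/*
 * Every query follows the same two-call protocol: call once with size == 0
 * to learn the payload size, then call again with a buffer of exactly that
 * size. A minimal userspace sketch, assuming an already-open DRM fd and
 * omitting error handling (illustrative only):
 *
 *	struct drm_xe_device_query q = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *	};
 *	void *buf;
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);   // q.size is now set
 *	buf = calloc(1, q.size);
 *	q.data = (uintptr_t)buf;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);   // payload written to buf
 */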
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}