| 1 | // SPDX-License-Identifier: MIT |
| 2 | /* |
| 3 | * Copyright(c) 2024 Intel Corporation. |
| 4 | */ |
| 5 | |
| 6 | #include "xe_pxp.h" |
| 7 | |
| 8 | #include <drm/drm_managed.h> |
| 9 | #include <uapi/drm/xe_drm.h> |
| 10 | |
| 11 | #include "xe_bo.h" |
| 12 | #include "xe_bo_types.h" |
| 13 | #include "xe_device_types.h" |
| 14 | #include "xe_exec_queue.h" |
| 15 | #include "xe_force_wake.h" |
| 16 | #include "xe_guc_submit.h" |
| 17 | #include "xe_gsc_proxy.h" |
| 18 | #include "xe_gt.h" |
| 19 | #include "xe_gt_types.h" |
| 20 | #include "xe_huc.h" |
| 21 | #include "xe_mmio.h" |
| 22 | #include "xe_pm.h" |
| 23 | #include "xe_pxp_submit.h" |
| 24 | #include "xe_pxp_types.h" |
| 25 | #include "xe_uc_fw.h" |
| 26 | #include "regs/xe_irq_regs.h" |
| 27 | #include "regs/xe_pxp_regs.h" |
| 28 | |
| 29 | /** |
| 30 | * DOC: PXP |
| 31 | * |
| 32 | * PXP (Protected Xe Path) allows execution and flip to display of protected |
| 33 | * (i.e. encrypted) objects. This feature is currently only supported in |
| 34 | * integrated parts. |
| 35 | */ |
| 36 | |
| 37 | #define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */ |
| 38 | |
| 39 | /* |
| 40 | * A submission to GSC can take up to 250ms to complete, so use a 300ms |
| 41 | * timeout for activation where only one of those is involved. Termination |
| 42 | * additionally requires a submission to VCS and an interaction with KCR, so |
| 43 | * bump the timeout to 500ms for that. |
| 44 | */ |
| 45 | #define PXP_ACTIVATION_TIMEOUT_MS 300 |
| 46 | #define PXP_TERMINATION_TIMEOUT_MS 500 |
| 47 | |
| 48 | bool xe_pxp_is_supported(const struct xe_device *xe) |
| 49 | { |
| 50 | return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY); |
| 51 | } |
| 52 | |
/**
 * xe_pxp_is_enabled - check if PXP was successfully initialized
 * @pxp: the xe_pxp pointer (NULL if PXP is disabled or init failed)
 *
 * Returns: true if a PXP object exists, false otherwise.
 */
bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
	return pxp != NULL;
}
| 57 | |
/*
 * Check whether the PXP prerequisites (HuC authentication via GSC and GSC
 * proxy init, both performed asynchronously as part of the GSC init flow)
 * have completed. Takes a full forcewake because the checks require HW
 * access.
 */
static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	bool ready;

	fw_ref = xe_force_wake_get(fw: gt_to_fw(gt), domains: XE_FORCEWAKE_ALL);

	/*
	 * If force_wake fails we could falsely report the prerequisites as not
	 * done even if they are; the consequence of this would be that the
	 * callers won't go ahead with using PXP, but if force_wake doesn't work
	 * the GT is very likely in a bad state so not really a problem to abort
	 * PXP. Therefore, we can just log the force_wake error and not escalate
	 * it.
	 */
	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));

	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
	ready = xe_huc_is_authenticated(huc: &gt->uc.huc, type: XE_HUC_AUTH_VIA_GSC) &&
		xe_gsc_proxy_init_done(gsc: &gt->uc.gsc);

	xe_force_wake_put(fw: gt_to_fw(gt), fw_ref);

	return ready;
}
| 84 | |
| 85 | /** |
| 86 | * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use |
| 87 | * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled) |
| 88 | * |
| 89 | * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value |
| 90 | * if PXP is not supported/enabled or if something went wrong in the |
| 91 | * initialization of the prerequisites. Note that the return values of this |
| 92 | * function follow the uapi (see drm_xe_query_pxp_status), so they can be used |
| 93 | * directly in the query ioctl. |
| 94 | */ |
| 95 | int xe_pxp_get_readiness_status(struct xe_pxp *pxp) |
| 96 | { |
| 97 | int ret = 0; |
| 98 | |
| 99 | if (!xe_pxp_is_enabled(pxp)) |
| 100 | return -ENODEV; |
| 101 | |
| 102 | /* if the GSC or HuC FW are in an error state, PXP will never work */ |
| 103 | if (xe_uc_fw_status_to_error(status: pxp->gt->uc.huc.fw.status) || |
| 104 | xe_uc_fw_status_to_error(status: pxp->gt->uc.gsc.fw.status)) |
| 105 | return -EIO; |
| 106 | |
| 107 | xe_pm_runtime_get(xe: pxp->xe); |
| 108 | |
| 109 | /* PXP requires both HuC loaded and GSC proxy initialized */ |
| 110 | if (pxp_prerequisites_done(pxp)) |
| 111 | ret = 1; |
| 112 | |
| 113 | xe_pm_runtime_put(xe: pxp->xe); |
| 114 | return ret; |
| 115 | } |
| 116 | |
| 117 | static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id) |
| 118 | { |
| 119 | struct xe_gt *gt = pxp->gt; |
| 120 | |
| 121 | return xe_mmio_read32(mmio: >->mmio, KCR_SIP) & BIT(id); |
| 122 | } |
| 123 | |
| 124 | static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play) |
| 125 | { |
| 126 | struct xe_gt *gt = pxp->gt; |
| 127 | u32 mask = BIT(id); |
| 128 | |
| 129 | return xe_mmio_wait32(mmio: >->mmio, KCR_SIP, mask, val: in_play ? mask : 0, |
| 130 | timeout_us: 250, NULL, atomic: false); |
| 131 | } |
| 132 | |
| 133 | static void pxp_invalidate_queues(struct xe_pxp *pxp); |
| 134 | |
/*
 * Terminate the ARB session in HW: submit the termination request, wait for
 * KCR to report the session out of play, trigger the full KCR cleanup and
 * finally ask the GSC to invalidate its own state for the session. The GT
 * forcewake is held for the duration because every step requires HW access.
 */
static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n" );

	fw_ref = xe_force_wake_get(fw: gt_to_fw(gt), domains: XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, domain: XE_FW_GT)) {
		ret = -EIO;
		goto out;
	}

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		goto out;

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, in_play: false);
	if (ret)
		goto out;

	/* Trigger full HW cleanup */
	xe_mmio_write32(mmio: &gt->mmio, KCR_GLOBAL_TERMINATE, val: 1);

	/* now we can tell the GSC to clean up its own state */
	ret = xe_pxp_submit_session_invalidation(gsc_res: &pxp->gsc_res, ARB_SESSION);

out:
	xe_force_wake_put(fw: gt_to_fw(gt), fw_ref);
	return ret;
}
| 168 | |
/*
 * Flag a termination as pending, so that anyone waiting on pxp->termination
 * blocks until it completes. Must be called with pxp->mutex held.
 */
static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(x: &pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}
| 176 | |
/*
 * Handle a termination request: wait for any in-flight start to finish, then
 * either defer the termination (if suspended or if one is already in
 * progress) or invalidate all PXP queues and issue the HW termination.
 */
static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	if (!wait_for_completion_timeout(x: &pxp->activation,
					 timeout: msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n" );

	mutex_lock(&pxp->mutex);

	/*
	 * An active session means the current key is now obsolete: bump the
	 * instance so that xe_pxp_bo_key_check() flags BOs using the old key.
	 */
	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need to
	 * emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(lock: &pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(lock: &pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(lock: &pxp->mutex);

	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n" , ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		/* unblock waiters even on failure, so they see the error state */
		complete_all(&pxp->termination);
		mutex_unlock(lock: &pxp->mutex);
	}
}
| 227 | |
/* Advance the state machine once the HW has signaled termination complete */
static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 * requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 * requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 * when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n" ,
			pxp->status);
	}

	/* wake up anyone waiting on the termination (e.g. pxp_start) */
	complete_all(&pxp->termination);

	mutex_unlock(lock: &pxp->mutex);
}
| 261 | |
/*
 * Worker to process PXP termination events outside of the irq handler. The
 * pending events are consumed atomically under xe->irq.lock, matching the
 * producer side in xe_pxp_irq_handler().
 */
static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	spin_lock_irq(lock: &xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(lock: &xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		/*
		 * A new request supersedes any completion event (presumably
		 * from the previous cycle); the termination we emit below will
		 * raise its own completion.
		 */
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}
| 294 | |
| 295 | /** |
| 296 | * xe_pxp_irq_handler - Handles PXP interrupts. |
| 297 | * @xe: the xe_device structure |
| 298 | * @iir: interrupt vector |
| 299 | */ |
| 300 | void xe_pxp_irq_handler(struct xe_device *xe, u16 iir) |
| 301 | { |
| 302 | struct xe_pxp *pxp = xe->pxp; |
| 303 | |
| 304 | if (!xe_pxp_is_enabled(pxp)) { |
| 305 | drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n" , iir); |
| 306 | return; |
| 307 | } |
| 308 | |
| 309 | lockdep_assert_held(&xe->irq.lock); |
| 310 | |
| 311 | if (unlikely(!iir)) |
| 312 | return; |
| 313 | |
| 314 | if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT | |
| 315 | KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) |
| 316 | pxp->irq.events |= PXP_TERMINATION_REQUEST; |
| 317 | |
| 318 | if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT) |
| 319 | pxp->irq.events |= PXP_TERMINATION_COMPLETE; |
| 320 | |
| 321 | if (pxp->irq.events) |
| 322 | queue_work(wq: pxp->irq.wq, work: &pxp->irq.work); |
| 323 | } |
| 324 | |
/*
 * Enable or disable PXP-related writes in KCR by toggling the
 * ALLOW_DISPLAY_ME_WRITES bit in the KCR_INIT register. Requires a GT
 * forcewake for the MMIO access.
 */
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(fw: gt_to_fw(gt: pxp->gt), domains: XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, domain: XE_FW_GT))
		return -EIO;

	xe_mmio_write32(mmio: &pxp->gt->mmio, KCR_INIT, val);
	xe_force_wake_put(fw: gt_to_fw(gt: pxp->gt), fw_ref);

	return 0;
}
| 340 | |
/* Enable KCR support for PXP; returns 0 on success, negative errno otherwise */
static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, enable: true);
}
| 345 | |
/* Disable KCR support for PXP; returns 0 on success, negative errno otherwise */
static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, enable: false);
}
| 350 | |
/* devm action registered by xe_pxp_init(): undo the PXP setup on teardown */
static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(wq: pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}
| 360 | |
| 361 | /** |
| 362 | * xe_pxp_init - initialize PXP support |
| 363 | * @xe: the xe_device structure |
| 364 | * |
| 365 | * Initialize the HW state and allocate the objects required for PXP support. |
| 366 | * Note that some of the requirement for PXP support (GSC proxy init, HuC auth) |
| 367 | * are performed asynchronously as part of the GSC init. PXP can only be used |
| 368 | * after both this function and the async worker have completed. |
| 369 | * |
| 370 | * Returns 0 if PXP is not supported or if PXP initialization is successful, |
| 371 | * other errno value if there is an error during the init. |
| 372 | */ |
| 373 | int xe_pxp_init(struct xe_device *xe) |
| 374 | { |
| 375 | struct xe_gt *gt = xe->tiles[0].media_gt; |
| 376 | struct xe_pxp *pxp; |
| 377 | int err; |
| 378 | |
| 379 | if (!xe_pxp_is_supported(xe)) |
| 380 | return 0; |
| 381 | |
| 382 | /* we only support PXP on single tile devices with a media GT */ |
| 383 | if (xe->info.tile_count > 1 || !gt) |
| 384 | return 0; |
| 385 | |
| 386 | /* The GSCCS is required for submissions to the GSC FW */ |
| 387 | if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))) |
| 388 | return 0; |
| 389 | |
| 390 | /* PXP requires both GSC and HuC firmwares to be available */ |
| 391 | if (!xe_uc_fw_is_loadable(uc_fw: >->uc.gsc.fw) || |
| 392 | !xe_uc_fw_is_loadable(uc_fw: >->uc.huc.fw)) { |
| 393 | drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies" ); |
| 394 | return 0; |
| 395 | } |
| 396 | |
| 397 | pxp = drmm_kzalloc(dev: &xe->drm, size: sizeof(struct xe_pxp), GFP_KERNEL); |
| 398 | if (!pxp) { |
| 399 | err = -ENOMEM; |
| 400 | goto out; |
| 401 | } |
| 402 | |
| 403 | INIT_LIST_HEAD(list: &pxp->queues.list); |
| 404 | spin_lock_init(&pxp->queues.lock); |
| 405 | INIT_WORK(&pxp->irq.work, pxp_irq_work); |
| 406 | pxp->xe = xe; |
| 407 | pxp->gt = gt; |
| 408 | |
| 409 | pxp->key_instance = 1; |
| 410 | pxp->last_suspend_key_instance = 1; |
| 411 | |
| 412 | /* |
| 413 | * we'll use the completions to check if there is an action pending, |
| 414 | * so we start them as completed and we reinit it when an action is |
| 415 | * triggered. |
| 416 | */ |
| 417 | init_completion(x: &pxp->activation); |
| 418 | init_completion(x: &pxp->termination); |
| 419 | complete_all(&pxp->termination); |
| 420 | complete_all(&pxp->activation); |
| 421 | |
| 422 | mutex_init(&pxp->mutex); |
| 423 | |
| 424 | pxp->irq.wq = alloc_ordered_workqueue("pxp-wq" , 0); |
| 425 | if (!pxp->irq.wq) { |
| 426 | err = -ENOMEM; |
| 427 | goto out_free; |
| 428 | } |
| 429 | |
| 430 | err = kcr_pxp_enable(pxp); |
| 431 | if (err) |
| 432 | goto out_wq; |
| 433 | |
| 434 | err = xe_pxp_allocate_execution_resources(pxp); |
| 435 | if (err) |
| 436 | goto out_kcr_disable; |
| 437 | |
| 438 | xe->pxp = pxp; |
| 439 | |
| 440 | return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp); |
| 441 | |
| 442 | out_kcr_disable: |
| 443 | kcr_pxp_disable(pxp); |
| 444 | out_wq: |
| 445 | destroy_workqueue(wq: pxp->irq.wq); |
| 446 | out_free: |
| 447 | drmm_kfree(dev: &xe->drm, data: pxp); |
| 448 | out: |
| 449 | drm_err(&xe->drm, "PXP initialization failed: %pe\n" , ERR_PTR(err)); |
| 450 | return err; |
| 451 | } |
| 452 | |
| 453 | static int __pxp_start_arb_session(struct xe_pxp *pxp) |
| 454 | { |
| 455 | int ret; |
| 456 | unsigned int fw_ref; |
| 457 | |
| 458 | fw_ref = xe_force_wake_get(fw: gt_to_fw(gt: pxp->gt), domains: XE_FW_GT); |
| 459 | if (!xe_force_wake_ref_has_domain(fw_ref, domain: XE_FW_GT)) |
| 460 | return -EIO; |
| 461 | |
| 462 | if (pxp_session_is_in_play(pxp, ARB_SESSION)) { |
| 463 | ret = -EEXIST; |
| 464 | goto out_force_wake; |
| 465 | } |
| 466 | |
| 467 | ret = xe_pxp_submit_session_init(gsc_res: &pxp->gsc_res, ARB_SESSION); |
| 468 | if (ret) { |
| 469 | drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n" , ERR_PTR(ret)); |
| 470 | goto out_force_wake; |
| 471 | } |
| 472 | |
| 473 | ret = pxp_wait_for_session_state(pxp, ARB_SESSION, in_play: true); |
| 474 | if (ret) { |
| 475 | drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play%pe\n" , ERR_PTR(ret)); |
| 476 | goto out_force_wake; |
| 477 | } |
| 478 | |
| 479 | drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n" ); |
| 480 | |
| 481 | out_force_wake: |
| 482 | xe_force_wake_put(fw: gt_to_fw(gt: pxp->gt), fw_ref); |
| 483 | return ret; |
| 484 | } |
| 485 | |
| 486 | /** |
| 487 | * xe_pxp_exec_queue_set_type - Mark a queue as using PXP |
| 488 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 489 | * @q: the queue to mark as using PXP |
| 490 | * @type: the type of PXP session this queue will use |
| 491 | * |
| 492 | * Returns 0 if the selected PXP type is supported, -ENODEV otherwise. |
| 493 | */ |
| 494 | int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type) |
| 495 | { |
| 496 | if (!xe_pxp_is_enabled(pxp)) |
| 497 | return -ENODEV; |
| 498 | |
| 499 | /* we only support HWDRM sessions right now */ |
| 500 | xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM); |
| 501 | |
| 502 | q->pxp.type = type; |
| 503 | |
| 504 | return 0; |
| 505 | } |
| 506 | |
| 507 | static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) |
| 508 | { |
| 509 | int ret = 0; |
| 510 | |
| 511 | /* |
| 512 | * A queue can be added to the list only if the PXP is in active status, |
| 513 | * otherwise the termination might not handle it correctly. |
| 514 | */ |
| 515 | mutex_lock(&pxp->mutex); |
| 516 | |
| 517 | if (pxp->status == XE_PXP_ACTIVE) { |
| 518 | spin_lock_irq(lock: &pxp->queues.lock); |
| 519 | list_add_tail(new: &q->pxp.link, head: &pxp->queues.list); |
| 520 | spin_unlock_irq(lock: &pxp->queues.lock); |
| 521 | } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) { |
| 522 | ret = -EIO; |
| 523 | } else { |
| 524 | ret = -EBUSY; /* try again later */ |
| 525 | } |
| 526 | |
| 527 | mutex_unlock(lock: &pxp->mutex); |
| 528 | |
| 529 | return ret; |
| 530 | } |
| 531 | |
/*
 * Check the PXP prerequisites and start the ARB session if it is not already
 * running, using pxp->status to coordinate with concurrent starts,
 * terminations and suspend. Loops (via wait_for_idle/restart) until PXP is
 * active or a definitive error is hit. The caller is expected to hold a
 * runtime PM reference (see xe_pxp_exec_queue_add()).
 */
static int pxp_start(struct xe_pxp *pxp, u8 type)
{
	int ret = 0;
	bool restart = false;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* get_readiness_status() returns 0 for in-progress and 1 for done */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0)
		return ret ?: -EBUSY;

	ret = 0;

wait_for_idle:
	/*
	 * if there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the lock.
	 * Note that the two actions should never be pending at the same time.
	 */
	if (!wait_for_completion_timeout(x: &pxp->termination,
					 timeout: msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	if (!wait_for_completion_timeout(x: &pxp->activation,
					 timeout: msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		goto out_unlock;
	case XE_PXP_ACTIVE:
		goto out_unlock;
	case XE_PXP_READY_TO_START:
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(x: &pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		restart = true;
		goto out_unlock;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		restart = true;
		goto out_unlock;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n" , pxp->status);
		ret = -EIO;
		goto out_unlock;
	}

	mutex_unlock(lock: &pxp->mutex);

	/* a pending termination (from NEEDS_TERMINATION above) is issued here */
	if (!completion_done(x: &pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n" );
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;

			goto out_unlock;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is not that something went
	 * wrong. Mark the status as needing termination and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n" , pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		restart = true;
		goto out_unlock;
	}

	/* If everything went ok, update the status and add the queue to the list */
	if (!ret)
		pxp->status = XE_PXP_ACTIVE;
	else
		pxp->status = XE_PXP_ERROR;

out_unlock:
	mutex_unlock(lock: &pxp->mutex);

	if (restart)
		goto wait_for_idle;

	return ret;
}
| 647 | |
| 648 | /** |
| 649 | * xe_pxp_exec_queue_add - add a queue to the PXP list |
| 650 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 651 | * @q: the queue to add to the list |
| 652 | * |
| 653 | * If PXP is enabled and the prerequisites are done, start the PXP default |
| 654 | * session (if not already running) and add the queue to the PXP list. |
| 655 | * |
| 656 | * Returns 0 if the PXP session is running and the queue is in the list, |
| 657 | * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done, |
| 658 | * other errno value if something goes wrong during the session start. |
| 659 | */ |
| 660 | int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) |
| 661 | { |
| 662 | int ret; |
| 663 | |
| 664 | if (!xe_pxp_is_enabled(pxp)) |
| 665 | return -ENODEV; |
| 666 | |
| 667 | /* |
| 668 | * Runtime suspend kills PXP, so we take a reference to prevent it from |
| 669 | * happening while we have active queues that use PXP |
| 670 | */ |
| 671 | xe_pm_runtime_get(xe: pxp->xe); |
| 672 | |
| 673 | start: |
| 674 | ret = pxp_start(pxp, type: q->pxp.type); |
| 675 | |
| 676 | if (!ret) { |
| 677 | ret = __exec_queue_add(pxp, q); |
| 678 | if (ret == -EBUSY) |
| 679 | goto start; |
| 680 | } |
| 681 | |
| 682 | /* |
| 683 | * in the successful case the PM ref is released from |
| 684 | * xe_pxp_exec_queue_remove |
| 685 | */ |
| 686 | if (ret) |
| 687 | xe_pm_runtime_put(xe: pxp->xe); |
| 688 | |
| 689 | return ret; |
| 690 | } |
| 691 | ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO); |
| 692 | |
/*
 * Remove @q from the PXP queue list (if it is on it) and drop the PM
 * reference that was taken in xe_pxp_exec_queue_add(). @lock selects whether
 * pxp->queues.lock is taken around the list manipulation; callers that have
 * already isolated the queue (see pxp_invalidate_queues()) pass false.
 */
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(lock: &pxp->queues.lock);

	if (!list_empty(head: &q->pxp.link)) {
		list_del_init(entry: &q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(lock: &pxp->queues.lock);

	if (need_pm_put)
		xe_pm_runtime_put(xe: pxp->xe);
}
| 716 | |
| 717 | /** |
| 718 | * xe_pxp_exec_queue_remove - remove a queue from the PXP list |
| 719 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 720 | * @q: the queue to remove from the list |
| 721 | * |
| 722 | * If PXP is enabled and the exec_queue is in the list, the queue will be |
| 723 | * removed from the list and its PM reference will be released. It is safe to |
| 724 | * call this function multiple times for the same queue. |
| 725 | */ |
| 726 | void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q) |
| 727 | { |
| 728 | __pxp_exec_queue_remove(pxp, q, lock: true); |
| 729 | } |
| 730 | |
/*
 * Kill and remove every queue on the PXP list. Queues are first moved to a
 * private list under the lock (taking a reference to protect against a
 * concurrent destroy), then killed and removed without the lock held.
 */
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(lock: &pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		/* skip queues that are already going away */
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(list: &q->pxp.link, head: &to_clean);
	}
	spin_unlock_irq(lock: &pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, lock: false);

		xe_exec_queue_put(q);
	}
}
| 759 | |
| 760 | /** |
| 761 | * xe_pxp_key_assign - mark a BO as using the current PXP key iteration |
| 762 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 763 | * @bo: the BO to mark |
| 764 | * |
| 765 | * Returns: -ENODEV if PXP is disabled, 0 otherwise. |
| 766 | */ |
| 767 | int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo) |
| 768 | { |
| 769 | if (!xe_pxp_is_enabled(pxp)) |
| 770 | return -ENODEV; |
| 771 | |
| 772 | xe_assert(pxp->xe, !bo->pxp_key_instance); |
| 773 | |
| 774 | /* |
| 775 | * Note that the PXP key handling is inherently racey, because the key |
| 776 | * can theoretically change at any time (although it's unlikely to do |
| 777 | * so without triggers), even right after we copy it. Taking a lock |
| 778 | * wouldn't help because the value might still change as soon as we |
| 779 | * release the lock. |
| 780 | * Userspace needs to handle the fact that their BOs can go invalid at |
| 781 | * any point. |
| 782 | */ |
| 783 | bo->pxp_key_instance = pxp->key_instance; |
| 784 | |
| 785 | return 0; |
| 786 | } |
| 787 | |
| 788 | /** |
| 789 | * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid |
| 790 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 791 | * @bo: the BO we want to check |
| 792 | * |
| 793 | * Checks whether a BO was encrypted with the current key or an obsolete one. |
| 794 | * |
| 795 | * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the |
| 796 | * BO is not using PXP, -ENOEXEC if the key is not valid. |
| 797 | */ |
| 798 | int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo) |
| 799 | { |
| 800 | if (!xe_pxp_is_enabled(pxp)) |
| 801 | return -ENODEV; |
| 802 | |
| 803 | if (!xe_bo_is_protected(bo)) |
| 804 | return -EINVAL; |
| 805 | |
| 806 | xe_assert(pxp->xe, bo->pxp_key_instance); |
| 807 | |
| 808 | /* |
| 809 | * Note that the PXP key handling is inherently racey, because the key |
| 810 | * can theoretically change at any time (although it's unlikely to do |
| 811 | * so without triggers), even right after we check it. Taking a lock |
| 812 | * wouldn't help because the value might still change as soon as we |
| 813 | * release the lock. |
| 814 | * We mitigate the risk by checking the key at multiple points (on each |
| 815 | * submission involving the BO and right before flipping it on the |
| 816 | * display), but there is still a very small chance that we could |
| 817 | * operate on an invalid BO for a single submission or a single frame |
| 818 | * flip. This is a compromise made to protect the encrypted data (which |
| 819 | * is what the key termination is for). |
| 820 | */ |
| 821 | if (bo->pxp_key_instance != pxp->key_instance) |
| 822 | return -ENOEXEC; |
| 823 | |
| 824 | return 0; |
| 825 | } |
| 826 | |
| 827 | /** |
| 828 | * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid |
| 829 | * @obj: the drm_gem_obj we want to check |
| 830 | * |
| 831 | * Checks whether a drm_gem_obj was encrypted with the current key or an |
| 832 | * obsolete one. |
| 833 | * |
| 834 | * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the |
| 835 | * obj is not using PXP, -ENOEXEC if the key is not valid. |
| 836 | */ |
| 837 | int xe_pxp_obj_key_check(struct drm_gem_object *obj) |
| 838 | { |
| 839 | struct xe_bo *bo = gem_to_xe_bo(obj); |
| 840 | struct xe_device *xe = xe_bo_device(bo); |
| 841 | struct xe_pxp *pxp = xe->pxp; |
| 842 | |
| 843 | return xe_pxp_bo_key_check(pxp, bo); |
| 844 | } |
| 845 | |
| 846 | /** |
| 847 | * xe_pxp_pm_suspend - prepare PXP for HW suspend |
| 848 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 849 | * |
| 850 | * Makes sure all PXP actions have completed and invalidates all PXP queues |
| 851 | * and objects before we go into a suspend state. |
| 852 | * |
| 853 | * Returns: 0 if successful, a negative errno value otherwise. |
| 854 | */ |
| 855 | int xe_pxp_pm_suspend(struct xe_pxp *pxp) |
| 856 | { |
| 857 | bool needs_queue_inval = false; |
| 858 | int ret = 0; |
| 859 | |
| 860 | if (!xe_pxp_is_enabled(pxp)) |
| 861 | return 0; |
| 862 | |
| 863 | wait_for_activation: |
| 864 | if (!wait_for_completion_timeout(x: &pxp->activation, |
| 865 | timeout: msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) |
| 866 | ret = -ETIMEDOUT; |
| 867 | |
| 868 | mutex_lock(&pxp->mutex); |
| 869 | |
| 870 | switch (pxp->status) { |
| 871 | case XE_PXP_ERROR: |
| 872 | case XE_PXP_READY_TO_START: |
| 873 | case XE_PXP_SUSPENDED: |
| 874 | case XE_PXP_TERMINATION_IN_PROGRESS: |
| 875 | case XE_PXP_NEEDS_ADDITIONAL_TERMINATION: |
| 876 | /* |
| 877 | * If PXP is not running there is nothing to cleanup. If there |
| 878 | * is a termination pending then no need to issue another one. |
| 879 | */ |
| 880 | break; |
| 881 | case XE_PXP_START_IN_PROGRESS: |
| 882 | mutex_unlock(lock: &pxp->mutex); |
| 883 | goto wait_for_activation; |
| 884 | case XE_PXP_NEEDS_TERMINATION: |
| 885 | /* If PXP was never used we can skip the cleanup */ |
| 886 | if (pxp->key_instance == pxp->last_suspend_key_instance) |
| 887 | break; |
| 888 | fallthrough; |
| 889 | case XE_PXP_ACTIVE: |
| 890 | pxp->key_instance++; |
| 891 | needs_queue_inval = true; |
| 892 | break; |
| 893 | default: |
| 894 | drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u" , |
| 895 | pxp->status); |
| 896 | ret = -EIO; |
| 897 | goto out; |
| 898 | } |
| 899 | |
| 900 | /* |
| 901 | * We set this even if we were in error state, hoping the suspend clears |
| 902 | * the error. Worse case we fail again and go in error state again. |
| 903 | */ |
| 904 | pxp->status = XE_PXP_SUSPENDED; |
| 905 | |
| 906 | mutex_unlock(lock: &pxp->mutex); |
| 907 | |
| 908 | if (needs_queue_inval) |
| 909 | pxp_invalidate_queues(pxp); |
| 910 | |
| 911 | /* |
| 912 | * if there is a termination in progress, wait for it. |
| 913 | * We need to wait outside the lock because the completion is done from |
| 914 | * within the lock |
| 915 | */ |
| 916 | if (!wait_for_completion_timeout(x: &pxp->termination, |
| 917 | timeout: msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) |
| 918 | ret = -ETIMEDOUT; |
| 919 | |
| 920 | pxp->last_suspend_key_instance = pxp->key_instance; |
| 921 | |
| 922 | out: |
| 923 | return ret; |
| 924 | } |
| 925 | |
| 926 | /** |
| 927 | * xe_pxp_pm_resume - re-init PXP after HW suspend |
| 928 | * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) |
| 929 | */ |
| 930 | void xe_pxp_pm_resume(struct xe_pxp *pxp) |
| 931 | { |
| 932 | int err; |
| 933 | |
| 934 | if (!xe_pxp_is_enabled(pxp)) |
| 935 | return; |
| 936 | |
| 937 | err = kcr_pxp_enable(pxp); |
| 938 | |
| 939 | mutex_lock(&pxp->mutex); |
| 940 | |
| 941 | xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED); |
| 942 | |
| 943 | if (err) |
| 944 | pxp->status = XE_PXP_ERROR; |
| 945 | else |
| 946 | pxp->status = XE_PXP_NEEDS_TERMINATION; |
| 947 | |
| 948 | mutex_unlock(lock: &pxp->mutex); |
| 949 | } |
| 950 | |