// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */

#include <linux/workqueue.h>

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_wait_util.h"
#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_regs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt-in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions.
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * protected objects.
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
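 *
 * As a rough illustration of the opt-in (a hedged sketch, not copied from
 * any test; the structure and flag names come from
 * include/uapi/drm/i915_drm.h, while obj_size and drm_fd are placeholders),
 * userspace could create a protected object like:
 *
 *	struct drm_i915_gem_create_ext_protected_content pc_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = obj_size,
 *		.extensions = (__u64)(uintptr_t)&pc_ext,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 *
 * and then use it from a context created with the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT param set (which in turn requires
 * the context to be non-recoverable).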
 */

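/*
 * The three predicates below form a tiered state model (a summary inferred
 * from the checks themselves, not separate upstream documentation): PXP is
 * "supported" when the kconfig and HW allow it, "enabled" once init has
 * created the pinned VCS context (pxp->ce), and "active" only while the arb
 * session is valid.
 */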
bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}

bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}

bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}

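/*
 * KCR_INIT is a masked register: the top 16 bits act as a write-enable mask
 * for the bottom 16, which is what the _MASKED_BIT_ENABLE()/
 * _MASKED_BIT_DISABLE() macros encode, so a write only touches the
 * targeted bit.
 */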
static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);

	intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}

static void kcr_pxp_enable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, true);
}

static void kcr_pxp_disable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, false);
}

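/*
 * PXP control submissions (e.g. the session termination batch) go to the HW
 * through this dedicated, pinned kernel context on a video engine rather
 * than through a userspace context.
 */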
static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}

static void destroy_vcs_context(struct intel_pxp *pxp)
{
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
}

static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);

	if (pxp->ctrl_gt->type == GT_MEDIA)
		pxp->kcr_base = MTL_KCR_BASE;
	else
		pxp->kcr_base = GEN12_KCR_BASE;

	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		ret = intel_pxp_gsccs_init(pxp);
	else
		ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;

	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");

	return;

out_context:
	destroy_vcs_context(pxp);
}

static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
		return to_gt(i915);

	return NULL;
}

static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!HAS_PXP(i915))
		return NULL;

	/*
	 * For MTL onwards, the PXP-controller-GT needs to have a valid GSC
	 * engine on the media GT. NOTE: if we have a media-tile with a
	 * GSC-engine, the VDBOX is already present so skip that check. We also
	 * have to ensure the GSC and HuC firmware are coming online.
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;

	/*
	 * Else we rely on the mei-pxp module, but only on legacy platforms
	 * that predate separate media GTs, and only if a valid VDBOX is
	 * present.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
		return to_gt(i915);

	return NULL;
}

int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	if (intel_gt_is_wedged(to_gt(i915)))
		return -ENOTCONN;

	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it if PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	/*
	 * At this point, we will either enable full featured PXP capabilities
	 * including session and object management, or we will init the backend
	 * tee channel for internal users such as HuC loading by GSC.
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;

	/* init common info used by all feature-mode usages */
	i915->pxp->ctrl_gt = gt;
	mutex_init(&i915->pxp->tee_mutex);

	/*
	 * If the full PXP feature is not available but HuC is loaded by GSC
	 * on pre-MTL platforms such as DG2, we can skip the init of the full
	 * PXP session/object management and just init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);

	return 0;
}

void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;

	i915->pxp->arb_is_valid = false;

	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	else
		intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}

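/*
 * Callers are expected to hold the ctrl_gt irq_lock: this is reached both
 * from the PXP interrupt handler and, below, from pxp_queue_termination()
 * (a descriptive note inferred from the call sites, not upstream kerneldoc).
 */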
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
{
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);
}

static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
	spin_lock_irq(gt->irq_lock);
	intel_pxp_mark_termination_in_progress(pxp);
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
}

static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
	mutex_unlock(&pxp->tee_mutex);

	return bound;
}

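/*
 * The two backends have very different worst-case round trips: a request to
 * the GSC firmware via the GSC engine can take much longer than the
 * mei-based path, hence the two different values below.
 */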
int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
	else
		return 250;
}

static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	int timeout;

	if (!pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
	intel_pxp_mark_termination_in_progress(pxp);
	intel_pxp_terminate(pxp, false);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}

static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	int timeout;

	if (pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * due to a runtime event. Use the worker that was designed for this.
	 */
	pxp_queue_termination(pxp);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
			timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

void intel_pxp_end(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_lock(&pxp->arb_mutex);

	if (__pxp_global_teardown_final(pxp))
		drm_dbg(&i915->drm, "PXP end timed out\n");

	mutex_unlock(&pxp->arb_mutex);

	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static bool pxp_required_fw_failed(struct intel_pxp *pxp)
{
	if (__intel_uc_fw_status(&pxp->ctrl_gt->uc.huc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0) &&
	    __intel_uc_fw_status(&pxp->ctrl_gt->uc.gsc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;

	return false;
}

static bool pxp_fw_dependencies_completed(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return intel_pxp_gsccs_is_ready_for_sessions(pxp);

	return pxp_component_bound(pxp);
}

/*
 * This helper is used by both intel_pxp_start and by
 * the GET_PARAM IOCTL that user space calls. Thus, the
 * return values here should match the UAPI spec: as implemented below,
 * that means -ENODEV when PXP is unavailable, 2 when the firmware
 * dependencies are still pending (the caller may retry) and 1 when
 * PXP is ready.
 */
int intel_pxp_get_readiness_status(struct intel_pxp *pxp, int timeout_ms)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (pxp_required_fw_failed(pxp))
		return -ENODEV;

	if (pxp->platform_cfg_is_bad)
		return -ENODEV;

	if (timeout_ms) {
		if (wait_for(pxp_fw_dependencies_completed(pxp), timeout_ms))
			return 2;
	} else if (!pxp_fw_dependencies_completed(pxp)) {
		return 2;
	}
	return 1;
}

/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
#define PXP_READINESS_TIMEOUT 250

int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;

	ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
	if (ret < 0) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
		return ret;
	} else if (ret > 1) {
		return -EIO; /* per UAPI spec, user may retry later */
	}

	mutex_lock(&pxp->arb_mutex);

	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;

	/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}

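/*
 * These two helpers arm and quiesce the KCR unit and the PXP interrupts;
 * they are reached from the suspend/resume flows as well as from
 * intel_pxp_end() above (a descriptive note on the callers, inferred from
 * the surrounding PXP code).
 */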
void intel_pxp_init_hw(struct intel_pxp *pxp)
{
	kcr_pxp_enable(pxp);
	intel_pxp_irq_enable(pxp);
}

void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
	kcr_pxp_disable(pxp);
	intel_pxp_irq_disable(pxp);
}

int intel_pxp_key_check(struct drm_gem_object *_obj, bool assign)
{
	struct drm_i915_gem_object *obj = to_intel_bo(_obj);
	struct drm_i915_private *i915 = to_i915(_obj->dev);
	struct intel_pxp *pxp = i915->pxp;

	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;

	GEM_BUG_ON(!pxp->key_instance);

	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;

	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
		}

		spin_unlock_irq(&i915->gem.contexts.lock);

		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quickly as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
			intel_context_ban(ce, NULL);
		i915_gem_context_unlock_engines(ctx);

		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref.
		 */
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = NULL;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}