/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

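/*
 * igt_request_alloc - allocate a request for a selftest
 *
 * Looks up the intel_context backing @engine in @ctx (via its legacy ring
 * index) and allocates a fresh request on it. The context reference is
 * dropped again before returning, so the caller only owns the request.
 */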
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}

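/*
 * igt_emit_store_dw - build a batch of MI_STORE_DWORD_IMM commands
 *
 * Creates an internal object holding one store per page: for each of the
 * @count pages, starting at @offset within @vma, the batch writes @val to
 * the first dword of that page. The batch is terminated with
 * MI_BATCH_BUFFER_END and returned as a vma pinned into the same address
 * space as @vma, ready to be handed to emit_bb_start().
 */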
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int ver = GRAPHICS_VER(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

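	/*
	 * Reserve the worst-case four dwords per store (gen8+ uses a 64b
	 * address) plus one dword for the terminating MI_BATCH_BUFFER_END,
	 * rounded up to a whole number of pages.
	 */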
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
	offset += i915_vma_offset(vma);

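	/*
	 * The MI_STORE_DWORD_IMM layout changed across generations: gen8+
	 * takes a 64b address, gen4-7 a 32b address (with gen4/5 needing
	 * the GGTT variant), and earlier gens use the original encoding
	 * with a virtual address.
	 */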
	for (n = 0; n < count; n++) {
		if (ver >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (ver >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(ver < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

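	/* Flush any chipset buffering so the GPU sees the new batch contents. */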
	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

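/*
 * igt_gpu_fill_dw - fill pages of @vma with @val using the GPU
 *
 * Builds a store-dword batch with igt_emit_store_dw(), submits it on a
 * request from @ce and marks both the batch and the target @vma (for write)
 * as active on that request. If anything fails after the request has been
 * created, the error is recorded on the request before it is added, so the
 * caller can still wait on it safely.
 */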
int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
	if (err)
		goto skip_request;

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

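	/*
	 * Older gens only permit MI_STORE_DWORD_IMM from a privileged
	 * batch, hence the secure dispatch for gen2-5.
	 */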
	flags = 0;
	if (GRAPHICS_VER(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					i915_vma_offset(batch),
					i915_vma_size(batch),
					flags);

skip_request:
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}