1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Zhi Wang <zhi.a.wang@intel.com>
25 *
26 * Contributors:
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
 * Changbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
33 *
34 */
35
36#include <linux/kthread.h>
37
38#include <drm/drm_print.h>
39
40#include "gem/i915_gem_pm.h"
41#include "gt/intel_context.h"
42#include "gt/intel_execlists_submission.h"
43#include "gt/intel_gt_regs.h"
44#include "gt/intel_lrc.h"
45#include "gt/intel_ring.h"
46
47#include "i915_drv.h"
48#include "i915_gem_gtt.h"
49#include "i915_perf_oa_regs.h"
50#include "gvt.h"
51
52#define RING_CTX_OFF(x) \
53 offsetof(struct execlist_ring_context, x)
54
55static void set_context_pdp_root_pointer(
56 struct execlist_ring_context *ring_context,
57 u32 pdp[8])
58{
59 int i;
60
61 for (i = 0; i < 8; i++)
62 ring_context->pdps[i].val = pdp[7 - i];
63}
64
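/*
 * Copy the shadow PPGTT root pointers (PDPs) built by GVT into the PDP
 * fields of the shadow ring context, so that the shadow context uses the
 * shadow page tables when it runs on the HW.
 */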
65static void update_shadow_pdps(struct intel_vgpu_workload *workload)
66{
67 struct execlist_ring_context *shadow_ring_context;
68 struct intel_context *ctx = workload->req->context;
69
70 if (WARN_ON(!workload->shadow_mm))
71 return;
72
73 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
74 return;
75
76 shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
77 set_context_pdp_root_pointer(ring_context: shadow_ring_context,
78 pdp: (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
79}
80
/*
 * When populating the shadow context from the guest, we should not override
 * the OA related registers, so that they will not be overwritten by the
 * guest OA configs. This makes it possible to capture OA data from the host
 * for both the host and the guests.
 */
86static void sr_oa_regs(struct intel_vgpu_workload *workload,
87 u32 *reg_state, bool save)
88{
89 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
90 u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
91 u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
92 int i = 0;
93 u32 flex_mmio[] = {
94 i915_mmio_reg_offset(EU_PERF_CNTL0),
95 i915_mmio_reg_offset(EU_PERF_CNTL1),
96 i915_mmio_reg_offset(EU_PERF_CNTL2),
97 i915_mmio_reg_offset(EU_PERF_CNTL3),
98 i915_mmio_reg_offset(EU_PERF_CNTL4),
99 i915_mmio_reg_offset(EU_PERF_CNTL5),
100 i915_mmio_reg_offset(EU_PERF_CNTL6),
101 };
102
103 if (workload->engine->id != RCS0)
104 return;
105
106 if (save) {
107 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
108
109 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
110 u32 state_offset = ctx_flexeu0 + i * 2;
111
112 workload->flex_mmio[i] = reg_state[state_offset + 1];
113 }
114 } else {
115 reg_state[ctx_oactxctrl] =
116 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
117 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
118
119 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
120 u32 state_offset = ctx_flexeu0 + i * 2;
121 u32 mmio = flex_mmio[i];
122
123 reg_state[state_offset] = mmio;
124 reg_state[state_offset + 1] = workload->flex_mmio[i];
125 }
126 }
127}
128
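/*
 * Populate the shadow LRC context image from the guest ring context:
 * preserve the host OA register configuration across the copy, copy the
 * guest engine context pages into the shadow context, and scan the copied
 * context for invalid commands.
 */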
129static int populate_shadow_context(struct intel_vgpu_workload *workload)
130{
131 struct intel_vgpu *vgpu = workload->vgpu;
132 struct intel_gvt *gvt = vgpu->gvt;
133 struct intel_context *ctx = workload->req->context;
134 struct execlist_ring_context *shadow_ring_context;
135 void *dst;
136 void *context_base;
137 unsigned long context_gpa, context_page_num;
138 unsigned long gpa_base; /* first gpa of consecutive GPAs */
139 unsigned long gpa_size; /* size of consecutive GPAs */
140 struct intel_vgpu_submission *s = &vgpu->submission;
141 int i;
142 bool skip = false;
143 int ring_id = workload->engine->id;
144 int ret;
145
146 GEM_BUG_ON(!intel_context_is_pinned(ctx));
147
148 context_base = (void *) ctx->lrc_reg_state -
149 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
150
151 shadow_ring_context = (void *) ctx->lrc_reg_state;
152
153 sr_oa_regs(workload, reg_state: (u32 *)shadow_ring_context, save: true);
154#define COPY_REG(name) \
155 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
156 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
157#define COPY_REG_MASKED(name) {\
158 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
159 + RING_CTX_OFF(name.val),\
160 &shadow_ring_context->name.val, 4);\
161 shadow_ring_context->name.val |= 0xffff << 16;\
162 }
163
164 COPY_REG_MASKED(ctx_ctrl);
165 COPY_REG(ctx_timestamp);
166
167 if (workload->engine->id == RCS0) {
168 COPY_REG(bb_per_ctx_ptr);
169 COPY_REG(rcs_indirect_ctx);
170 COPY_REG(rcs_indirect_ctx_offset);
171 } else if (workload->engine->id == BCS0)
172 intel_gvt_read_gpa(vgpu,
173 gpa: workload->ring_context_gpa +
174 BCS_TILE_REGISTER_VAL_OFFSET,
175 buf: (void *)shadow_ring_context +
176 BCS_TILE_REGISTER_VAL_OFFSET, len: 4);
177#undef COPY_REG
178#undef COPY_REG_MASKED
179
180 /* don't copy Ring Context (the first 0x50 dwords),
181 * only copy the Engine Context part from guest
182 */
183 intel_gvt_read_gpa(vgpu,
184 gpa: workload->ring_context_gpa +
185 RING_CTX_SIZE,
186 buf: (void *)shadow_ring_context +
187 RING_CTX_SIZE,
188 I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
189
190 sr_oa_regs(workload, reg_state: (u32 *)shadow_ring_context, save: false);
191
192 gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
193 workload->engine->name, workload->ctx_desc.lrca,
194 workload->ctx_desc.context_id,
195 workload->ring_context_gpa);
196
	/* We only need to ensure this context is not pinned/unpinned during
	 * the period from the last submission to this submission.
	 * Upon reaching this function, the currently submitted context is not
	 * supposed to get unpinned. If a misbehaving guest driver ever does
	 * this, it would corrupt itself.
	 */
203 if (s->last_ctx[ring_id].valid &&
204 (s->last_ctx[ring_id].lrca ==
205 workload->ctx_desc.lrca) &&
206 (s->last_ctx[ring_id].ring_context_gpa ==
207 workload->ring_context_gpa))
208 skip = true;
209
210 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
211 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
212
213 if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
214 return 0;
215
216 s->last_ctx[ring_id].valid = false;
217 context_page_num = workload->engine->context_size;
218 context_page_num = context_page_num >> PAGE_SHIFT;
219
220 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
221 context_page_num = 19;
222
	/* Find consecutive GPAs starting from the gma until the first
	 * non-consecutive GPA, then read that consecutive GPA range into the
	 * dst virtual address.
	 */
226 gpa_size = 0;
227 for (i = 2; i < context_page_num; i++) {
228 context_gpa = intel_vgpu_gma_to_gpa(mm: vgpu->gtt.ggtt_mm,
229 gma: (u32)((workload->ctx_desc.lrca + i) <<
230 I915_GTT_PAGE_SHIFT));
231 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
232 gvt_vgpu_err("Invalid guest context descriptor\n");
233 return -EFAULT;
234 }
235
236 if (gpa_size == 0) {
237 gpa_base = context_gpa;
238 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
239 } else if (context_gpa != gpa_base + gpa_size)
240 goto read;
241
242 gpa_size += I915_GTT_PAGE_SIZE;
243
244 if (i == context_page_num - 1)
245 goto read;
246
247 continue;
248
249read:
250 intel_gvt_read_gpa(vgpu, gpa: gpa_base, buf: dst, len: gpa_size);
251 gpa_base = context_gpa;
252 gpa_size = I915_GTT_PAGE_SIZE;
253 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
254 }
255 ret = intel_gvt_scan_engine_context(workload);
256 if (ret) {
257 gvt_vgpu_err("invalid cmd found in guest context pages\n");
258 return ret;
259 }
260 s->last_ctx[ring_id].valid = true;
261 return 0;
262}
263
264static inline bool is_gvt_request(struct i915_request *rq)
265{
266 return intel_context_force_single_submission(ce: rq->context);
267}
268
269static void save_ring_hw_state(struct intel_vgpu *vgpu,
270 const struct intel_engine_cs *engine)
271{
272 struct intel_uncore *uncore = engine->uncore;
273 i915_reg_t reg;
274
275 reg = RING_INSTDONE(engine->mmio_base);
276 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
277 intel_uncore_read(uncore, reg);
278
279 reg = RING_ACTHD(engine->mmio_base);
280 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
281 intel_uncore_read(uncore, reg);
282
283 reg = RING_ACTHD_UDW(engine->mmio_base);
284 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
285 intel_uncore_read(uncore, reg);
286}
287
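/*
 * Context status notifier: on schedule-in/out of a request, switch the
 * engine MMIO state between the host and the owning vGPU, and track
 * whether the shadow context is currently active on the HW.
 */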
288static int shadow_context_status_change(struct notifier_block *nb,
289 unsigned long action, void *data)
290{
291 struct i915_request *rq = data;
292 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
293 shadow_ctx_notifier_block[rq->engine->id]);
294 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
295 enum intel_engine_id ring_id = rq->engine->id;
296 struct intel_vgpu_workload *workload;
297 unsigned long flags;
298
299 if (!is_gvt_request(rq)) {
300 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
301 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
302 scheduler->engine_owner[ring_id]) {
303 /* Switch ring from vGPU to host. */
304 intel_gvt_switch_mmio(pre: scheduler->engine_owner[ring_id],
305 NULL, engine: rq->engine);
306 scheduler->engine_owner[ring_id] = NULL;
307 }
308 spin_unlock_irqrestore(lock: &scheduler->mmio_context_lock, flags);
309
310 return NOTIFY_OK;
311 }
312
313 workload = scheduler->current_workload[ring_id];
314 if (unlikely(!workload))
315 return NOTIFY_OK;
316
317 switch (action) {
318 case INTEL_CONTEXT_SCHEDULE_IN:
319 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
320 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
321 /* Switch ring from host to vGPU or vGPU to vGPU. */
322 intel_gvt_switch_mmio(pre: scheduler->engine_owner[ring_id],
323 next: workload->vgpu, engine: rq->engine);
324 scheduler->engine_owner[ring_id] = workload->vgpu;
325 } else
326 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
327 ring_id, workload->vgpu->id);
328 spin_unlock_irqrestore(lock: &scheduler->mmio_context_lock, flags);
329 atomic_set(v: &workload->shadow_ctx_active, i: 1);
330 break;
331 case INTEL_CONTEXT_SCHEDULE_OUT:
332 save_ring_hw_state(vgpu: workload->vgpu, engine: rq->engine);
333 atomic_set(v: &workload->shadow_ctx_active, i: 0);
334 break;
335 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
336 save_ring_hw_state(vgpu: workload->vgpu, engine: rq->engine);
337 break;
338 default:
339 WARN_ON(1);
340 return NOTIFY_OK;
341 }
342 wake_up(&workload->shadow_ctx_status_wq);
343 return NOTIFY_OK;
344}
345
346static void
347shadow_context_descriptor_update(struct intel_context *ce,
348 struct intel_vgpu_workload *workload)
349{
350 u64 desc = ce->lrc.desc;
351
	/*
	 * Update bits 0-11 of the context descriptor, which include flags
	 * like GEN8_CTX_* cached in desc_template.
	 */
356 desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
357 desc |= (u64)workload->ctx_desc.addressing_mode <<
358 GEN8_CTX_ADDRESSING_MODE_SHIFT;
359
360 ce->lrc.desc = desc;
361}
362
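/*
 * Copy the scanned guest ring buffer contents into the ring of the shadow
 * request, emitting an init breadcrumb first so that the start of the
 * request can be tracked.
 */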
363static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
364{
365 struct intel_vgpu *vgpu = workload->vgpu;
366 struct i915_request *req = workload->req;
367 void *shadow_ring_buffer_va;
368 u32 *cs;
369 int err;
370
371 if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(ce: req->context))
372 intel_vgpu_restore_inhibit_context(vgpu, req);
373
	/*
	 * To track whether a request has started on HW, we can emit a
	 * breadcrumb at the beginning of the request and check its
	 * timeline's HWSP to see if the breadcrumb has advanced past the
	 * start of this request. In fact, the request must carry the init
	 * breadcrumb if its timeline has has_init_breadcrumb set, or the
	 * scheduler might get a wrong state of it during reset. Since the
	 * requests from GVT always set the has_init_breadcrumb flag, we need
	 * to emit the init breadcrumb for all of these requests here.
	 */
384 if (req->engine->emit_init_breadcrumb) {
385 err = req->engine->emit_init_breadcrumb(req);
386 if (err) {
387 gvt_vgpu_err("fail to emit init breadcrumb\n");
388 return err;
389 }
390 }
391
392 /* allocate shadow ring buffer */
393 cs = intel_ring_begin(rq: workload->req, num_dwords: workload->rb_len / sizeof(u32));
394 if (IS_ERR(ptr: cs)) {
395 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
396 workload->rb_len);
397 return PTR_ERR(ptr: cs);
398 }
399
400 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
401
402 /* get shadow ring buffer va */
403 workload->shadow_ring_buffer_va = cs;
404
405 memcpy(cs, shadow_ring_buffer_va,
406 workload->rb_len);
407
408 cs += workload->rb_len / sizeof(u32);
409 intel_ring_advance(rq: workload->req, cs);
410
411 return 0;
412}
413
414static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
415{
416 if (!wa_ctx->indirect_ctx.obj)
417 return;
418
419 i915_gem_object_lock(obj: wa_ctx->indirect_ctx.obj, NULL);
420 i915_gem_object_unpin_map(obj: wa_ctx->indirect_ctx.obj);
421 i915_gem_object_unlock(obj: wa_ctx->indirect_ctx.obj);
422 i915_gem_object_put(obj: wa_ctx->indirect_ctx.obj);
423
424 wa_ctx->indirect_ctx.obj = NULL;
425 wa_ctx->indirect_ctx.shadow_va = NULL;
426}
427
428static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
429{
430 struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
431
432 /* This is not a good idea */
433 sg->dma_address = addr;
434}
435
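/*
 * Point the shadow context's PPGTT page directories at the shadow page
 * tables maintained by GVT, for both 4-level and 3-level tables.
 */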
436static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
437 struct intel_context *ce)
438{
439 struct intel_vgpu_mm *mm = workload->shadow_mm;
440 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm: ce->vm);
441 int i = 0;
442
443 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
444 set_dma_address(pd: ppgtt->pd, addr: mm->ppgtt_mm.shadow_pdps[0]);
445 } else {
446 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
447 struct i915_page_directory * const pd =
448 i915_pd_entry(pdp: ppgtt->pd, n: i);
			/*
			 * Skip for now, as the current i915 ppgtt alloc won't
			 * allocate a top-level pdp for a non-4-level table;
			 * this won't impact the shadow ppgtt.
			 */
452 if (!pd)
453 break;
454
455 set_dma_address(pd, addr: mm->ppgtt_mm.shadow_pdps[i]);
456 }
457 }
458}
459
460static int
461intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
462{
463 struct intel_vgpu *vgpu = workload->vgpu;
464 struct intel_vgpu_submission *s = &vgpu->submission;
465 struct i915_request *rq;
466
467 if (workload->req)
468 return 0;
469
470 rq = i915_request_create(ce: s->shadow[workload->engine->id]);
471 if (IS_ERR(ptr: rq)) {
472 gvt_vgpu_err("fail to allocate gem request\n");
473 return PTR_ERR(ptr: rq);
474 }
475
476 workload->req = i915_request_get(rq);
477 return 0;
478}
479
/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
488int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
489{
490 struct intel_vgpu *vgpu = workload->vgpu;
491 struct intel_vgpu_submission *s = &vgpu->submission;
492 int ret;
493
494 lockdep_assert_held(&vgpu->vgpu_lock);
495
496 if (workload->shadow)
497 return 0;
498
499 if (!test_and_set_bit(nr: workload->engine->id, addr: s->shadow_ctx_desc_updated))
500 shadow_context_descriptor_update(ce: s->shadow[workload->engine->id],
501 workload);
502
503 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
504 if (ret)
505 return ret;
506
507 if (workload->engine->id == RCS0 &&
508 workload->wa_ctx.indirect_ctx.size) {
509 ret = intel_gvt_scan_and_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
510 if (ret)
511 goto err_shadow;
512 }
513
514 workload->shadow = true;
515 return 0;
516
517err_shadow:
518 release_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
519 return ret;
520}
521
522static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
523
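/*
 * Pin each shadow batch buffer into the GGTT, patch the batch buffer start
 * command to point at it, and add it to the shadow request. PPGTT batch
 * buffers are left untouched here.
 */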
524static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
525{
526 struct intel_gvt *gvt = workload->vgpu->gvt;
527 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
528 struct intel_vgpu_shadow_bb *bb;
529 struct i915_gem_ww_ctx ww;
530 int ret;
531
532 list_for_each_entry(bb, &workload->shadow_bb, list) {
		/*
		 * For a privileged batch buffer that is not a wa_ctx, the
		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
		 * the real ring address allocated later in
		 * copy_workload_to_ring_buffer. Note that shadow_ring_buffer_va
		 * points to the real ring buffer va after
		 * copy_workload_to_ring_buffer.
		 */
540
541 if (bb->bb_offset)
542 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
543 + bb->bb_offset;
544
		/*
		 * For a non-privileged bb, scan & shadow is only for
		 * debugging purposes, so the content of the shadow bb
		 * is the same as that of the original bb. Therefore,
		 * rather than switching to the shadow bb's gma
		 * address, we directly use the original batch buffer's
		 * gma address and send the original bb to hardware
		 * directly.
		 */
554 if (!bb->ppgtt) {
555 i915_gem_ww_ctx_init(ctx: &ww, intr: false);
556retry:
557 i915_gem_object_lock(obj: bb->obj, ww: &ww);
558
559 bb->vma = i915_gem_object_ggtt_pin_ww(obj: bb->obj, ww: &ww,
560 NULL, size: 0, alignment: 0, flags: 0);
561 if (IS_ERR(ptr: bb->vma)) {
562 ret = PTR_ERR(ptr: bb->vma);
563 if (ret == -EDEADLK) {
564 ret = i915_gem_ww_ctx_backoff(ctx: &ww);
565 if (!ret)
566 goto retry;
567 }
568 goto err;
569 }
570
571 /* relocate shadow batch buffer */
572 bb->bb_start_cmd_va[1] = i915_ggtt_offset(vma: bb->vma);
573 if (gmadr_bytes == 8)
574 bb->bb_start_cmd_va[2] = 0;
575
576 ret = i915_vma_move_to_active(vma: bb->vma, rq: workload->req,
577 __EXEC_OBJECT_NO_REQUEST_AWAIT);
578 if (ret)
579 goto err;
580
581 /* No one is going to touch shadow bb from now on. */
582 i915_gem_object_flush_map(obj: bb->obj);
583 i915_gem_ww_ctx_fini(ctx: &ww);
584 }
585 }
586 return 0;
587err:
588 i915_gem_ww_ctx_fini(ctx: &ww);
589 release_shadow_batch_buffer(workload);
590 return ret;
591}
592
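/*
 * Write the shadow GMAs of the per-context and indirect context WA buffers
 * into the shadow ring context.
 */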
593static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
594{
595 struct intel_vgpu_workload *workload =
596 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
597 struct i915_request *rq = workload->req;
598 struct execlist_ring_context *shadow_ring_context =
599 (struct execlist_ring_context *)rq->context->lrc_reg_state;
600
601 shadow_ring_context->bb_per_ctx_ptr.val =
602 (shadow_ring_context->bb_per_ctx_ptr.val &
603 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
604 shadow_ring_context->rcs_indirect_ctx.val =
605 (shadow_ring_context->rcs_indirect_ctx.val &
606 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
607}
608
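/*
 * Pin the shadow indirect context object into the GGTT and update the
 * shadow ring context with the resulting addresses.
 */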
609static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
610{
611 struct i915_vma *vma;
612 unsigned char *per_ctx_va =
613 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
614 wa_ctx->indirect_ctx.size;
615 struct i915_gem_ww_ctx ww;
616 int ret;
617
618 if (wa_ctx->indirect_ctx.size == 0)
619 return 0;
620
621 i915_gem_ww_ctx_init(ctx: &ww, intr: false);
622retry:
623 i915_gem_object_lock(obj: wa_ctx->indirect_ctx.obj, ww: &ww);
624
625 vma = i915_gem_object_ggtt_pin_ww(obj: wa_ctx->indirect_ctx.obj, ww: &ww, NULL,
626 size: 0, CACHELINE_BYTES, flags: 0);
627 if (IS_ERR(ptr: vma)) {
628 ret = PTR_ERR(ptr: vma);
629 if (ret == -EDEADLK) {
630 ret = i915_gem_ww_ctx_backoff(ctx: &ww);
631 if (!ret)
632 goto retry;
633 }
634 return ret;
635 }
636
637 i915_gem_ww_ctx_fini(ctx: &ww);
638
	/* FIXME: we are not tracking our pinned VMA, leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */
643
644 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
645
646 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
647 memset(per_ctx_va, 0, CACHELINE_BYTES);
648
649 update_wa_ctx_2_shadow_ctx(wa_ctx);
650 return 0;
651}
652
653static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
654{
655 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
656 workload->rb_start;
657}
658
659static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
660{
661 struct intel_vgpu_shadow_bb *bb, *pos;
662
663 if (list_empty(head: &workload->shadow_bb))
664 return;
665
666 bb = list_first_entry(&workload->shadow_bb,
667 struct intel_vgpu_shadow_bb, list);
668
669 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
670 if (bb->obj) {
671 i915_gem_object_lock(obj: bb->obj, NULL);
672 if (bb->va && !IS_ERR(ptr: bb->va))
673 i915_gem_object_unpin_map(obj: bb->obj);
674
675 if (bb->vma && !IS_ERR(ptr: bb->vma))
676 i915_vma_unpin(vma: bb->vma);
677
678 i915_gem_object_unlock(obj: bb->obj);
679 i915_gem_object_put(obj: bb->obj);
680 }
681 list_del(entry: &bb->list);
682 kfree(objp: bb);
683 }
684}
685
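/*
 * Pin the workload's shadow PPGTT mm, plus any shadow mms created for LRI
 * commands, making sure the shadow page tables are ready for use.
 */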
686static int
687intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
688{
689 struct intel_vgpu *vgpu = workload->vgpu;
690 struct intel_vgpu_mm *m;
691 int ret = 0;
692
693 ret = intel_vgpu_pin_mm(mm: workload->shadow_mm);
694 if (ret) {
695 gvt_vgpu_err("fail to vgpu pin mm\n");
696 return ret;
697 }
698
699 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
700 !workload->shadow_mm->ppgtt_mm.shadowed) {
701 intel_vgpu_unpin_mm(mm: workload->shadow_mm);
702 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
703 return -EINVAL;
704 }
705
706 if (!list_empty(head: &workload->lri_shadow_mm)) {
707 list_for_each_entry(m, &workload->lri_shadow_mm,
708 ppgtt_mm.link) {
709 ret = intel_vgpu_pin_mm(mm: m);
710 if (ret) {
711 list_for_each_entry_from_reverse(m,
712 &workload->lri_shadow_mm,
713 ppgtt_mm.link)
714 intel_vgpu_unpin_mm(mm: m);
715 gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
716 break;
717 }
718 }
719 }
720
721 if (ret)
722 intel_vgpu_unpin_mm(mm: workload->shadow_mm);
723
724 return ret;
725}
726
727static void
728intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
729{
730 struct intel_vgpu_mm *m;
731
732 if (!list_empty(head: &workload->lri_shadow_mm)) {
733 list_for_each_entry(m, &workload->lri_shadow_mm,
734 ppgtt_mm.link)
735 intel_vgpu_unpin_mm(mm: m);
736 }
737 intel_vgpu_unpin_mm(mm: workload->shadow_mm);
738}
739
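/*
 * Final preparation before dispatch: pin the shadow mms, update the shadow
 * PDPs, flush pending guest page table updates, copy the ring buffer into
 * the shadow request and set up the shadow batch buffers and WA context.
 */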
740static int prepare_workload(struct intel_vgpu_workload *workload)
741{
742 struct intel_vgpu *vgpu = workload->vgpu;
743 struct intel_vgpu_submission *s = &vgpu->submission;
744 int ret = 0;
745
746 ret = intel_vgpu_shadow_mm_pin(workload);
747 if (ret) {
748 gvt_vgpu_err("fail to pin shadow mm\n");
749 return ret;
750 }
751
752 update_shadow_pdps(workload);
753
754 set_context_ppgtt_from_shadow(workload, ce: s->shadow[workload->engine->id]);
755
756 ret = intel_vgpu_sync_oos_pages(vgpu: workload->vgpu);
757 if (ret) {
758 gvt_vgpu_err("fail to vgpu sync oos pages\n");
759 goto err_unpin_mm;
760 }
761
762 ret = intel_vgpu_flush_post_shadow(vgpu: workload->vgpu);
763 if (ret) {
764 gvt_vgpu_err("fail to flush post shadow\n");
765 goto err_unpin_mm;
766 }
767
768 ret = copy_workload_to_ring_buffer(workload);
769 if (ret) {
770 gvt_vgpu_err("fail to generate request\n");
771 goto err_unpin_mm;
772 }
773
774 ret = prepare_shadow_batch_buffer(workload);
775 if (ret) {
776 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
777 goto err_unpin_mm;
778 }
779
780 ret = prepare_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
781 if (ret) {
782 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
783 goto err_shadow_batch;
784 }
785
786 if (workload->prepare) {
787 ret = workload->prepare(workload);
788 if (ret)
789 goto err_shadow_wa_ctx;
790 }
791
792 return 0;
793err_shadow_wa_ctx:
794 release_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
795err_shadow_batch:
796 release_shadow_batch_buffer(workload);
797err_unpin_mm:
798 intel_vgpu_shadow_mm_unpin(workload);
799 return ret;
800}
801
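/*
 * Allocate the shadow request, scan and shadow the workload, populate the
 * shadow context, and submit the request to i915 for execution.
 */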
802static int dispatch_workload(struct intel_vgpu_workload *workload)
803{
804 struct intel_vgpu *vgpu = workload->vgpu;
805 struct i915_request *rq;
806 int ret;
807
808 gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
809 workload->engine->name, workload);
810
811 mutex_lock(&vgpu->vgpu_lock);
812
813 ret = intel_gvt_workload_req_alloc(workload);
814 if (ret)
815 goto err_req;
816
817 ret = intel_gvt_scan_and_shadow_workload(workload);
818 if (ret)
819 goto out;
820
821 ret = populate_shadow_context(workload);
822 if (ret) {
823 release_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
824 goto out;
825 }
826
827 ret = prepare_workload(workload);
828out:
829 if (ret) {
		/* We might still need to add a request with
		 * a clean ctx to retire it properly.
		 */
833 rq = fetch_and_zero(&workload->req);
834 i915_request_put(rq);
835 }
836
837 if (!IS_ERR_OR_NULL(ptr: workload->req)) {
838 gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
839 workload->engine->name, workload->req);
840 i915_request_add(rq: workload->req);
841 workload->dispatched = true;
842 }
843err_req:
844 if (ret)
845 workload->status = ret;
846 mutex_unlock(lock: &vgpu->vgpu_lock);
847 return ret;
848}
849
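/*
 * Pick the next workload to run on the given engine from the current
 * vGPU's workload queue, honouring the scheduler state.
 */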
850static struct intel_vgpu_workload *
851pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
852{
853 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
854 struct intel_vgpu_workload *workload = NULL;
855
856 mutex_lock(&gvt->sched_lock);
857
	/*
	 * Bail out if there is no current vgpu, if it is about to be
	 * scheduled out, or if there is no workload queued.
	 */
862 if (!scheduler->current_vgpu) {
863 gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
864 goto out;
865 }
866
867 if (scheduler->need_reschedule) {
868 gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
869 goto out;
870 }
871
872 if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
873 scheduler->current_vgpu->status) ||
874 list_empty(workload_q_head(scheduler->current_vgpu, engine)))
875 goto out;
876
	/*
	 * There is still a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
881 if (scheduler->current_workload[engine->id]) {
882 workload = scheduler->current_workload[engine->id];
883 gvt_dbg_sched("ring %s still have current workload %p\n",
884 engine->name, workload);
885 goto out;
886 }
887
	/*
	 * Pick a workload as the current workload. Once the current workload
	 * is set, the scheduling policy routines will wait for it to finish
	 * when trying to schedule out a vgpu.
	 */
894 scheduler->current_workload[engine->id] =
895 list_first_entry(workload_q_head(scheduler->current_vgpu,
896 engine),
897 struct intel_vgpu_workload, list);
898
899 workload = scheduler->current_workload[engine->id];
900
901 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
902
903 atomic_inc(v: &workload->vgpu->submission.running_workload_num);
904out:
905 mutex_unlock(lock: &gvt->sched_lock);
906 return workload;
907}
908
909static void update_guest_pdps(struct intel_vgpu *vgpu,
910 u64 ring_context_gpa, u32 pdp[8])
911{
912 u64 gpa;
913 int i;
914
915 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
916
917 for (i = 0; i < 8; i++)
918 intel_gvt_write_gpa(vgpu, gpa: gpa + i * 8, buf: &pdp[7 - i], len: 4);
919}
920
921static __maybe_unused bool
922check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
923{
924 if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
925 u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
926
927 if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
928 gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
929 return false;
930 }
931 return true;
932 } else {
933 /* see comment in LRI handler in cmd_parser.c */
934 gvt_dbg_mm("invalid shadow mm type\n");
935 return false;
936 }
937}
938
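/*
 * Write the shadow context state back to the guest ring context after the
 * workload completes: ring head/tail, guest PDPs for LRI-updated PPGTTs,
 * and the engine context pages.
 */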
939static void update_guest_context(struct intel_vgpu_workload *workload)
940{
941 struct i915_request *rq = workload->req;
942 struct intel_vgpu *vgpu = workload->vgpu;
943 struct execlist_ring_context *shadow_ring_context;
944 struct intel_context *ctx = workload->req->context;
945 void *context_base;
946 void *src;
947 unsigned long context_gpa, context_page_num;
948 unsigned long gpa_base; /* first gpa of consecutive GPAs */
949 unsigned long gpa_size; /* size of consecutive GPAs*/
950 int i;
951 u32 ring_base;
952 u32 head, tail;
953 u16 wrap_count;
954
955 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
956 workload->ctx_desc.lrca);
957
958 GEM_BUG_ON(!intel_context_is_pinned(ctx));
959
960 head = workload->rb_head;
961 tail = workload->rb_tail;
962 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
963
964 if (tail < head) {
965 if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
966 wrap_count = 0;
967 else
968 wrap_count += 1;
969 }
970
971 head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
972
973 ring_base = rq->engine->mmio_base;
974 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
975 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
976
977 context_page_num = rq->engine->context_size;
978 context_page_num = context_page_num >> PAGE_SHIFT;
979
980 if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
981 context_page_num = 19;
982
983 context_base = (void *) ctx->lrc_reg_state -
984 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
985
	/* Find consecutive GPAs starting from the gma until the first
	 * non-consecutive GPA, then write that consecutive GPA range from the
	 * src virtual address.
	 */
989 gpa_size = 0;
990 for (i = 2; i < context_page_num; i++) {
991 context_gpa = intel_vgpu_gma_to_gpa(mm: vgpu->gtt.ggtt_mm,
992 gma: (u32)((workload->ctx_desc.lrca + i) <<
993 I915_GTT_PAGE_SHIFT));
994 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
995 gvt_vgpu_err("invalid guest context descriptor\n");
996 return;
997 }
998
999 if (gpa_size == 0) {
1000 gpa_base = context_gpa;
1001 src = context_base + (i << I915_GTT_PAGE_SHIFT);
1002 } else if (context_gpa != gpa_base + gpa_size)
1003 goto write;
1004
1005 gpa_size += I915_GTT_PAGE_SIZE;
1006
1007 if (i == context_page_num - 1)
1008 goto write;
1009
1010 continue;
1011
1012write:
1013 intel_gvt_write_gpa(vgpu, gpa: gpa_base, buf: src, len: gpa_size);
1014 gpa_base = context_gpa;
1015 gpa_size = I915_GTT_PAGE_SIZE;
1016 src = context_base + (i << I915_GTT_PAGE_SHIFT);
1017 }
1018
1019 intel_gvt_write_gpa(vgpu, gpa: workload->ring_context_gpa +
1020 RING_CTX_OFF(ring_header.val), buf: &workload->rb_tail, len: 4);
1021
1022 shadow_ring_context = (void *) ctx->lrc_reg_state;
1023
1024 if (!list_empty(head: &workload->lri_shadow_mm)) {
1025 struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
1026 struct intel_vgpu_mm,
1027 ppgtt_mm.link);
1028 GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
1029 update_guest_pdps(vgpu, ring_context_gpa: workload->ring_context_gpa,
1030 pdp: (void *)m->ppgtt_mm.guest_pdps);
1031 }
1032
1033#define COPY_REG(name) \
1034 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
1035 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
1036
1037 COPY_REG(ctx_ctrl);
1038 COPY_REG(ctx_timestamp);
1039
1040#undef COPY_REG
1041
1042 intel_gvt_write_gpa(vgpu,
1043 gpa: workload->ring_context_gpa +
1044 sizeof(*shadow_ring_context),
1045 buf: (void *)shadow_ring_context +
1046 sizeof(*shadow_ring_context),
1047 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
1048}
1049
1050void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
1051 intel_engine_mask_t engine_mask)
1052{
1053 struct intel_vgpu_submission *s = &vgpu->submission;
1054 struct intel_engine_cs *engine;
1055 struct intel_vgpu_workload *pos, *n;
1056 intel_engine_mask_t tmp;
1057
1058 /* free the unsubmitted workloads in the queues. */
1059 for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
1060 list_for_each_entry_safe(pos, n,
1061 &s->workload_q_head[engine->id], list) {
1062 list_del_init(entry: &pos->list);
1063 intel_vgpu_destroy_workload(workload: pos);
1064 }
1065 clear_bit(nr: engine->id, addr: s->shadow_ctx_desc_updated);
1066 }
1067}
1068
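/*
 * Complete the current workload on a ring: wait for the shadow context to
 * be scheduled out, propagate the completion status and pending events to
 * the guest, and release the workload.
 */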
1069static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
1070{
1071 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1072 struct intel_vgpu_workload *workload =
1073 scheduler->current_workload[ring_id];
1074 struct intel_vgpu *vgpu = workload->vgpu;
1075 struct intel_vgpu_submission *s = &vgpu->submission;
1076 struct i915_request *rq = workload->req;
1077 int event;
1078
1079 mutex_lock(&vgpu->vgpu_lock);
1080 mutex_lock(&gvt->sched_lock);
1081
	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, complete the workload directly.
	 */
1086 if (rq) {
1087 wait_event(workload->shadow_ctx_status_wq,
1088 !atomic_read(&workload->shadow_ctx_active));
1089
		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO for the workload status so
		 * that, when this request caused a GPU hang, we don't trigger
		 * a context switch interrupt to the guest.
		 */
1095 if (likely(workload->status == -EINPROGRESS)) {
1096 if (workload->req->fence.error == -EIO)
1097 workload->status = -EIO;
1098 else
1099 workload->status = 0;
1100 }
1101
1102 if (!workload->status &&
1103 !(vgpu->resetting_eng & BIT(ring_id))) {
1104 update_guest_context(workload);
1105
1106 for_each_set_bit(event, workload->pending_events,
1107 INTEL_GVT_EVENT_MAX)
1108 intel_vgpu_trigger_virtual_event(vgpu, event);
1109 }
1110
1111 i915_request_put(fetch_and_zero(&workload->req));
1112 }
1113
1114 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
1115 ring_id, workload, workload->status);
1116
1117 scheduler->current_workload[ring_id] = NULL;
1118
1119 list_del_init(entry: &workload->list);
1120
1121 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* A non-successful workload->status means the HW GPU has
		 * encountered a GPU hang or something went wrong with i915/GVT,
		 * and GVT won't inject a context switch interrupt to the guest.
		 * So to the guest, this error is effectively a vGPU hang.
		 * Accordingly, we should emulate a vGPU hang: if there
		 * are pending workloads which were already submitted
		 * from the guest, we should clean them up like the HW GPU does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up later during the reset process, so doing
		 * the workload cleanup here doesn't have any impact.
		 */
1135 intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
1136 }
1137
1138 workload->complete(workload);
1139
1140 intel_vgpu_shadow_mm_unpin(workload);
1141 intel_vgpu_destroy_workload(workload);
1142
1143 atomic_dec(v: &s->running_workload_num);
1144 wake_up(&scheduler->workload_complete_wq);
1145
1146 if (gvt->scheduler.need_reschedule)
1147 intel_gvt_request_service(gvt, service: INTEL_GVT_REQUEST_EVENT_SCHED);
1148
1149 mutex_unlock(lock: &gvt->sched_lock);
1150 mutex_unlock(lock: &vgpu->vgpu_lock);
1151}
1152
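/*
 * Per-engine workload thread: wait for workloads to be queued, dispatch
 * them to i915, wait for their completion and then complete them back to
 * the guest.
 */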
1153static int workload_thread(void *arg)
1154{
1155 struct intel_engine_cs *engine = arg;
1156 const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
1157 struct intel_gvt *gvt = engine->i915->gvt;
1158 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1159 struct intel_vgpu_workload *workload = NULL;
1160 struct intel_vgpu *vgpu = NULL;
1161 int ret;
1162 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1163
1164 gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1165
1166 while (!kthread_should_stop()) {
1167 intel_wakeref_t wakeref;
1168
1169 add_wait_queue(wq_head: &scheduler->waitq[engine->id], wq_entry: &wait);
1170 do {
1171 workload = pick_next_workload(gvt, engine);
1172 if (workload)
1173 break;
1174 wait_woken(wq_entry: &wait, TASK_INTERRUPTIBLE,
1175 MAX_SCHEDULE_TIMEOUT);
1176 } while (!kthread_should_stop());
1177 remove_wait_queue(wq_head: &scheduler->waitq[engine->id], wq_entry: &wait);
1178
1179 if (!workload)
1180 break;
1181
1182 gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1183 engine->name, workload,
1184 workload->vgpu->id);
1185
1186 wakeref = intel_runtime_pm_get(rpm: engine->uncore->rpm);
1187
1188 gvt_dbg_sched("ring %s will dispatch workload %p\n",
1189 engine->name, workload);
1190
1191 if (need_force_wake)
1192 intel_uncore_forcewake_get(uncore: engine->uncore,
1193 domains: FORCEWAKE_ALL);
1194 /*
1195 * Update the vReg of the vGPU which submitted this
1196 * workload. The vGPU may use these registers for checking
1197 * the context state. The value comes from GPU commands
1198 * in this workload.
1199 */
1200 update_vreg_in_ctx(workload);
1201
1202 ret = dispatch_workload(workload);
1203
1204 if (ret) {
1205 vgpu = workload->vgpu;
1206 gvt_vgpu_err("fail to dispatch workload, skip\n");
1207 goto complete;
1208 }
1209
1210 gvt_dbg_sched("ring %s wait workload %p\n",
1211 engine->name, workload);
1212 i915_request_wait(rq: workload->req, flags: 0, MAX_SCHEDULE_TIMEOUT);
1213
1214complete:
1215 gvt_dbg_sched("will complete workload %p, status: %d\n",
1216 workload, workload->status);
1217
1218 complete_current_workload(gvt, ring_id: engine->id);
1219
1220 if (need_force_wake)
1221 intel_uncore_forcewake_put(uncore: engine->uncore,
1222 domains: FORCEWAKE_ALL);
1223
1224 intel_runtime_pm_put(rpm: engine->uncore->rpm, wref: wakeref);
1225 if (ret && (vgpu_is_vm_unhealthy(ret)))
1226 enter_failsafe_mode(vgpu, reason: GVT_FAILSAFE_GUEST_ERR);
1227 }
1228 return 0;
1229}
1230
1231void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1232{
1233 struct intel_vgpu_submission *s = &vgpu->submission;
1234 struct intel_gvt *gvt = vgpu->gvt;
1235 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1236
1237 if (atomic_read(v: &s->running_workload_num)) {
1238 gvt_dbg_sched("wait vgpu idle\n");
1239
1240 wait_event(scheduler->workload_complete_wq,
1241 !atomic_read(&s->running_workload_num));
1242 }
1243}
1244
1245void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1246{
1247 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1248 struct intel_engine_cs *engine;
1249 enum intel_engine_id i;
1250
1251 gvt_dbg_core("clean workload scheduler\n");
1252
1253 for_each_engine(engine, gvt->gt, i) {
1254 atomic_notifier_chain_unregister(
1255 nh: &engine->context_status_notifier,
1256 nb: &gvt->shadow_ctx_notifier_block[i]);
1257 kthread_stop(k: scheduler->thread[i]);
1258 }
1259}
1260
1261int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1262{
1263 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1264 struct intel_engine_cs *engine;
1265 enum intel_engine_id i;
1266 int ret;
1267
1268 gvt_dbg_core("init workload scheduler\n");
1269
1270 init_waitqueue_head(&scheduler->workload_complete_wq);
1271
1272 for_each_engine(engine, gvt->gt, i) {
1273 init_waitqueue_head(&scheduler->waitq[i]);
1274
1275 scheduler->thread[i] = kthread_run(workload_thread, engine,
1276 "gvt:%s", engine->name);
1277 if (IS_ERR(ptr: scheduler->thread[i])) {
1278 gvt_err("fail to create workload thread\n");
1279 ret = PTR_ERR(ptr: scheduler->thread[i]);
1280 goto err;
1281 }
1282
1283 gvt->shadow_ctx_notifier_block[i].notifier_call =
1284 shadow_context_status_change;
1285 atomic_notifier_chain_register(nh: &engine->context_status_notifier,
1286 nb: &gvt->shadow_ctx_notifier_block[i]);
1287 }
1288
1289 return 0;
1290
1291err:
1292 intel_gvt_clean_workload_scheduler(gvt);
1293 return ret;
1294}
1295
1296static void
1297i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
1298 struct i915_ppgtt *ppgtt)
1299{
1300 int i;
1301
1302 if (i915_vm_is_4lvl(vm: &ppgtt->vm)) {
1303 set_dma_address(pd: ppgtt->pd, addr: s->i915_context_pml4);
1304 } else {
1305 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1306 struct i915_page_directory * const pd =
1307 i915_pd_entry(pdp: ppgtt->pd, n: i);
1308
1309 set_dma_address(pd, addr: s->i915_context_pdps[i]);
1310 }
1311 }
1312}
1313
1314/**
1315 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1316 * @vgpu: a vGPU
1317 *
1318 * This function is called when a vGPU is being destroyed.
1319 *
1320 */
1321void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1322{
1323 struct intel_vgpu_submission *s = &vgpu->submission;
1324 struct intel_engine_cs *engine;
1325 enum intel_engine_id id;
1326
1327 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, interface: 0);
1328
1329 i915_context_ppgtt_root_restore(s, ppgtt: i915_vm_to_ppgtt(vm: s->shadow[0]->vm));
1330 for_each_engine(engine, vgpu->gvt->gt, id)
1331 intel_context_put(ce: s->shadow[id]);
1332
1333 kmem_cache_destroy(s: s->workloads);
1334}
1335
1336
1337/**
1338 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1339 * @vgpu: a vGPU
1340 * @engine_mask: engines expected to be reset
1341 *
 * This function is called when a vGPU is being reset.
1343 *
1344 */
1345void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1346 intel_engine_mask_t engine_mask)
1347{
1348 struct intel_vgpu_submission *s = &vgpu->submission;
1349
1350 if (!s->active)
1351 return;
1352
1353 intel_vgpu_clean_workloads(vgpu, engine_mask);
1354 s->ops->reset(vgpu, engine_mask);
1355}
1356
1357static void
1358i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
1359 struct i915_ppgtt *ppgtt)
1360{
1361 int i;
1362
1363 if (i915_vm_is_4lvl(vm: &ppgtt->vm)) {
1364 s->i915_context_pml4 = px_dma(ppgtt->pd);
1365 } else {
1366 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1367 struct i915_page_directory * const pd =
1368 i915_pd_entry(pdp: ppgtt->pd, n: i);
1369
1370 s->i915_context_pdps[i] = px_dma(pd);
1371 }
1372 }
1373}
1374
1375/**
1376 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1377 * @vgpu: a vGPU
1378 *
1379 * This function is called when a vGPU is being created.
1380 *
1381 * Returns:
1382 * Zero on success, negative error code if failed.
1383 *
1384 */
1385int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1386{
1387 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1388 struct intel_vgpu_submission *s = &vgpu->submission;
1389 struct intel_engine_cs *engine;
1390 struct i915_ppgtt *ppgtt;
1391 enum intel_engine_id i;
1392 int ret;
1393
1394 ppgtt = i915_ppgtt_create(gt: to_gt(i915), I915_BO_ALLOC_PM_EARLY);
1395 if (IS_ERR(ptr: ppgtt))
1396 return PTR_ERR(ptr: ppgtt);
1397
1398 i915_context_ppgtt_root_save(s, ppgtt);
1399
1400 for_each_engine(engine, vgpu->gvt->gt, i) {
1401 struct intel_context *ce;
1402
1403 INIT_LIST_HEAD(list: &s->workload_q_head[i]);
1404 s->shadow[i] = ERR_PTR(error: -EINVAL);
1405
1406 ce = intel_context_create(engine);
1407 if (IS_ERR(ptr: ce)) {
1408 ret = PTR_ERR(ptr: ce);
1409 goto out_shadow_ctx;
1410 }
1411
1412 i915_vm_put(vm: ce->vm);
1413 ce->vm = i915_vm_get(vm: &ppgtt->vm);
1414 intel_context_set_single_submission(ce);
1415
1416 /* Max ring buffer size */
1417 if (!intel_uc_wants_guc_submission(uc: &engine->gt->uc))
1418 ce->ring_size = SZ_2M;
1419
1420 s->shadow[i] = ce;
1421 }
1422
1423 bitmap_zero(dst: s->shadow_ctx_desc_updated, nbits: I915_NUM_ENGINES);
1424
1425 s->workloads = kmem_cache_create_usercopy(name: "gvt-g_vgpu_workload",
1426 size: sizeof(struct intel_vgpu_workload), align: 0,
1427 SLAB_HWCACHE_ALIGN,
1428 offsetof(struct intel_vgpu_workload, rb_tail),
1429 sizeof_field(struct intel_vgpu_workload, rb_tail),
1430 NULL);
1431
1432 if (!s->workloads) {
1433 ret = -ENOMEM;
1434 goto out_shadow_ctx;
1435 }
1436
1437 atomic_set(v: &s->running_workload_num, i: 0);
1438 bitmap_zero(dst: s->tlb_handle_pending, nbits: I915_NUM_ENGINES);
1439
1440 memset(s->last_ctx, 0, sizeof(s->last_ctx));
1441
1442 i915_vm_put(vm: &ppgtt->vm);
1443 return 0;
1444
1445out_shadow_ctx:
1446 i915_context_ppgtt_root_restore(s, ppgtt);
1447 for_each_engine(engine, vgpu->gvt->gt, i) {
1448 if (IS_ERR(ptr: s->shadow[i]))
1449 break;
1450
1451 intel_context_put(ce: s->shadow[i]);
1452 }
1453 i915_vm_put(vm: &ppgtt->vm);
1454 return ret;
1455}
1456
1457/**
1458 * intel_vgpu_select_submission_ops - select virtual submission interface
1459 * @vgpu: a vGPU
1460 * @engine_mask: either ALL_ENGINES or target engine mask
1461 * @interface: expected vGPU virtual submission interface
1462 *
1463 * This function is called when guest configures submission interface.
1464 *
1465 * Returns:
1466 * Zero on success, negative error code if failed.
1467 *
1468 */
1469int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1470 intel_engine_mask_t engine_mask,
1471 unsigned int interface)
1472{
1473 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1474 struct intel_vgpu_submission *s = &vgpu->submission;
1475 const struct intel_vgpu_submission_ops *ops[] = {
1476 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1477 &intel_vgpu_execlist_submission_ops,
1478 };
1479 int ret;
1480
1481 if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
1482 return -EINVAL;
1483
1484 if (drm_WARN_ON(&i915->drm,
1485 interface == 0 && engine_mask != ALL_ENGINES))
1486 return -EINVAL;
1487
1488 if (s->active)
1489 s->ops->clean(vgpu, engine_mask);
1490
1491 if (interface == 0) {
1492 s->ops = NULL;
1493 s->virtual_submission_interface = 0;
1494 s->active = false;
1495 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1496 return 0;
1497 }
1498
1499 ret = ops[interface]->init(vgpu, engine_mask);
1500 if (ret)
1501 return ret;
1502
1503 s->ops = ops[interface];
1504 s->virtual_submission_interface = interface;
1505 s->active = true;
1506
1507 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1508 vgpu->id, s->ops->name);
1509
1510 return 0;
1511}
1512
1513/**
1514 * intel_vgpu_destroy_workload - destroy a vGPU workload
1515 * @workload: workload to destroy
1516 *
1517 * This function is called when destroy a vGPU workload.
1518 *
1519 */
1520void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1521{
1522 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1523
1524 intel_context_unpin(ce: s->shadow[workload->engine->id]);
1525 release_shadow_batch_buffer(workload);
1526 release_shadow_wa_ctx(wa_ctx: &workload->wa_ctx);
1527
1528 if (!list_empty(head: &workload->lri_shadow_mm)) {
1529 struct intel_vgpu_mm *m, *mm;
1530 list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
1531 ppgtt_mm.link) {
1532 list_del(entry: &m->ppgtt_mm.link);
1533 intel_vgpu_mm_put(mm: m);
1534 }
1535 }
1536
1537 GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
1538 if (workload->shadow_mm)
1539 intel_vgpu_mm_put(mm: workload->shadow_mm);
1540
1541 kmem_cache_free(s: s->workloads, objp: workload);
1542}
1543
1544static struct intel_vgpu_workload *
1545alloc_workload(struct intel_vgpu *vgpu)
1546{
1547 struct intel_vgpu_submission *s = &vgpu->submission;
1548 struct intel_vgpu_workload *workload;
1549
1550 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1551 if (!workload)
1552 return ERR_PTR(error: -ENOMEM);
1553
1554 INIT_LIST_HEAD(list: &workload->list);
1555 INIT_LIST_HEAD(list: &workload->shadow_bb);
1556 INIT_LIST_HEAD(list: &workload->lri_shadow_mm);
1557
1558 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1559 atomic_set(v: &workload->shadow_ctx_active, i: 0);
1560
1561 workload->status = -EINPROGRESS;
1562 workload->vgpu = vgpu;
1563
1564 return workload;
1565}
1566
1567#define RING_CTX_OFF(x) \
1568 offsetof(struct execlist_ring_context, x)
1569
1570static void read_guest_pdps(struct intel_vgpu *vgpu,
1571 u64 ring_context_gpa, u32 pdp[8])
1572{
1573 u64 gpa;
1574 int i;
1575
1576 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1577
1578 for (i = 0; i < 8; i++)
1579 intel_gvt_read_gpa(vgpu,
1580 gpa: gpa + i * 8, buf: &pdp[7 - i], len: 4);
1581}
1582
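/*
 * Create or look up the shadow PPGTT mm for the workload, based on the
 * addressing mode and the PDPs read from the guest ring context.
 */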
1583static int prepare_mm(struct intel_vgpu_workload *workload)
1584{
1585 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1586 struct intel_vgpu_mm *mm;
1587 struct intel_vgpu *vgpu = workload->vgpu;
1588 enum intel_gvt_gtt_type root_entry_type;
1589 u64 pdps[GVT_RING_CTX_NR_PDPS];
1590
1591 switch (desc->addressing_mode) {
1592 case 1: /* legacy 32-bit */
1593 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1594 break;
1595 case 3: /* legacy 64-bit */
1596 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1597 break;
1598 default:
1599 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1600 return -EINVAL;
1601 }
1602
1603 read_guest_pdps(vgpu: workload->vgpu, ring_context_gpa: workload->ring_context_gpa, pdp: (void *)pdps);
1604
1605 mm = intel_vgpu_get_ppgtt_mm(vgpu: workload->vgpu, root_entry_type, pdps);
1606 if (IS_ERR(ptr: mm))
1607 return PTR_ERR(ptr: mm);
1608
1609 workload->shadow_mm = mm;
1610 return 0;
1611}
1612
1613#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1614 ((a)->lrca == (b)->lrca))
1615
1616/**
1617 * intel_vgpu_create_workload - create a vGPU workload
1618 * @vgpu: a vGPU
1619 * @engine: the engine
1620 * @desc: a guest context descriptor
1621 *
1622 * This function is called when creating a vGPU workload.
1623 *
1624 * Returns:
 * struct intel_vgpu_workload * on success, or a negative error code
 * encoded in an ERR_PTR on failure.
1627 *
1628 */
1629struct intel_vgpu_workload *
1630intel_vgpu_create_workload(struct intel_vgpu *vgpu,
1631 const struct intel_engine_cs *engine,
1632 struct execlist_ctx_descriptor_format *desc)
1633{
1634 struct intel_vgpu_submission *s = &vgpu->submission;
1635 struct list_head *q = workload_q_head(vgpu, engine);
1636 struct intel_vgpu_workload *last_workload = NULL;
1637 struct intel_vgpu_workload *workload = NULL;
1638 u64 ring_context_gpa;
1639 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1640 u32 guest_head;
1641 int ret;
1642
1643 ring_context_gpa = intel_vgpu_gma_to_gpa(mm: vgpu->gtt.ggtt_mm,
1644 gma: (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1645 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1646 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1647 return ERR_PTR(error: -EINVAL);
1648 }
1649
1650 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1651 RING_CTX_OFF(ring_header.val), buf: &head, len: 4);
1652
1653 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1654 RING_CTX_OFF(ring_tail.val), buf: &tail, len: 4);
1655
1656 guest_head = head;
1657
1658 head &= RB_HEAD_OFF_MASK;
1659 tail &= RB_TAIL_OFF_MASK;
1660
1661 list_for_each_entry_reverse(last_workload, q, list) {
1662
1663 if (same_context(&last_workload->ctx_desc, desc)) {
1664 gvt_dbg_el("ring %s cur workload == last\n",
1665 engine->name);
1666 gvt_dbg_el("ctx head %x real head %lx\n", head,
1667 last_workload->rb_tail);
			/*
			 * We cannot use the guest context head pointer here,
			 * as it might not have been updated at this point.
			 */
1672 head = last_workload->rb_tail;
1673 break;
1674 }
1675 }
1676
1677 gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1678
1679 /* record some ring buffer register values for scan and shadow */
1680 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1681 RING_CTX_OFF(rb_start.val), buf: &start, len: 4);
1682 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1683 RING_CTX_OFF(rb_ctrl.val), buf: &ctl, len: 4);
1684 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1685 RING_CTX_OFF(ctx_ctrl.val), buf: &ctx_ctl, len: 4);
1686
1687 if (!intel_gvt_ggtt_validate_range(vgpu, addr: start,
1688 _RING_CTL_BUF_SIZE(ctl))) {
1689 gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
1690 return ERR_PTR(error: -EINVAL);
1691 }
1692
1693 workload = alloc_workload(vgpu);
1694 if (IS_ERR(ptr: workload))
1695 return workload;
1696
1697 workload->engine = engine;
1698 workload->ctx_desc = *desc;
1699 workload->ring_context_gpa = ring_context_gpa;
1700 workload->rb_head = head;
1701 workload->guest_rb_head = guest_head;
1702 workload->rb_tail = tail;
1703 workload->rb_start = start;
1704 workload->rb_ctl = ctl;
1705
1706 if (engine->id == RCS0) {
1707 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1708 RING_CTX_OFF(bb_per_ctx_ptr.val), buf: &per_ctx, len: 4);
1709 intel_gvt_read_gpa(vgpu, gpa: ring_context_gpa +
1710 RING_CTX_OFF(rcs_indirect_ctx.val), buf: &indirect_ctx, len: 4);
1711
1712 workload->wa_ctx.indirect_ctx.guest_gma =
1713 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1714 workload->wa_ctx.indirect_ctx.size =
1715 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1716 CACHELINE_BYTES;
1717
1718 if (workload->wa_ctx.indirect_ctx.size != 0) {
1719 if (!intel_gvt_ggtt_validate_range(vgpu,
1720 addr: workload->wa_ctx.indirect_ctx.guest_gma,
1721 size: workload->wa_ctx.indirect_ctx.size)) {
1722 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1723 workload->wa_ctx.indirect_ctx.guest_gma);
1724 kmem_cache_free(s: s->workloads, objp: workload);
1725 return ERR_PTR(error: -EINVAL);
1726 }
1727 }
1728
1729 workload->wa_ctx.per_ctx.guest_gma =
1730 per_ctx & PER_CTX_ADDR_MASK;
1731 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1732 if (workload->wa_ctx.per_ctx.valid) {
1733 if (!intel_gvt_ggtt_validate_range(vgpu,
1734 addr: workload->wa_ctx.per_ctx.guest_gma,
1735 CACHELINE_BYTES)) {
1736 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1737 workload->wa_ctx.per_ctx.guest_gma);
1738 kmem_cache_free(s: s->workloads, objp: workload);
1739 return ERR_PTR(error: -EINVAL);
1740 }
1741 }
1742 }
1743
1744 gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1745 workload, engine->name, head, tail, start, ctl);
1746
1747 ret = prepare_mm(workload);
1748 if (ret) {
1749 kmem_cache_free(s: s->workloads, objp: workload);
1750 return ERR_PTR(error: ret);
1751 }
1752
1753 /* Only scan and shadow the first workload in the queue
1754 * as there is only one pre-allocated buf-obj for shadow.
1755 */
1756 if (list_empty(head: q)) {
1757 intel_wakeref_t wakeref;
1758
1759 with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
1760 ret = intel_gvt_scan_and_shadow_workload(workload);
1761 }
1762
1763 if (ret) {
1764 if (vgpu_is_vm_unhealthy(ret))
1765 enter_failsafe_mode(vgpu, reason: GVT_FAILSAFE_GUEST_ERR);
1766 intel_vgpu_destroy_workload(workload);
1767 return ERR_PTR(error: ret);
1768 }
1769
1770 ret = intel_context_pin(ce: s->shadow[engine->id]);
1771 if (ret) {
1772 intel_vgpu_destroy_workload(workload);
1773 return ERR_PTR(error: ret);
1774 }
1775
1776 return workload;
1777}
1778
1779/**
1780 * intel_vgpu_queue_workload - Queue a vGPU workload
1781 * @workload: the workload to queue in
1782 */
1783void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1784{
1785 list_add_tail(new: &workload->list,
1786 workload_q_head(workload->vgpu, workload->engine));
1787 intel_gvt_kick_schedule(gvt: workload->vgpu->gvt);
1788 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
1789}
1790

/* source code of linux/drivers/gpu/drm/i915/gvt/scheduler.c */