/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 * Kevin Tian <kevin.tian@intel.com>
 * Eddie Dong <eddie.dong@intel.com>
 * Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 * Min He <min.he@intel.com>
 * Tina Zhang <tina.zhang@intel.com>
 * Pei Zhang <pei.zhang@intel.com>
 * Niu Bing <bing.niu@intel.com>
 * Ping Gao <ping.a.gao@intel.com>
 * Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <drm/display/drm_dp.h>
#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "display/intel_display_regs.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_fbc.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include <linux/vmalloc.h>

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	struct drm_i915_private *i915 = gvt->gt->i915;

	if (IS_BROADWELL(i915))
		return D_BDW;
	else if (IS_SKYLAKE(i915))
		return D_SKL;
	else if (IS_KABYLAKE(i915))
		return D_KBL;
	else if (IS_BROXTON(i915))
		return D_BXT;
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		return D_CFL;

	return 0;
}

static bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
		unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
			   u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
			   gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		p = intel_gvt_find_mmio_info(gvt, i);
		if (!p) {
			WARN(1, "assign a handler to a non-tracked mmio %x\n",
			     i);
			return -ENODEV;
		}
		p->ro_mask = ro_mask;
		gvt->mmio.mmio_attribute[i / 4] = flags;
		if (read)
			p->read = read;
		if (write)
			p->write = write;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * The engine containing the offset within its mmio page.
 */
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->gt, id)
		if (engine->mmio_base == offset)
			return engine;

	return NULL;
}

#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
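
/*
 * Illustrative note: each GEN6 fence register is a 64-bit LO/HI pair, so
 * consecutive fences sit 8 bytes apart (hence the >> 3 above); e.g. an
 * offset 16 bytes past FENCE_REG_GEN6_LO(0) maps to fence number 2, and
 * fence 2 maps back to that same offset.
 */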

void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected your guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Graphics resource is not enough for the guest\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT Internal error for the guest\n");
		break;
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("access oob fence reg %d/%d\n",
			     fence_num, max_fence);

		/* When a guest accesses oob fence regs without accessing
		 * pv_info first, we treat the guest as not supporting GVT,
		 * and let the vgpu enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* All engines must be enabled together for vGPU,
			 * since we don't know which engine the ppgtt will
			 * bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int fence_num = offset_to_fence_num(off);
	intel_wakeref_t wakeref;
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	wakeref = mmio_hw_access_pre(gvt->gt);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(gvt->gt, wakeref);
	return 0;
}

#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))
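
/*
 * A sketch of the masked-register convention this macro models: the upper
 * 16 bits of the written value select which of the lower 16 bits take
 * effect. E.g. writing 0x00010001 sets bit 0, writing 0x00010000 clears
 * bit 0, and writing 0x00000001 leaves bit 0 unchanged because its enable
 * bit is not set.
 */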

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_GT_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	intel_engine_mask_t engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= BIT(RCS0);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= BIT(VCS0);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
			engine_mask |= BIT(BCS0);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
			engine_mask |= BIT(VECS0);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
			engine_mask |= BIT(VCS1);
		}
		if (data & GEN9_GRDOM_GUC) {
			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock is already held by emulate mmio r/w */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;

	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/*
 * Only PIPE_A is enabled in current vGPU display and PIPE_A is tied to
 * TRANSCODER_A in HW. DDI/PORT could be PORT_x depending on
 * setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initial
 * enabled DPLL. Later the guest driver may set up a different DPLLx when
 * setting the mode. So the correct sequence to find the DP stream clock is:
 * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 * Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 * Pixel clock = h_total * v_total * refresh_rate
 * stream clock = Pixel clock
 * ls_clk = DP bitrate
 * Link M/N = strm_clk / ls_clk
 */
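
/*
 * Worked example (illustrative, standard CEA-861 1080p60 timing): with
 * h_total = 2200 and v_total = 1125, the pixel clock is
 * 2200 * 1125 * 60 = 148.5 MHz; on an HBR link (ls_clk = 270 MHz) this
 * gives Link M/N = 148500 / 270000 = 0.55.
 */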

static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_LCPLL_2700:
		dp_br = 270000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		dp_br = 135000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		dp_br = 81000 * 2;
		break;
	case PORT_CLK_SEL_SPLL:
	{
		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
		case SPLL_FREQ_810MHz:
			dp_br = 81000 * 2;
			break;
		case SPLL_FREQ_1350MHz:
			dp_br = 135000 * 2;
			break;
		case SPLL_FREQ_2700MHz:
			dp_br = 270000 * 2;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
			break;
		}
		break;
	}
	case PORT_CLK_SEL_WRPLL1:
	case PORT_CLK_SEL_WRPLL2:
	{
		u32 wrpll_ctl;
		int refclk, n, p, r;

		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
		else
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));

		switch (wrpll_ctl & WRPLL_REF_MASK) {
		case WRPLL_REF_PCH_SSC:
			refclk = 135000;
			break;
		case WRPLL_REF_LCPLL:
			refclk = 2700000;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
				    vgpu->id, port_name(port), wrpll_ctl);
			goto out;
		}

		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

		dp_br = (refclk * n / 10) / (p * r) * 2;
		break;
	}
	default:
		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
		break;
	}

out:
	return dp_br;
}

static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	int refclk = 100000;
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	struct dpll clock = {};
	u32 temp;

	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
	switch (port) {
	case PORT_A:
		phy = DPIO_PHY1;
		ch = DPIO_CH0;
		break;
	case PORT_B:
		phy = DPIO_PHY0;
		ch = DPIO_CH0;
		break;
	case PORT_C:
		phy = DPIO_PHY0;
		ch = DPIO_CH1;
		break;
	default:
		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
		goto out;
	}

	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
			    vgpu->id, port_name(port), temp);
		goto out;
	}

	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
					  vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
				vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.m = clock.m1 * clock.m2;
	clock.p = clock.p1 * clock.p2 * 5;

	if (clock.n == 0 || clock.p == 0) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
		goto out;
	}

	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);

	dp_br = clock.dot;

out:
	return dp_br;
}

static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;

	/* Find the enabled DPLL for the DDI/PORT */
	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
	} else {
		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
			    vgpu->id, port_name(port));
		return dp_br;
	}

	/* Find PLL output frequency from correct DPLL, and get bit rate */
	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
	case DPLL_CTRL1_LINK_RATE_810:
		dp_br = 81000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		dp_br = 108000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		dp_br = 135000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		dp_br = 162000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		dp_br = 216000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		dp_br = 270000 * 2;
		break;
	default:
		dp_br = 0;
		gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
			    vgpu->id, port_name(port), dpll_id);
	}

	return dp_br;
}

static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	enum port port;
	u32 dp_br, link_m, link_n, htotal, vtotal;

	/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &
		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
	if (port != PORT_B && port != PORT_D) {
		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
		return;
	}

	/* Calculate DP bitrate from PLL */
	if (IS_BROADWELL(dev_priv))
		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
	else if (IS_BROXTON(dev_priv))
		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
	else
		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);

	/* Get DP link symbol clock M/N */
	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A));
	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A));

	/* Get H/V total from transcoder timing */
	htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
	vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

	if (dp_br && link_n && htotal && vtotal) {
		u64 pixel_clk = 0;
		u32 new_rate = 0;
		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);

		/* Calculate pixel clock by (ls_clk * M / N) */
		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
		pixel_clk *= MSEC_PER_SEC;

		/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0),
						   mul_u32_u32(htotal + 1, vtotal + 1));

		if (*old_rate != new_rate)
			*old_rate = new_rate;

		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
			    vgpu->id, pipe_name(PIPE_A), new_rate);
	}
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & TRANSCONF_ENABLE) {
		vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
		vgpu_update_refresh_rate(vgpu);
		vgpu_update_vblank_emulation(vgpu, true);
	} else {
		vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
		vgpu_update_vblank_emulation(vgpu, false);
	}
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	_MMIO(0xd80),
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
	PS_INVOCATION_COUNT, //_MMIO(0x2348)
	PS_DEPTH_COUNT, //_MMIO(0x2350)
	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x2754),
	_MMIO(0x28a0),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0,//_MMIO(0x7300)
	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0x83a8),
	_MMIO(0xb110),
	_MMIO(0xb118),
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
	_MMIO(0x64844),
};

/* a simple bsearch */
static inline bool in_whitelist(u32 reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}

static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return -EINVAL;
	}

	if (!in_whitelist(reg_nonpriv) &&
	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);
	} else
		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1 0
#define FDI_LINK_TRAIN_PATTERN2 1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
	    (rx_ctl & FDI_RX_ENABLE) &&
	    (rx_ctl & FDI_AUTO_TRAINING) &&
	    (tx_ctl & DP_TP_CTL_ENABLE) &&
	    (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If imr bit has been masked */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, i915_reg_t _start,
			       i915_reg_t _next, i915_reg_t _end)
{
	u32 start = i915_mmio_reg_offset(_start);
	u32 next = i915_mmio_reg_offset(_next);
	u32 end = i915_mmio_reg_offset(_end);
	u32 stride = next - start;

	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / stride;
}
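
/*
 * E.g. for evenly spaced registers at _start, _start + stride, ..., an
 * offset of _start + 2 * stride yields index 2 (PIPE_C in the FDI macros
 * below); anything outside [_start, _end] returns INVALID_INDEX.
 */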

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, FDI_RX_CTL(PIPE_A), FDI_RX_CTL(PIPE_B), FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, FDI_TX_CTL(PIPE_A), FDI_TX_CTL(PIPE_B), FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, FDI_RX_IMR(PIPE_A), FDI_RX_IMR(PIPE_B), FDI_RX_IMR(PIPE_C))

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported registers %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, DP_TP_CTL(PORT_A), DP_TP_CTL(PORT_B), DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}

static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(display, offset) \
	calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	u32 pipe = DSPSURF_TO_PIPE(display, offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);

	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;

	if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, SPRSURF(PIPE_A), SPRSURF(PIPE_B), SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 pipe = SPRSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int reg50080_mmio_write(struct intel_vgpu *vgpu,
			       unsigned int offset, void *p_data,
			       unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	enum pipe pipe = REG_50080_TO_PIPE(offset);
	enum plane_id plane = REG_50080_TO_PLANE(offset);
	int event = SKL_FLIP_EVENT(pipe, plane);

	write_vreg(vgpu, offset, p_data, bytes);
	if (plane == PLANE_PRIMARY) {
		vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
	} else {
		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
	}

	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum intel_gvt_event_type event;

	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
		event = AUX_CHANNEL_A;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_B)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
		event = AUX_CHANNEL_B;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_C)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
		event = AUX_CHANNEL_C;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_D)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
		event = AUX_CHANNEL_D;
	else {
		drm_WARN_ON(&dev_priv->drm, true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		u8 t)
{
	if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE |
			DP_LANE_CR_DONE << 4;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE |
			DP_LANE_CR_DONE << 4;
	} else if ((t & DP_TRAINING_PATTERN_MASK) ==
			DP_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_CHANNEL_EQ_DONE << 4;
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED |
			DP_LANE_SYMBOL_LOCKED << 4;
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_CHANNEL_EQ_DONE << 4;
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED |
			DP_LANE_SYMBOL_LOCKED << 4;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |=
			DP_INTERLANE_ALIGN_DONE;
	} else if ((t & DP_TRAINING_PATTERN_MASK) ==
			DP_TRAINING_PATTERN_DISABLE) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS |
			DP_RECEIVE_PORT_1_STATUS;
	}
}

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port) \
		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))

static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9 &&
	    offset != i915_mmio_reg_offset(DP_AUX_CH_CTL(port_index))) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
		   offset != i915_mmio_reg_offset(port_index ?
						  PCH_DP_AUX_CH_CTL(port_index) :
						  DP_AUX_CH_CTL(port_index))) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;
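
	/*
	 * Illustrative decode: msg 0x90020200 gives ctrl = 0x90, so
	 * op = 0x9 (DP_AUX_NATIVE_READ), addr = 0x202 (DP_LANE0_1_STATUS)
	 * and len = 0, i.e. a one-byte native read of the lane status.
	 */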

	if (op == DP_AUX_NATIVE_WRITE) {
		int t;
		u8 buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and “M” equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: Header (command + address + size)
		 * occupies 4 bytes, followed by (len + 1) bytes of data.
		 * See details at intel_dp_aux_transfer().
		 */
		if ((len + 1 + 4) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DP_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd, buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
					 dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == DP_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the READ */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
						 true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if ((len + 2) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;

				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
						(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					 dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
		     vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI caching meets maximum limits\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) {
		unsigned int sbi_offset;

		sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));

		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~SBI_STATUS_MASK;
	data |= SBI_STATUS_READY;

	data &= ~SBI_RESPONSE_MASK;
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) {
		unsigned int sbi_offset;

		sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));

		write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA));
	}
	return 0;
}

#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
			_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}
	if (invalid_read)
		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	vgpu->pv_notified = true;
	return 0;
}

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	struct intel_vgpu_mm *mm;
	u64 *pdps;

	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		fallthrough;
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
		return PTR_ERR_OR_ZERO(mm);
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
		break;
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
	char *env[3] = {NULL, NULL, NULL};
	char vmid_str[20];
	char display_ready_str[20];

	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
	env[0] = display_ready_str;

	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
	env[1] = vmid_str;

	return kobject_uevent_env(kobj, KOBJ_ADD, env);
}

static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	bool invalid_write = false;

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		handle_g2v_notification(vgpu, data);
		break;
	/* add xhot and yhot to handled list to avoid error log */
	case _vgtif_reg(cursor_x_hot):
	case _vgtif_reg(cursor_y_hot):
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
		invalid_write = true;
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
		break;
	default:
		invalid_write = true;
		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}

	if (!invalid_write)
		write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int pf_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 val = *(u32 *)p_data;

	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
	    offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
	    offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) {
		drm_WARN_ONCE(&i915->drm, true,
			      "VM(%d): guest is trying to scaling a plane\n",
			      vgpu->id);
		return 0;
	}

	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}

static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) &
	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
		vgpu_vreg(vgpu, offset) |=
			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	else
		vgpu_vreg(vgpu, offset) &=
			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	return 0;
}

static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
	else
		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;

	return 0;
}

static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
	return 0;
}

static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 mode;

	write_vreg(vgpu, offset, p_data, bytes);
	mode = vgpu_vreg(vgpu, offset);

	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
		drm_WARN_ONCE(&i915->drm, 1,
			      "VM(%d): iGVT-g doesn't support GuC\n",
			      vgpu->id);
		return 0;
	}

	return 0;
}

static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 trtte = *(u32 *)p_data;

	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
		drm_WARN(&i915->drm, 1,
			 "VM(%d): Use physical address for TRTT!\n",
			 vgpu->id);
		return -EINVAL;
	}
	write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = 0;

	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
		v |= (1 << 0);

	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
		v |= (1 << 8);

	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
		v |= (1 << 16);

	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
		v |= (1 << 24);

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	u32 cmd = value & 0xff;
	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);

	switch (cmd) {
	case GEN9_PCODE_READ_MEM_LATENCY:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
			/**
			 * "Read memory latency" command on gen9.
			 * Below memory latency values are read
			 * from skylake platform.
			 */
			if (!*data0)
				*data0 = 0x1e1a1100;
			else
				*data0 = 0x61514b3d;
		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
			/**
			 * "Read memory latency" command on gen9.
			 * Below memory latency values are read
			 * from Broxton MRB.
			 */
			if (!*data0)
				*data0 = 0x16080707;
			else
				*data0 = 0x16161616;
		}
		break;
	case SKL_PCODE_CDCLK_CONTROL:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915))
			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
		break;
	case GEN6_PCODE_READ_RC6VIDS:
		*data0 |= 0x1;
		break;
	}

	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
		     vgpu->id, value, *data0);
	/**
	 * PCODE_READY clear means ready for pcode read/write,
	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
	 * always emulate as pcode read/write success and ready for access
	 * anytime, since we don't touch real physical registers here.
	 */
	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (value != 0 &&
	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			     offset, value);
		return -EINVAL;
	}

	/*
	 * Need to emulate all the HWSP register write to ensure host can
	 * update the VM CSB status correctly. Here listed registers can
	 * support BDW, SKL or other platforms with same HWSP registers.
	 */
	if (unlikely(!engine)) {
		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
			     offset);
		return -EINVAL;
	}
	vgpu->hws_pga[engine->id] = value;
	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
		     vgpu->id, value, offset);

	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (IS_BROXTON(vgpu->gvt->gt->i915))
		v &= (1 << 31) | (1 << 29);
	else
		v &= (1 << 31) | (1 << 29) | (1 << 9) |
			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
	v |= (v >> 1);

	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}

static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	/* other bits are MBZ. */
	v &= (1 << 31) | (1 << 30);
	v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}
1817
1818static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1819 unsigned int offset, void *p_data, unsigned int bytes)
1820{
1821 u32 v = *(u32 *)p_data;
1822
1823 if (v & BXT_DE_PLL_PLL_ENABLE)
1824 v |= BXT_DE_PLL_LOCK;
1825
1826 vgpu_vreg(vgpu, offset) = v;
1827
1828 return 0;
1829}
1830
1831static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1832 unsigned int offset, void *p_data, unsigned int bytes)
1833{
1834 u32 v = *(u32 *)p_data;
1835
1836 if (v & PORT_PLL_ENABLE)
1837 v |= PORT_PLL_LOCK;
1838
1839 vgpu_vreg(vgpu, offset) = v;
1840
1841 return 0;
1842}
1843
static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;
	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;

	switch (offset) {
	case _PHY_CTL_FAMILY_EDP:
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
		break;
	case _PHY_CTL_FAMILY_DDI:
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
		break;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = vgpu_vreg(vgpu, offset);

	v &= ~UNIQUE_TRANGE_EN_METHOD;

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

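/*
 * Writes to a PCS_DW12 group ("GRP") register are broadcast to the per-lane
 * copies of the same register. The fixed deltas used below (0x600/0x800 for
 * the A/B group addresses, 0x400/0x600 otherwise) are the distances from
 * the GRP address to those lane registers, as implied by the offsets this
 * handler uses; they are not independently documented here.
 */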
static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
		vgpu_vreg(vgpu, offset - 0x600) = v;
		vgpu_vreg(vgpu, offset - 0x800) = v;
	} else {
		vgpu_vreg(vgpu, offset - 0x400) = v;
		vgpu_vreg(vgpu, offset - 0x600) = v;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (v & BIT(0)) {
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
			~PHY_RESERVED;
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
			PHY_POWER_GOOD;
	}

	if (v & BIT(1)) {
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
			~PHY_RESERVED;
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
			PHY_POWER_GOOD;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) = 0;
	return 0;
}

/*
 * FixMe:
 * If a guest fills a non-privileged batch buffer on ApolloLake/Broxton the
 * way Mesa i965 did in
 * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.),
 * the host GPU hangs on executing these MI_BATCH_BUFFERs because the flush
 * of the buffer filled by the VM vCPU is missing.
 * Temporarily work around this by setting the SNOOP bit for PAT3, which is
 * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
 * Performance is still expected to be low and will need further improvement.
 */
static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u64 pat =
		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
		GEN8_PPAT(1, 0) |
		GEN8_PPAT(2, 0) |
		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
		GEN8_PPAT(7, CHV_PPAT_SNOOP);

	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);

	return 0;
}
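
/*
 * For reference: GEN8_PPAT(index, value) places an 8-bit PAT entry at bits
 * [index * 8 + 7 : index * 8] of the 64-bit PPAT value, so e.g.
 * GEN8_PPAT(3, CHV_PPAT_SNOOP) contributes CHV_PPAT_SNOOP << 24. Since the
 * handler above covers GEN8_PRIVATE_PAT_LO, only entries 0-3 (the low 32
 * bits) take effect here; entries 4-7 belong to the _HI register.
 */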

static int guc_status_read(struct intel_vgpu *vgpu,
			   unsigned int offset, void *p_data,
			   unsigned int bytes)
{
	/* keep MIA_IN_RESET before clearing */
	read_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
	return 0;
}

static int mmio_read_from_hw(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(gvt, offset);

	/*
	 * Read the HW register when:
	 * a. the offset is not a ring MMIO,
	 * b. the ring for this offset is currently running on the HW, or
	 * c. the offset is a ring timestamp MMIO.
	 */

	if (!engine ||
	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
		intel_wakeref_t wakeref;

		wakeref = mmio_hw_access_pre(gvt->gt);
		vgpu_vreg(vgpu, offset) =
			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
		mmio_hw_access_post(gvt->gt, wakeref);
	}

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
	struct intel_vgpu_execlist *execlist;
	u32 data = *(u32 *)p_data;
	int ret = 0;

	if (drm_WARN_ON(&i915->drm, !engine))
		return -EINVAL;

	/*
	 * d3_entered is used to indicate that PPGTT invalidation should be
	 * skipped on vGPU reset: it is set on a D0->D3 PCI config write and
	 * cleared after the vGPU reset performed while resuming.
	 * On S0ix exit the device power state also transitions from D3 to
	 * D0, just as on S3 resume, but without a vGPU reset (which would be
	 * triggered by the QEMU device model). After S0ix exit all engines
	 * continue to work, yet d3_entered remains set, which would break
	 * the next vGPU reset (the expected PPGTT invalidation would be
	 * missed).
	 * Engines can only work in D0, so the first ELSP write gives GVT a
	 * chance to clear d3_entered.
	 */
	if (vgpu->d3_entered)
		vgpu->d3_entered = false;

	execlist = &vgpu->submission.execlist[engine->id];

	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
	if (execlist->elsp_dwords.index == 3) {
		ret = intel_vgpu_submit_execlist(vgpu, engine);
		if (ret)
			gvt_vgpu_err("fail submit workload on ring %s\n",
				     engine->name);
	}

	++execlist->elsp_dwords.index;
	execlist->elsp_dwords.index &= 0x3;
	return ret;
}
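
/*
 * For reference, the accumulation above works like this: the guest writes
 * the four dwords of an execlist submission to ELSP one at a time. Write N
 * (index N) is stored into data[3 - N], so after the fourth write data[0]
 * holds the most recently written dword, and that fourth write triggers
 * the actual workload submission. The index then wraps via "&= 0x3" so the
 * next submission starts over at data[3].
 */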

static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
	bool enable_execlist;
	int ret;

	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
	    IS_COMETLAKE(vgpu->gvt->gt->i915))
		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
	write_vreg(vgpu, offset, p_data, bytes);

	if (IS_MASKED_BITS_ENABLED(data, 1)) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
		return 0;
	}

	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
	    IS_MASKED_BITS_ENABLED(data, 2)) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
		return 0;
	}

	/*
	 * When PPGTT mode is enabled, check whether the guest has called
	 * pvinfo. If not, treat it as a non-GVT-g-aware guest and stop
	 * emulating its cfg space, MMIO, GTT, etc.
	 */
	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
	     IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
	    !vgpu->pv_notified) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
		return 0;
	}
	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);

		gvt_dbg_core("EXECLIST %s on ring %s\n",
			     (enable_execlist ? "enabling" : "disabling"),
			     engine->name);

		if (!enable_execlist)
			return 0;

		ret = intel_vgpu_select_submission_ops(vgpu,
						       engine->mask,
						       INTEL_VGPU_EXECLIST_SUBMISSION);
		if (ret)
			return ret;

		intel_vgpu_start_schedule(vgpu);
	}
	return 0;
}

static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	unsigned int id = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, offset) = 0;

	switch (offset) {
	case 0x4260:
		id = RCS0;
		break;
	case 0x4264:
		id = VCS0;
		break;
	case 0x4268:
		id = VCS1;
		break;
	case 0x426c:
		id = BCS0;
		break;
	case 0x4270:
		id = VECS0;
		break;
	default:
		return -EINVAL;
	}
	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);

	return 0;
}

static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
		data |= RESET_CTL_READY_TO_RESET;
	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
		data &= ~RESET_CTL_READY_TO_RESET;

	vgpu_vreg(vgpu, offset) = data;
	return 0;
}
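
/*
 * A note on the masked-bit convention used above and throughout this file:
 * in i915, _MASKED_BIT_ENABLE(bit) expands to ((bit) << 16 | (bit)), i.e.
 * the upper 16 bits of a written value select which bits take effect and
 * the lower 16 bits carry the new values. So a guest requesting a reset
 * writes both the mask and the value bit for RESET_CTL_REQUEST_RESET, and
 * the handler answers by setting RESET_CTL_READY_TO_RESET in the vreg.
 */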

static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
				    unsigned int offset, void *p_data,
				    unsigned int bytes)
{
	u32 data = *(u32 *)p_data;

	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
	write_vreg(vgpu, offset, p_data, bytes);

	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
	    IS_MASKED_BITS_ENABLED(data, 0x8))
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);

	return 0;
}

#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
	ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
		s, f, am, rm, d, r, w); \
	if (ret) \
		return ret; \
} while (0)

#define MMIO_DH(reg, d, r, w) \
	MMIO_F(reg, 4, 0, 0, 0, d, r, w)

#define MMIO_DFH(reg, d, f, r, w) \
	MMIO_F(reg, 4, f, 0, 0, d, r, w)

#define MMIO_GM(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)

#define MMIO_GM_RDR(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)

#define MMIO_RO(reg, d, f, rm, r, w) \
	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)

#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
	if (HAS_ENGINE(gvt->gt, VCS1)) \
		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)

#define MMIO_RING_DFH(prefix, d, f, r, w) \
	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)

#define MMIO_RING_GM(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_GM_RDR(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)

static int init_generic_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	int ret;

	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
		      intel_vgpu_reg_imr_handler);

	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);

	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);

	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
		gamw_echo_dev_rw_ia_write);

	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

#define RING_REG(base) _MMIO((base) + 0x28)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x134)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x6c)
	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);

	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);

	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);

	/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
	MMIO_RING_DFH(RING_REG, D_ALL,
		      F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
		      ring_mode_mmio_write);
#undef RING_REG

	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		      NULL, NULL);
	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		      NULL, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
		      mmio_read_from_hw, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
		      mmio_read_from_hw, NULL);

	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HSW_HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	/* display */
	MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL,
		pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL,
		pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL,
		pipeconf_mmio_write);
	MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL,
		pipeconf_mmio_write);
	MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);
	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
		reg50080_mmio_write);

	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
	       gmbus_mmio_write);
	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);

	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
	       dp_aux_ch_ctl_mmio_write);

	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);

	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);

	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);

	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
		PORTA_HOTPLUG_STATUS_MASK
		| PORTB_HOTPLUG_STATUS_MASK
		| PORTC_HOTPLUG_STATUS_MASK
		| PORTD_HOTPLUG_STATUS_MASK,
		NULL, NULL);

	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);

	MMIO_F(DP_AUX_CH_CTL(AUX_CH_A), 6 * 4, 0, 0, 0, D_ALL, NULL,
	       dp_aux_ch_ctl_mmio_write);

	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);

	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);

	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);

	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);

	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);

	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);

	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);

	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);

	return 0;
}

static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
	int ret;

	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);

	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
		intel_vgpu_reg_master_irq_handler);

	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
		      mmio_read_from_hw, NULL);

#define RING_REG(base) _MMIO((base) + 0xd0)
	MMIO_RING_F(RING_REG, 4, F_RO, 0,
		    ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
		    ring_reset_ctl_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x230)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x234)
	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
		    NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x244)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x370)
	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x3a0)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG

	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);

#define RING_REG(base) _MMIO((base) + 0x270)
	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);

	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
	       D_BDW_PLUS, NULL, force_nonpriv_write);

	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	return 0;
}

static int init_skl_mmio_info(struct intel_gvt *gvt)
{
	int ret;

	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);

	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);
	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
	       dp_aux_ch_ctl_mmio_write);

	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);

	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);

	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);

	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
	       NULL, NULL);
	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
	       NULL, NULL);

	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);

	/* TRTT */
	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
		 NULL, gen9_trtte_write);
	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
		 NULL, gen9_trtt_chicken_write);

	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);

#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		      NULL, csfe_chicken1_mmio_write);
#undef CSFE_CHICKEN1_REG
	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);
	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, NULL);

	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	return 0;
}

static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
	int ret;

	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
		bxt_port_pll_enable_write);

	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);

	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);

	return 0;
}

static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
					      unsigned int offset)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	int i;

	for (i = 0; i < num; i++, block++) {
		if (offset >= i915_mmio_reg_offset(block->offset) &&
		    offset < i915_mmio_reg_offset(block->offset) + block->size)
			return block;
	}
	return NULL;
}

/**
 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the MMIO
 * information table of GVT device
 *
 */
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct intel_gvt_mmio_info *e;
	int i;

	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
		kfree(e);

	kfree(gvt->mmio.mmio_block);
	gvt->mmio.mmio_block = NULL;
	gvt->mmio.num_mmio_block = 0;

	vfree(gvt->mmio.mmio_attribute);
	gvt->mmio.mmio_attribute = NULL;
}

static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
		       u32 size)
{
	struct intel_gvt *gvt = iter->data;
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		p = intel_gvt_find_mmio_info(gvt, i);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n", i);

			/*
			 * Return -EEXIST here to make the GVT-g load fail,
			 * so that duplicated MMIO definitions are caught as
			 * early as possible.
			 */
			return -EEXIST;
		}

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		info->read = intel_vgpu_default_mmio_read;
		info->write = intel_vgpu_default_mmio_write;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}

static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
			     u32 offset, u32 size)
{
	struct intel_gvt *gvt = iter->data;
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	void *ret;

	ret = krealloc(block,
		       (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
		       GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	gvt->mmio.mmio_block = block = ret;

	block += gvt->mmio.num_mmio_block;

	memset(block, 0, sizeof(*block));

	block->offset = _MMIO(offset);
	block->size = size;

	gvt->mmio.num_mmio_block++;

	return 0;
}

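/*
 * Ranges reported by the MMIO table are tracked per 4-byte register (hash
 * table entries with default handlers) when they are smaller than 1KB, and
 * as whole MMIO blocks otherwise. The MOCS range is the one exception: it
 * is large but still tracked per register.
 */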
static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
			  u32 size)
{
	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
		return handle_mmio(iter, offset, size);
	else
		return handle_mmio_block(iter, offset, size);
}

static int init_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_mmio_table_iter iter = {
		.i915 = gvt->gt->i915,
		.data = gvt,
		.handle_mmio_cb = handle_mmio_cb,
	};

	return intel_gvt_iterate_mmio_table(&iter);
}

static int init_mmio_block_handlers(struct intel_gvt *gvt)
{
	struct gvt_mmio_block *block;

	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
	if (!block) {
		WARN(1, "fail to assign handlers to mmio block %x\n",
		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
		return -ENODEV;
	}

	block->read = pvinfo_mmio_read;
	block->write = pvinfo_mmio_write;

	return 0;
}

/**
 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to setup the MMIO
 * information table for GVT device
 *
 * Returns:
 * zero on success, negative if failed.
 */
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct drm_i915_private *i915 = gvt->gt->i915;
	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
	int ret;

	gvt->mmio.mmio_attribute = vzalloc(size);
	if (!gvt->mmio.mmio_attribute)
		return -ENOMEM;

	ret = init_mmio_info(gvt);
	if (ret)
		goto err;

	ret = init_mmio_block_handlers(gvt);
	if (ret)
		goto err;

	ret = init_generic_mmio_info(gvt);
	if (ret)
		goto err;

	if (IS_BROADWELL(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_SKYLAKE(i915) ||
		   IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_BROXTON(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_bxt_mmio_info(gvt);
		if (ret)
			goto err;
	}

	return 0;
err:
	intel_gvt_clean_mmio_info(gvt);
	return ret;
}

/**
 * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
 * @gvt: a GVT device
 * @handler: the handler
 * @data: private data given to handler
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
	void *data)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	struct intel_gvt_mmio_info *e;
	int i, j, ret;

	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
		ret = handler(gvt, e->offset, data);
		if (ret)
			return ret;
	}

	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
		/* pvinfo data doesn't come from hw mmio */
		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
			continue;

		for (j = 0; j < block->size; j += 4) {
			ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_default_mmio_write() - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_mask_mmio_write - write mask register
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 mask, old_vreg;

	old_vreg = vgpu_vreg(vgpu, offset);
	write_vreg(vgpu, offset, p_data, bytes);
	mask = vgpu_vreg(vgpu, offset) >> 16;
	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
				  (vgpu_vreg(vgpu, offset) & mask);

	return 0;
}
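
/*
 * Worked example for the mask-register convention above: with an old vreg
 * of 0x00000000, a guest write of 0x00030002 yields mask = 0x0003, so only
 * bits 0-1 are updated and the vreg becomes 0x00000002. Bits whose mask
 * bit is clear keep their old value.
 */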

/**
 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: read or write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
			   void *pdata, unsigned int bytes, bool is_read)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio_info;
	struct gvt_mmio_block *mmio_block;
	gvt_mmio_func func;
	int ret;

	if (drm_WARN_ON(&i915->drm, bytes > 8))
		return -EINVAL;

	/*
	 * Handle special MMIO blocks.
	 */
	mmio_block = find_mmio_block(gvt, offset);
	if (mmio_block) {
		func = is_read ? mmio_block->read : mmio_block->write;
		if (func)
			return func(vgpu, offset, pdata, bytes);
		goto default_rw;
	}

	/*
	 * Normal tracked MMIOs.
	 */
	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
	if (!mmio_info) {
		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
		goto default_rw;
	}

	if (is_read)
		return mmio_info->read(vgpu, offset, pdata, bytes);
	else {
		u64 ro_mask = mmio_info->ro_mask;
		u32 old_vreg = 0;
		u64 data = 0;

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset))
			old_vreg = vgpu_vreg(vgpu, offset);

		if (likely(!ro_mask))
			ret = mmio_info->write(vgpu, offset, pdata, bytes);
		else if (!~ro_mask) {
			gvt_vgpu_err("try to write RO reg %x\n", offset);
			return 0;
		} else {
			/* keep the RO bits in the virtual register */
			memcpy(&data, pdata, bytes);
			data &= ~ro_mask;
			data |= vgpu_vreg(vgpu, offset) & ro_mask;
			ret = mmio_info->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
						  | (vgpu_vreg(vgpu, offset) & mask);
		}
	}

	return ret;

default_rw:
	return is_read ?
		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}

void intel_gvt_restore_fence(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int i, id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		intel_wakeref_t wakeref;

		wakeref = mmio_hw_access_pre(gvt->gt);
		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
		mmio_hw_access_post(gvt->gt, wakeref);
	}
}

static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
	struct intel_vgpu *vgpu = data;
	struct drm_i915_private *dev_priv = gvt->gt->i915;

	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));

	return 0;
}

void intel_gvt_restore_mmio(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		intel_wakeref_t wakeref;

		wakeref = mmio_hw_access_pre(gvt->gt);
		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
		mmio_hw_access_post(gvt->gt, wakeref);
	}
}