// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_mmio_range.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

#include "display/intel_fbc_regs.h"

/**
 * DOC: Hardware workarounds
 *
 * Hardware workarounds are register programming documented to be executed in
 * the driver that fall outside of the normal programming sequences for a
 * platform. There are some basic categories of workarounds, depending on
 * how/when they are applied:
 *
 * - Context workarounds: workarounds that touch registers that are
 *   saved/restored to/from the HW context image. The list is emitted (via Load
 *   Register Immediate commands) once when initializing the device and saved
 *   in the default context. That default context is then used on every context
 *   creation to have a "primed golden context", i.e. a context image that
 *   already contains the changes needed to all the registers.
 *
 *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
 *   variants respective to the targeted platforms.
 *
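 *   A minimal illustrative sketch of that emission step (the complete logic,
 *   including the read-modify-write handling, lives in
 *   intel_engine_emit_ctx_wa() below):
 *
 *   .. code-block:: c
 *
 *	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
 *	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
 *		*cs++ = i915_mmio_reg_offset(wa->reg);
 *		*cs++ = wa->set;
 *	}
 *	*cs++ = MI_NOOP;
 *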
 * - Engine workarounds: the list of these WAs is applied whenever the specific
 *   engine is reset. It's also possible that a set of engine classes share a
 *   common power domain and they are reset together. This happens on some
 *   platforms with render and compute engines. In this case (at least) one of
 *   them needs to keep the workaround programming: the approach taken in the
 *   driver is to tie those workarounds to the first compute/render engine that
 *   is registered. When executing with GuC submission, engine resets are
 *   outside of kernel driver control, hence the list of registers involved is
 *   written once, on engine initialization, and then passed to GuC, that
 *   saves/restores their values before/after the reset takes place. See
 *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
 *
 *   Workarounds for registers specific to RCS and CCS should be implemented in
 *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
 *   registers belonging to BCS, VCS or VECS should be implemented in
 *   xcs_engine_wa_init(). Workarounds for registers not belonging to a
 *   specific engine's MMIO range but that are part of the common RCS/CCS reset
 *   domain should be implemented in general_render_compute_wa_init(). The
 *   settings about the CCS load balancing should be added in
 *   ccs_engine_wa_mode().
 *
 * - GT workarounds: the list of these WAs is applied whenever these registers
 *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
 *
 *   GT workarounds should be implemented in the \*_gt_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Register whitelist: some workarounds need to be implemented in userspace,
 *   but need to touch privileged registers. The whitelist in the kernel
 *   instructs the hardware to allow the access to happen. From the kernel
 *   side, this is just a special case of a MMIO workaround (as we write the
 *   list of these to-be-whitelisted registers to some special HW registers).
 *
 *   Register whitelisting should be done in the \*_whitelist_build() variants
 *   respective to the targeted platforms.
 *
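 *   A minimal illustrative sketch, assuming one of the RING_FORCE_TO_NONPRIV
 *   slot registers (the real helpers are the whitelist_reg() family elsewhere
 *   in this file):
 *
 *   .. code-block:: c
 *
 *	wa_write(wal, RING_FORCE_TO_NONPRIV(base, slot),
 *		 i915_mmio_reg_offset(reg) | flags);
 *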
 * - Workaround batchbuffers: buffers that get executed automatically by the
 *   hardware on every HW context restore. These buffers are created and
 *   programmed in the default context so the hardware always goes through
 *   those programming sequences when switching contexts. The support for
 *   workaround batchbuffers is enabled by these hardware mechanisms:
 *
 *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
 *      context, pointing the hardware to jump to that location when that
 *      offset is reached in the context restore. The workaround batchbuffer in
 *      the driver currently uses this mechanism for all platforms.
 *
 *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
 *      pointing the hardware to a buffer to continue executing after the
 *      engine registers are restored in a context restore sequence. This is
 *      currently not used in the driver.
 *
 * - Other: There are WAs that, due to their nature, cannot be applied from a
 *   central place. Those are peppered around the rest of the code, as needed.
 *   Workarounds related to the display IP are the main example.
 *
 * .. [1] Technically, some registers are powercontext saved & restored, so
 *    they survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things, so it's the approach taken in the driver.
 */

static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
			  const char *name, const char *engine_name)
{
	wal->gt = gt;
	wal->name = name;
	wal->engine_name = engine_name;
}

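/*
 * Workaround lists grow (and are finally trimmed) in chunks of this many
 * entries; see _wa_add() and wa_init_finish().
 */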
#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup_array(wal->list, wal->count,
						     sizeof(*list), GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
	       wal->wa_count, wal->name, wal->engine_name);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	struct drm_i915_private *i915 = wal->gt->i915;
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list),
				     GFP_KERNEL);
		if (!list) {
			drm_err(&i915->drm, "No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

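	/* Binary search for an existing workaround on the same register offset. */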
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				drm_err(&i915->drm,
					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					i915_mmio_reg_offset(wa_->reg),
					wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

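	/* Bubble the new entry down so the list stays sorted by register offset. */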
	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}

static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
		.masked_reg = masked_reg,
		.is_mcr = 1,
	};

	_wa_add(wal, &wa);
}

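/*
 * WA operations on regular (non-masked) registers. These are applied as a
 * read-modify-write: "clear" selects the bits to be cleared and "set" the
 * bits to be set, while clear | set doubles as the readback-verification
 * mask.
 */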
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
	wa_mcr_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}

static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
	wa_mcr_write_clr_set(wal, reg, clr, 0);
}

/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
 * portion of the register without a rmw: you simply write in the upper 16 bits
 * the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
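/*
 * For example (illustrative only): wa_masked_en(wal, reg, BIT(3)) records a
 * write of _MASKED_BIT_ENABLE(BIT(3)) == (BIT(3) << 16) | BIT(3); on that
 * write the hardware updates only bit 3 and leaves every other bit of the
 * register untouched, with no read-modify-write needed.
 */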

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
			u32 mask, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisable_RenderCache_OperationalFlush:snb */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization:ivb,hsw
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	wa_masked_en(wal,
		     CACHE_MODE_1,
		     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
			 DOP_CLOCK_GATING_DISABLE);

	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
			 GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (INTEL_INFO(i915)->gt == 3 ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 FLOW_CONTROL_ENABLE |
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			 GEN9_ENABLE_YV12_BUGFIX |
			 GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
				 GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
		   0 /* write-only, so skip validation */,
		   true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0,
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);

	if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
		/*
		 * Disable Repacking for Compression (masked R/W access)
		 * before rendering compressed surfaces for display.
		 */
		wa_masked_en(wal, CACHE_MODE_0_GEN7,
			     DISABLE_REPACKING_FOR_COMPRESSION);
	}
}

/*
 * These settings aren't actually workarounds, but general tuning settings
 * that need to be programmed on the DG2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
			     FF_MODE2_TDS_TIMER_128);
}

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * Wa_1409142259:tgl,dg1,adl-p,adl-n
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p,adl-n
	 * Wa_1408979724:tgl,dg1,adl-p,adl-n
	 * Wa_14010443199:tgl,rkl,dg1,adl-p,adl-n
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p,adl-n
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p,adl-n
	 * Wa_22010465259:tgl,rkl,dg1,adl-s,adl-p,adl-n
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337 - GS_TIMER
	 *
	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
	 * need to program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of GEN12_FF_MODE2 is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read from the CPU.
	 *
	 * The default value for this register is zero for all fields.
	 * So instead of doing a RMW we should just write the desired values
	 * for TDS and GS timers. Note that since the readback can't be trusted,
	 * the clear mask is just set to ~0 to make sure other bits are not
	 * inadvertently set. For the same reason read verification is ignored.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       ~0,
	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
	       0, false);

	if (!IS_DG1(i915)) {
		/* Wa_1806527549 */
		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

		/* Wa_1606376872 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
	}

	/*
	 * This bit must be set to enable performance optimization for fast
	 * clears.
	 */
	wa_mcr_write_or(wal, GEN8_WM_CHICKEN2, WAIT_ON_DEPTH_STALL_DONE_DISABLE);
}

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16013271637:dg2 */
	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_18018764978:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

	/* Wa_18019271663:dg2 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}

static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	dg2_ctx_gt_tuning_init(engine, wal);

	/*
	 * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
	 * gen12_emit_indirect_ctx_rcs() rather than here on some early
	 * steppings.
	 */
	if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	      IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}

static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	xelpg_ctx_gt_tuning_init(engine, wal);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014947963 */
		wa_masked_field_set(wal, VF_PREEMPTION,
				    PREEMPTION_VERTEX_COUNT, 0x4000);

		/* Wa_16013271637 */
		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

		/* Wa_18019627453 */
		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

		/* Wa_18018764978 */
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
	}

	/* Wa_18019271663 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers. When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior. However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility. At the moment there is no Linux
	 * userspace that utilizes third-level batchbuffers, so using the
	 * legacy meaning is the correct thing to do and avoids userspace
	 * needing to make any changes. If/when we have userspace consumers
	 * that want to utilize third-level batch nesting, we can provide a
	 * context parameter to allow them to opt-in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * Some blitter commands do not have a field for MOCS; those commands
	 * use the MOCS index pointed to by BLIT_CCTL. The BLIT_CCTL registers
	 * need to be programmed as un-cached.
	 */
	if (engine->class == COPY_ENGINE_CLASS) {
		mocs = engine->gt->mocs.uc_index;
		wa_write_clr_set(wal,
				 BLIT_CCTL(engine->mmio_base),
				 BLIT_CCTL_MASK,
				 BLIT_CCTL_MOCS(mocs, mocs));
	}
}

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround defined
 * by the hardware team, but general context registers. Adding that context
 * register programming to the context workaround list allows us to use the
 * wa framework for proper application and validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, engine->gt, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not actual workarounds but programming of
	 * context registers using the workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_ctx_workarounds_init(engine, wal);
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct intel_uncore *uncore = rq->engine->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
		cs = intel_ring_begin(rq, (wal->count * 2 + 6));
	else
		cs = intel_ring_begin(rq, (wal->count * 2 + 2));

	if (IS_ERR(cs))
		return PTR_ERR(cs);

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(wal->gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

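	/*
	 * Emit the whole list as one MI_LOAD_REGISTER_IMM packet. LRI can
	 * only write full register values, so any workaround that touches
	 * only some bits is resolved with a CPU-side read-modify-write first.
	 */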
	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val;

		/* Skip reading the register if it's not really needed */
		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
			val = wa->set;
		} else {
			val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
			val &= ~wa->clr;
			val |= wa->set;
		}

		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = val;
	}
	*cs++ = MI_NOOP;

	/* Wa_14019789679 */
	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
		*cs++ = CMD_3DSTATE_MESH_CONTROL;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(wal->gt, flags);

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void __set_mcr_steering(struct i915_wa_list *wal,
			       i915_reg_t steering_reg,
			       unsigned int slice, unsigned int subslice)
{
	u32 mcr, mcr_mask;

	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}

static void debug_dump_steering(struct intel_gt *gt)
{
	struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
					       "MCR Steering:");

	if (drm_debug_enabled(DRM_UT_DRIVER))
		intel_gt_mcr_report_steering(&p, gt, false);
}

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
			 unsigned int slice, unsigned int subslice)
{
	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

	gt->default_steering.groupid = slice;
	gt->default_steering.instanceid = subslice;

	debug_dump_steering(gt);
}

static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned int subslice;

	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off. When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (gt->info.l3bank_mask & BIT(subslice))
		gt->steering_table[L3BANK] = NULL;

	__add_mcr_wa(gt, wal, 0, subslice);
}

static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned long slice, subslice = 0, slice_mask = 0;
	u32 lncf_mask = 0;
	int i;

	/*
	 * On Xe_HP the steering increases in complexity. There are now several
	 * more units that require steering and we're not guaranteed to be able
	 * to find a common setting for all of them. These are:
	 * - GSLICE (fusable)
	 * - DSS (sub-unit within gslice; fusable)
	 * - L3 Bank (fusable)
	 * - MSLICE (fusable)
	 * - LNCF (sub-unit within mslice; always present if mslice is present)
	 *
	 * We'll do our default/implicit steering based on GSLICE (in the
	 * sliceid field) and DSS (in the subsliceid field). If we can
	 * find overlap between the valid MSLICE and/or LNCF values with
	 * a suitable GSLICE, then we can just reuse the default value and
	 * skip any explicit steering at runtime.
	 *
	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
	 * a valid sliceid value. DSS steering is the only type of steering
	 * that utilizes the 'subsliceid' bits.
	 *
	 * Also note that, even though the steering domain is called "GSlice"
	 * and it is encoded in the register using the gslice format, the spec
	 * says that the combined (geometry | compute) fuse should be used to
	 * select the steering.
	 */

	/* Find the potential gslice candidates */
	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
						       GEN_DSS_PER_GSLICE);

	/*
	 * Find the potential LNCF candidates. Either LNCF within a valid
	 * mslice is fine.
	 */
	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
		lncf_mask |= (0x3 << (i * 2));

	/*
	 * Are there any sliceid values that work for both GSLICE and LNCF
	 * steering?
	 */
	if (slice_mask & lncf_mask) {
		slice_mask &= lncf_mask;
		gt->steering_table[LNCF] = NULL;
	}

	/* How about sliceid values that also work for MSLICE steering? */
	if (slice_mask & gt->info.mslice_mask) {
		slice_mask &= gt->info.mslice_mask;
		gt->steering_table[MSLICE] = NULL;
	}

	slice = __ffs(slice_mask);
	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
		GEN_DSS_PER_GSLICE;

	__add_mcr_wa(gt, wal, slice, subslice);

	/*
	 * SQIDI ranges are special because they use different steering
	 * registers than everything else we work with. On XeHP SDV and
	 * DG2-G10, any value in the steering registers will work fine since
	 * all instances are present, but DG2-G11 only has SQIDI instances at
	 * ID's 2 and 3, so we need to steer to one of those. For simplicity
	 * we'll just steer to a hardcoded "2" since that value will work
	 * everywhere.
	 */
	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

	/*
	 * On DG2, GAM registers have a dedicated steering control register
	 * and must always be programmed to a hardcoded groupid of "1."
	 */
	if (IS_DG2(gt->i915))
		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}

static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	icl_wa_init_mcr(gt, wal);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/*
	 * Wa_1408615072:icl,ehl (vsunit)
	 * Wa_1407596294:icl,ehl (hsunit)
	 */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Wa_1407352427:icl,ehl */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
		    PSDUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl,ehl */
	wa_mcr_write_or(wal,
			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
			GWUNIT_CLKGATE_DIS);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	     IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
		wa_write_or(wal,
			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/*
	 * This is not a documented workaround, but rather an optimization
	 * to reduce sampler power.
	 */
	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

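	/* Only the even instances of the video decode engines need this WA. */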
	for_each_engine(engine, gt, id) {
		if (engine->class != VIDEO_DECODE_CLASS ||
		    (engine->instance % 2))
			continue;

		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
			    IECPUNIT_CLKGATE_DIS);
	}
}

static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	icl_wa_init_mcr(gt, wal);

	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
	wa_14011060649(gt, wal);

	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

	/*
	 * Wa_14015795083
	 *
	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
	 * preventing i915 from modifying it for this workaround. Skip the
	 * readback verification for this workaround on debug builds; if the
	 * workaround doesn't stick due to firmware behavior, it's not an error
	 * that we want CI to flag.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);
}

static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(gt, wal);

	/* Wa_1409420604:dg1 */
	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
			CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}

static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	xehp_init_mcr(gt, wal);

	/* Wa_14011060649:dg2 */
	wa_14011060649(gt, wal);

	if (IS_DG2_G10(gt->i915)) {
		/* Wa_22010523718:dg2 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    CG3DDISCFEG_CLKGATE_DIS);

		/* Wa_14011006942:dg2 */
		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
				DSS_ROUTER_CLKGATE_DIS);
	}

	/* Wa_14014830051:dg2 */
	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

	/*
	 * Wa_14015795083
	 * Skip verification for possibly locked register.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_1509235366:dg2 */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010648519:dg2 */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}

static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Wa_14018575942 / Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014830051 */
		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

		/* Wa_14015795083 */
		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
	}

	/*
	 * Unlike older platforms, we no longer set up implicit steering here;
	 * all MCR accesses are explicitly steered.
	 */
	debug_dump_steering(gt);
}

static void
wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

	for_each_engine(engine, gt, id)
		if (engine->class == VIDEO_DECODE_CLASS)
			wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
				    MFXPIPE_CLKGATE_DIS);
}

static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	wa_16021867713(gt, wal);

	/*
	 * Wa_14018778641
	 * Wa_18018781329
	 *
	 * Note that although these registers are MCR on the primary
	 * GT, the media GT's versions are regular singleton registers.
	 */
	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);

	/*
	 * Wa_14018575942
	 *
	 * The issue is seen on media KPI tests running on the VDBOX engines,
	 * especially with VP9 encoding workloads.
	 */
	wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	debug_dump_steering(gt);
}

/*
 * The bspec performance guide has recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them through the
 * workaround infrastructure to make sure they're (re)applied at the proper
 * times.
 *
 * The programming in this function is for settings that persist through
 * engine resets and also are not part of any engine's register state context.
 * I.e., settings that only need to be re-applied in the event of a full GT
 * reset.
 */
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}

	if (IS_DG2(gt->i915)) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}
}
1676
1677static void
1678gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1679{
1680 struct drm_i915_private *i915 = gt->i915;
1681
1682 gt_tuning_settings(gt, wal);
1683
1684 if (gt->type == GT_MEDIA) {
1685 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1686 xelpmp_gt_workarounds_init(gt, wal);
1687 else
1688 MISSING_CASE(MEDIA_VER_FULL(i915));
1689
1690 return;
1691 }
1692
1693 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
1694 xelpg_gt_workarounds_init(gt, wal);
1695 else if (IS_DG2(i915))
1696 dg2_gt_workarounds_init(gt, wal);
1697 else if (IS_DG1(i915))
1698 dg1_gt_workarounds_init(gt, wal);
1699 else if (GRAPHICS_VER(i915) == 12)
1700 gen12_gt_workarounds_init(gt, wal);
1701 else if (GRAPHICS_VER(i915) == 11)
1702 icl_gt_workarounds_init(gt, wal);
1703 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1704 cfl_gt_workarounds_init(gt, wal);
1705 else if (IS_GEMINILAKE(i915))
1706 glk_gt_workarounds_init(gt, wal);
1707 else if (IS_KABYLAKE(i915))
1708 kbl_gt_workarounds_init(gt, wal);
1709 else if (IS_BROXTON(i915))
1710 gen9_gt_workarounds_init(gt, wal);
1711 else if (IS_SKYLAKE(i915))
1712 skl_gt_workarounds_init(gt, wal);
1713 else if (IS_HASWELL(i915))
1714 hsw_gt_workarounds_init(gt, wal);
1715 else if (IS_VALLEYVIEW(i915))
1716 vlv_gt_workarounds_init(gt, wal);
1717 else if (IS_IVYBRIDGE(i915))
1718 ivb_gt_workarounds_init(gt, wal);
1719 else if (GRAPHICS_VER(i915) == 6)
1720 snb_gt_workarounds_init(gt, wal);
1721 else if (GRAPHICS_VER(i915) == 5)
1722 ilk_gt_workarounds_init(gt, wal);
1723 else if (IS_G4X(i915))
1724 g4x_gt_workarounds_init(gt, wal);
1725 else if (GRAPHICS_VER(i915) == 4)
1726 gen4_gt_workarounds_init(gt, wal);
1727 else if (GRAPHICS_VER(i915) <= 8)
1728 ;
1729 else
1730 MISSING_CASE(GRAPHICS_VER(i915));
1731}
1732
1733void intel_gt_init_workarounds(struct intel_gt *gt)
1734{
1735 struct i915_wa_list *wal = &gt->wa_list;
1736
1737 wa_init_start(wal, gt, name: "GT", engine_name: "global");
1738 gt_init_workarounds(gt, wal);
1739 wa_init_finish(wal);
1740}
1741
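
/*
 * Worked example (values invented for illustration): for an entry with
 * wa->set = 0x8 and wa->read = 0x8, i.e. "bit 3 must be set and can be
 * read back", a current register value of 0x0 gives
 * (0x0 ^ 0x8) & 0x8 == 0x8, so a relevant bit no longer matches what was
 * programmed and the workaround is reported as lost.
 */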
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
	  const char *name, const char *from)
{
	if ((cur ^ wa->set) & wa->read) {
		gt_err(gt,
		       "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
		       name, from, i915_mmio_reg_offset(wa->reg),
		       cur, cur & wa->read, wa->set & wa->read);

		return false;
	}

	return true;
}
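
/*
 * Apply a whole workaround list in one critical section: take the MCR
 * steering lock and the uncore lock, grab forcewake once for every
 * domain the list touches (wal_get_fw_for_rmw()), then read-modify-write
 * each entry. The rmw is open-coded rather than using the usual helpers
 * because multicast registers must be read through the explicit MCR
 * interfaces while their writes are broadcast to all instances.
 */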
static void wa_list_apply(const struct i915_wa_list *wal)
{
	struct intel_gt *gt = wal->gt;
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val, old = 0;

		/* open-coded rmw due to steering */
		if (wa->clr)
			old = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
		val = (old & ~wa->clr) | wa->set;
		if (val != old || !wa->clr) {
			if (wa->is_mcr)
				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
			else
				intel_uncore_write_fw(uncore, wa->reg, val);
		}

		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
			u32 val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);

			wa_verify(gt, wa, val, wal->name, "application");
		}
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(&gt->wa_list);
}

static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt, &gt->wa_list, from);
}

__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}
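
/*
 * Note that the access-mode and range flags live in bits of the slot
 * value outside the register address field (see is_nonpriv_flags_valid()
 * above), so OR'ing them into the offset below lets a single
 * RING_FORCE_TO_NONPRIV slot encode both which register is exposed and
 * how userspace may access it.
 */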
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.is_mcr = 1,
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.mcr_reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
{
	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another:
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}

static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	allow_read_ctx_timestamp(engine);

	cfl_whitelist_build(engine);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/*
		 * Wa_1808121037:tgl
		 * Wa_14012131227:dg1
		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
		 */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);

		/* Required by recommended tuning setting (not a workaround) */
		whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}

static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
		break;
	default:
		break;
	}
}

static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
		break;
	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, engine->gt, "whitelist", engine->name);

	if (engine->gt->type == GT_MEDIA)
		; /* none yet */
	else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_whitelist_build(engine);
	else if (IS_DG2(i915))
		dg2_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}
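
/*
 * Program the whitelist into the engine's RING_FORCE_TO_NONPRIV slots:
 * one slot per entry, holding the register offset with the access and
 * range flags OR'ed in. Every remaining slot is pointed at the
 * innocuous RING_NOPID register so that stale or garbage slot contents
 * can never expose an unintended register.
 */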
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

/*
 * engine_fake_wa_init() is a placeholder for programming registers that
 * are not part of an official workaround defined by the hardware team.
 * Programming such registers through the workaround framework lets us
 * reuse its machinery for proper application and verification.
 */
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	u8 mocs_w, mocs_r;

	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
	 * by the command streamer when executing commands that don't have
	 * a way to explicitly specify a MOCS setting. The default should
	 * usually reference whichever MOCS entry corresponds to uncached
	 * behavior, although use of a WB cached entry is recommended by the
	 * spec in certain circumstances on specific platforms.
	 */
	if (GRAPHICS_VER(engine->i915) >= 12) {
		mocs_r = engine->gt->mocs.uc_index;
		mocs_w = engine->gt->mocs.uc_index;

		if (HAS_L3_CCS_READ(engine->i915) &&
		    engine->class == COMPUTE_CLASS) {
			mocs_r = engine->gt->mocs.wb_index;

			/*
			 * Even on the few platforms where MOCS 0 is a
			 * legitimate table entry, it's never the correct
			 * setting to use here; we can assume the MOCS init
			 * just forgot to initialize wb_index.
			 */
			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
		}

		wa_masked_field_set(wal,
				    RING_CMD_CCTL(engine->mmio_base),
				    CMD_CCTL_MOCS_MASK,
				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
	}
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_22014600077 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_1509727124 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22012856258 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_DISABLE_READ_SUPPRESSION);
	}

	if (IS_DG2(i915)) {
		/*
		 * Wa_22010960976:dg2
		 * Wa_14013347512:dg2
		 */
		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
	}

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
	    IS_DG2(i915)) {
		/* Wa_14015150844 */
		wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
			   _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
			   0, true);
	}

	if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
		 * Wa_18019627453:dg2
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 ENABLE_SMALLPL);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1409804808 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/* Wa_14010229206 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
	}

	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
		/*
		 * Wa_1607297627
		 *
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings, so we trust the "all steppings"
		 * variant.
		 */
		wa_masked_en(wal,
			     RING_PSMI_CTL(RENDER_RING_BASE),
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 11) {
		/* This is not a Wa. Enable it for better image quality. */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_mcr_write_clr_set(wal,
				     GEN11_SCRATCH2,
				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				     0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	/*
	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
	 * beyond) allow the kernel-mode driver to choose between two different
	 * options for controlling preemption granularity and behavior.
	 *
	 * Option 1 (hardware default):
	 *   Preemption settings are controlled in a global manner via
	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
	 *   and settings chosen by the kernel-mode driver will apply to all
	 *   userspace clients.
	 *
	 * Option 2:
	 *   Preemption settings are controlled on a per-context basis via
	 *   register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
	 *   context switch and is writable by userspace (e.g., via
	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
	 *   which allows different userspace drivers/clients to select
	 *   different settings, or to change those settings on the fly in
	 *   response to runtime needs. This option was known by the name
	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
	 *   that name is somewhat misleading as other non-granularity
	 *   preemption settings are also impacted by this decision.
	 *
	 * On Linux, our policy has always been to let userspace drivers
	 * control preemption granularity/settings (Option 2). This was
	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
	 * userspace developed before object-level preemption was enabled would
	 * not behave well if i915 were to go with Option 1 and enable that
	 * preemption in a global manner). On gen9 each context would have
	 * object-level preemption disabled by default (see
	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
	 * userspace drivers could opt in to object-level preemption as they
	 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
	 * even though it is no longer necessary for ABI compatibility when
	 * enabling a new platform, it does ensure that userspace will be able
	 * to implement any workarounds that show up requiring temporary
	 * adjustments to preemption behavior at runtime.
	 *
	 * Notes/Workarounds:
	 *  - Wa_14015141709: On DG2 and early steppings of MTL,
	 *    CS_CHICKEN1[0] does not disable object-level preemption as
	 *    it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
	 *    using Option 1). Effectively this means userspace is unable
	 *    to disable object-level preemption on these platforms/steppings
	 *    despite the setting here.
	 *
	 *  - Wa_16013994831: May require that userspace program
	 *    CS_CHICKEN1[10] when certain runtime conditions are true.
	 *    Userspace requires Option 2 to be in effect for their update of
	 *    CS_CHICKEN1[10] to be effective.
	 *
	 * Other workarounds may appear in the future that will also require
	 * Option 2 behavior to allow proper userspace implementation.
	 */
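
	/*
	 * Illustrative sketch (not something i915 itself emits): with
	 * Option 2 in effect, a userspace batch could retune its own
	 * preemption behavior with a masked LRI write along the lines
	 * of:
	 *
	 *   MI_LOAD_REGISTER_IMM(1)
	 *   0x2580                  (CS_CHICKEN1)
	 *   (mask << 16) | value    (masked-register write format)
	 */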
	if (GRAPHICS_VER(i915) >= 9)
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);

	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 9) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_mcr_write_or(wal,
				BDW_SCRATCH1,
				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_mcr_write_clr_set(wal,
					     GEN8_L3SQCREG1,
					     L3_PRIO_CREDITS_MASK,
					     L3_GENERAL_PRIO_CREDITS(62) |
					     L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN8_LQSC_FLUSH_COHERENT_LINES);

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
				     EVICTION_PERF_FIX_ENABLE, 0);
	}

	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (INTEL_INFO(i915)->gt == 1)
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     RING_MODE_GEN7(RENDER_RING_BASE),
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN7_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);
	}

	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     RING_MI_MODE(RENDER_RING_BASE),
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting.
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * Bspec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN6_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 * policy. [...] This bit must be reset. LRA replacement
		 * policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);

	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. Once it is loaded, it is executed and the stored
		 * address may no longer be valid, leading to a GPU hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */,
		       true);
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}

	/* Wa_16018031267, Wa_16018063123 */
	if (NEEDS_FASTCOLOR_BLT_WABB(engine))
		wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
				    XEHP_BLITTER_SCHEDULING_MODE_MASK,
				    XEHP_BLITTER_ROUND_ROBIN_MODE);
}

static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	/* boilerplate for any CCS engine workaround */
}

/*
 * The bspec performance guide lists recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them with the same
 * workaround infrastructure to ensure that they're automatically added to
 * the GuC save/restore lists, re-applied at the right times, and checked for
 * any conflicting programming requested by real workarounds.
 *
 * Programming settings should be added here only if their registers are not
 * part of an engine's register state context. If a register is part of a
 * context, then any tuning settings should be programmed in an appropriate
 * function invoked by __intel_engine_init_ctx_wa().
 */
static void
add_render_compute_tuning_settings(struct intel_gt *gt,
				   struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);

	/*
	 * This tuning setting proves beneficial only on ATS-M designs; the
	 * default "age based" setting is optimal on regular DG2 and other
	 * platforms.
	 */
	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
					THREAD_EX_ARB_MODE_RR_AFTER_DEP);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}

static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u32 mode;

	if (!IS_DG2(gt->i915))
		return;

	/*
	 * Wa_14019159160: This workaround, along with others, leads to
	 * significant challenges in utilizing load balancing among the
	 * CCS slices. Consequently, an architectural decision has been
	 * made to completely disable automatic CCS load balancing.
	 */
	wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);

	/*
	 * After having disabled automatic load balancing we need to
	 * assign all slices to a single CCS. We will call it CCS mode 1.
	 */
	mode = intel_gt_apply_ccs_mode(gt);
	wa_masked_en(wal, XEHP_CCS_MODE, mode);
}

/*
 * The workarounds in this function apply to shared registers in
 * the general render reset domain that aren't tied to a
 * specific engine. Since all render+compute engines get reset
 * together, and the contents of these registers are lost during
 * the shared render domain reset, we'll define such workarounds
 * here and then add them to just a single RCS or CCS engine's
 * workaround list (whichever engine has the
 * I915_ENGINE_FIRST_RENDER_COMPUTE flag).
 */
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	add_render_compute_tuning_settings(gt, wal);

	if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * This is not a Wa (although referred to as
		 * WaSetInidrectStateOverride in places); it allows
		 * applications that reference sampler states through
		 * the BindlessSamplerStateBaseAddress to have their
		 * border color relative to DynamicStateBaseAddress
		 * rather than BindlessSamplerStateBaseAddress.
		 *
		 * Otherwise SAMPLER_STATE border colors have to be
		 * copied in multiple heaps (DynamicStateBaseAddress &
		 * BindlessSamplerStateBaseAddress).
		 *
		 * BSpec: 46052
		 */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
		/* Wa_14017856879 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);

		/* Wa_14020495402 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		/*
		 * Wa_14017066071
		 * Wa_14017654203
		 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 MTL_DISABLE_SAMPLER_SC_OOO);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		/* Wa_22015279794 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 DISABLE_PREFETCH_INTO_IC);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22013037850 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
				DISABLE_128B_EVICTION_COMMAND_UDW);

		/* Wa_18017747507 */
		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22014226127 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
	}

	if (IS_DG2(i915)) {
		/* Wa_14015227452:dg2,pvc */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);

		/*
		 * Wa_16011620976:dg2_g11
		 * Wa_22015475538:dg2
		 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);

		/* Wa_18028616096 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
	}

	if (IS_DG2_G11(i915)) {
		/*
		 * Wa_22012826095:dg2
		 * Wa_22013059131:dg2
		 */
		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
				     MAXREQS_PER_BANK,
				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));

		/* Wa_22013059131:dg2 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);

		/*
		 * Wa_22012654132
		 *
		 * Note that register 0xE420 is write-only and cannot be read
		 * back for verification on DG2 (due to Wa_14012342262), so
		 * we need to explicitly skip the readback.
		 */
		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
			   _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
			   0 /* write-only, so skip validation */,
			   true);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	engine_fake_wa_init(engine, wal);

	/*
	 * These are common workarounds that just need to be applied
	 * to a single RCS/CCS engine's workaround list since
	 * they're reset as part of the general render domain reset.
	 */
	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
		general_render_compute_wa_init(engine, wal);
		ccs_engine_wa_mode(engine, wal);
	}

	if (engine->class == COMPUTE_CLASS)
		ccs_engine_wa_init(engine, wal);
	else if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	wa_init_start(wal, engine->gt, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(&engine->wa_list);
}

static const struct i915_mmio_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};

static const struct i915_mmio_range mcr_ranges_gen12[] = {
	{ .start = 0x8150, .end = 0x815f },
	{ .start = 0x9520, .end = 0x955f },
	{ .start = 0xb100, .end = 0xb3ff },
	{ .start = 0xde80, .end = 0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static const struct i915_mmio_range mcr_ranges_xehp[] = {
	{ .start = 0x4000, .end = 0x4aff },
	{ .start = 0x5200, .end = 0x52ff },
	{ .start = 0x5400, .end = 0x7fff },
	{ .start = 0x8140, .end = 0x815f },
	{ .start = 0x8c80, .end = 0x8dff },
	{ .start = 0x94d0, .end = 0x955f },
	{ .start = 0x9680, .end = 0x96ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xc800, .end = 0xcfff },
	{ .start = 0xd800, .end = 0xd8ff },
	{ .start = 0xdc00, .end = 0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_mmio_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU-initiated MMIO. Routing does not
	 * work for CS access, so we cannot verify them on this path.
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}
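
/*
 * Emit one MI_STORE_REGISTER_MEM per verifiable workaround register so
 * the engine itself dumps the current values into the scratch buffer.
 * Each command occupies four dwords; for list slot i:
 *
 *   cs[0] = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT (+1 length on gen8+)
 *   cs[1] = register offset
 *   cs[2] = GGTT address of results[i]
 *   cs[3] = 0 (upper address dword on gen8+, otherwise a padding MI_NOOP)
 *
 * Registers behind the MCR selector are skipped, since CS accesses are
 * not steered (see mcr_range()).
 */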
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
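
/*
 * Verify an engine's workaround list from the GPU's point of view:
 * allocate a scratch buffer, have the engine dump the registers into it
 * via wa_list_srm(), then run wa_verify() on the results. This
 * complements the CPU MMIO based wa_list_verify() by checking the
 * values the command streamer actually observes.
 */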
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif