/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include <uapi/linux/pci_regs.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include <asm/kvm_page_track.h>

#include "gt/intel_gt.h"
#include "intel_gvt.h"

#include "debug.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

struct engine_mmio;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
        u32 max_support_vgpus;
        u32 cfg_space_size;
        u32 mmio_size;
        u32 mmio_bar;
        unsigned long msi_cap_offset;
        u32 gtt_start_offset;
        u32 gtt_entry_size;
        u32 gtt_entry_size_shift;
        int gmadr_bytes_in_cmd;
        u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
        u64 aperture_sz;
        u64 hidden_sz;
        struct drm_mm_node low_gm_node;
        struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
        struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
        u32 size;
};

struct intel_vgpu_mmio {
        void *vreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
        u64 size;
        bool tracked;
};

struct intel_vgpu_cfg_space {
        unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
        struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
        u32 pmcsr_off;
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
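
/*
 * Illustrative use of vgpu_cfg_space() (a sketch, not code from this
 * file): peek at a field of the virtual configuration space, e.g. the
 * vendor ID at PCI_VENDOR_ID from <uapi/linux/pci_regs.h>.
 *
 *	u16 vendor = *(u16 *)(vgpu_cfg_space(vgpu) + PCI_VENDOR_ID);
 */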

struct intel_vgpu_irq {
        bool irq_warn_once[INTEL_GVT_EVENT_MAX];
        DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
                       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
        void *va;
        u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
        struct intel_vgpu_i2c_edid i2c_edid;
        struct intel_vgpu_port ports[I915_MAX_PORTS];
        struct intel_vgpu_sbi sbi;
        enum port port_num;
};

struct vgpu_sched_ctl {
        int weight;
};

enum {
        INTEL_VGPU_EXECLIST_SUBMISSION = 1,
        INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
        const char *name;
        int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
        void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
        void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};

struct intel_vgpu_submission {
        struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
        struct list_head workload_q_head[I915_NUM_ENGINES];
        struct intel_context *shadow[I915_NUM_ENGINES];
        struct kmem_cache *workloads;
        atomic_t running_workload_num;
        union {
                u64 i915_context_pml4;
                u64 i915_context_pdps[GEN8_3LVL_PDPES];
        };
        DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
        void *ring_scan_buffer[I915_NUM_ENGINES];
        int ring_scan_buffer_size[I915_NUM_ENGINES];
        const struct intel_vgpu_submission_ops *ops;
        int virtual_submission_interface;
        bool active;
        struct {
                u32 lrca;
                bool valid;
                u64 ring_context_gpa;
        } last_ctx[I915_NUM_ENGINES];
};

#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"

enum {
        INTEL_VGPU_STATUS_ATTACHED = 0,
        INTEL_VGPU_STATUS_ACTIVE,
        INTEL_VGPU_STATUS_NR_BITS,
};

struct intel_vgpu {
        struct vfio_device vfio_device;
        struct intel_gvt *gvt;
        struct mutex vgpu_lock;
        int id;
        DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;

        /* Both sched_data and sched_ctl can be seen as part of the global
         * gvt scheduler structure, so the two fields below are protected
         * by sched_lock, not vgpu_lock.
         */
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;

        struct intel_vgpu_fence fence;
        struct intel_vgpu_gm gm;
        struct intel_vgpu_cfg_space cfg_space;
        struct intel_vgpu_mmio mmio;
        struct intel_vgpu_irq irq;
        struct intel_vgpu_gtt gtt;
        struct intel_vgpu_opregion opregion;
        struct intel_vgpu_display display;
        struct intel_vgpu_submission submission;
        struct radix_tree_root page_track_tree;
        u32 hws_pga[I915_NUM_ENGINES];
        /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
        bool d3_entered;

        struct dentry *debugfs;

        struct list_head dmabuf_obj_list_head;
        struct mutex dmabuf_lock;
        struct idr object_idr;
        struct intel_vgpu_vblank_timer vblank_timer;

        u32 scan_nonprivbb;

        struct vfio_region *region;
        int num_regions;
        struct eventfd_ctx *msi_trigger;

        /*
         * Two caches are used to avoid mapping duplicated pages (e.g.
         * scratch pages). This helps to reduce DMA setup overhead.
         */
        struct rb_root gfn_cache;
        struct rb_root dma_addr_cache;
        unsigned long nr_cache_entries;
        struct mutex cache_lock;

        struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
        struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

/* validating VM health status */
#define vgpu_is_vm_unhealthy(ret_val) \
        (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
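
/*
 * Illustrative use (a sketch, not code from this header): a failed
 * command scan whose return value indicates an unhealthy guest is
 * typically escalated to failsafe mode.
 *
 *	ret = intel_gvt_scan_and_shadow_workload(workload);
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */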

struct intel_gvt_gm {
        unsigned long vgpu_allocated_low_gm_size;
        unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
        unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
        i915_reg_t offset;
        unsigned int size;
        gvt_mmio_func read;
        gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
        u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is in GVT's mmio save-restore list and in hardware
 * logical context image
 */
#define F_SR_IN_CTX (1 << 7)
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH (1 << 8)

        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;

        DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
        unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
        void *cfg_space;
        void *mmio;
        bool firmware_loaded;
};

struct intel_vgpu_config {
        unsigned int low_mm;
        unsigned int high_mm;
        unsigned int fence;

        /*
         * A vGPU with a weight of 8 will get twice as much GPU time as a
         * vGPU with a weight of 4 on a contended host; different vGPU types
         * have different weights. Legal weights range from 1 to 16.
         */
        unsigned int weight;
        enum intel_vgpu_edid edid;
        const char *name;
};

struct intel_vgpu_type {
        struct mdev_type type;
        char name[16];
        const struct intel_vgpu_config *conf;
};

struct intel_gvt {
        /* GVT scope lock, protects GVT itself and all resources not yet
         * protected by more specific locks (vgpu and scheduler locks).
         */
        struct mutex lock;
        /* scheduler scope lock, protects gvt and vgpu schedule related data */
        struct mutex sched_lock;

        struct intel_gt *gt;
        struct idr vgpu_idr; /* vGPU IDR pool */

        struct intel_gvt_device_info device_info;
        struct intel_gvt_gm gm;
        struct intel_gvt_fence fence;
        struct intel_gvt_mmio mmio;
        struct intel_gvt_firmware firmware;
        struct intel_gvt_irq irq;
        struct intel_gvt_gtt gtt;
        struct intel_gvt_workload_scheduler scheduler;
        struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
        DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
        struct mdev_parent parent;
        struct mdev_type **mdev_types;
        struct intel_vgpu_type *types;
        unsigned int num_types;
        struct intel_vgpu *idle_vgpu;

        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;

        /* service_request is always manipulated with atomic bit ops, so no
         * big gvt lock is needed to protect it.
         */
        unsigned long service_request;

        struct {
                struct engine_mmio *mmio;
                int ctx_mmio_count[I915_NUM_ENGINES];
                u32 *tlb_mmio_offset_list;
                u32 tlb_mmio_offset_list_cnt;
                u32 *mocs_mmio_offset_list;
                u32 mocs_mmio_offset_list_cnt;
        } engine_mmio_list;
        bool is_reg_whitelist_updated;

        struct dentry *debugfs_root;
};

enum {
        /* Scheduling trigger by timer */
        INTEL_GVT_REQUEST_SCHED = 0,

        /* Scheduling trigger by event */
        INTEL_GVT_REQUEST_EVENT_SCHED = 1,

        /* per-vGPU vblank emulation request */
        INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
        INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
                + GVT_MAX_VGPU,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
                int service)
{
        set_bit(service, (void *)&gvt->service_request);
        wake_up(&gvt->service_thread_wq);
}
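
/*
 * Illustrative caller side (a sketch): raise a request bit and let the
 * service thread handle it asynchronously, with no need for gvt->lock.
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *
 * The service thread is expected to consume requests with atomic bit
 * ops such as test_and_clear_bit() on gvt->service_request.
 */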

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
                                     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
                                    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
                                   + gvt_hidden_sz(gvt) - 1)
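
/*
 * Resulting GM address layout, as implied by the macros above (a
 * sketch; the actual sizes depend on the platform's GGTT configuration):
 *
 *	0                aperture_sz                  ggtt_gm_sz
 *	+----------------+----------------------------+
 *	| aperture (low) |      hidden (high) GM      |
 *	+----------------+----------------------------+
 */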

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
        (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
        (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
        (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
        (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

/* ring context size, i.e. the first 0x50 dwords */
#define RING_CTX_SIZE 320

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              const struct intel_vgpu_config *conf);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                            u32 fence, u64 value);

/*
 * Macros for easily accessing vGPU virtual/shadow register.
 * Explicitly separate use for typed MMIO reg or real offset.
 */
#define vgpu_vreg_t(vgpu, reg) \
        (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
        (*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
        (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
        (*(u64 *)(vgpu->mmio.vreg + (offset)))
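
/*
 * Illustrative use (a sketch): read or write a virtual register either
 * by typed i915_reg_t or by raw MMIO offset ("offset" here is a
 * hypothetical register offset):
 *
 *	u32 val = vgpu_vreg(vgpu, offset);
 *	vgpu_vreg(vgpu, offset) = val | BIT(0);
 */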

#define for_each_active_vgpu(gvt, vgpu, id) \
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
                for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
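
/*
 * Illustrative iteration (a sketch; callers normally hold gvt->lock so
 * the IDR cannot change underneath them):
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		handle_vgpu(vgpu);	// hypothetical per-vGPU work
 */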

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
                u32 offset, u32 val, bool low)
{
        u32 *pval;

        /* BAR offset should be 32 bits aligned */
        offset = rounddown(offset, 4);
        pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

        if (low) {
                /*
                 * only update bit 31 - bit 4,
                 * leave bit 3 - bit 0 unchanged.
                 */
                *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
        } else {
                *pval = val;
        }
}
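
/*
 * Illustrative (sketch) handling of a 64-bit BAR programmed by the
 * guest as two dword config writes, low dword first:
 *
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0, lo, true);
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0 + 4, hi, false);
 */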

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
                          const struct intel_vgpu_config *conf);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
        ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
        ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
        ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
          (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
        ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
         (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
        ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
         (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
        (gvt_gmadr_is_aperture(gvt, gmadr) || \
         gvt_gmadr_is_hidden(gvt, gmadr))
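
/*
 * Illustrative (sketch) validation before translating a guest graphics
 * address:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gmadr))
 *		return -EINVAL;
 */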

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                               bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                                void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
        /* We are a 64-bit BAR. */
        return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
                        PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);

enum {
        GVT_FAILSAFE_UNSUPPORTED_GUEST,
        GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
        GVT_FAILSAFE_GUEST_ERR,
};

static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
        return intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt,
                                       intel_wakeref_t wakeref)
{
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
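
/*
 * Illustrative (sketch) pattern around direct hardware MMIO access:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = mmio_hw_access_pre(gvt->gt);
 *	... touch hardware registers ...
 *	mmio_hw_access_post(gvt->gt, wakeref);
 */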

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
                        struct intel_gvt *gvt, unsigned int offset)
{
        gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by GPU commands, false otherwise.
 */
static inline bool intel_gvt_mmio_is_cmd_accessible(
                        struct intel_gvt *gvt, unsigned int offset)
{
        return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_set_cmd_accessible - mark an MMIO as accessible by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessible(
                        struct intel_gvt *gvt, unsigned int offset)
{
        gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed by an unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by an unaligned address.
 */
static inline bool intel_gvt_mmio_is_unalign(
                        struct intel_gvt *gvt, unsigned int offset)
{
        return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
                        struct intel_gvt *gvt, unsigned int offset)
{
        return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
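
/*
 * For mode-mask registers the high 16 bits of a write select which of
 * the low 16 bits are actually updated, so (as a sketch):
 *
 *	write (1 << 16) | (1 << 0)	sets bit 0
 *	write (1 << 16)			clears bit 0
 *
 * Bits whose mask bit is zero keep their current value.
 */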

/**
 * intel_gvt_mmio_is_sr_in_ctx - check if an MMIO has the F_SR_IN_CTX mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has the F_SR_IN_CTX mask, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_is_sr_in_ctx(
                        struct intel_gvt *gvt, unsigned int offset)
{
        return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}

/**
 * intel_gvt_mmio_set_sr_in_ctx - mark an MMIO as being in GVT's mmio
 * save-restore list and also in the hardware logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_sr_in_ctx(
                        struct intel_gvt *gvt, unsigned int offset)
{
        gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
/**
 * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose command write
 * needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_write_patch(
                        struct intel_gvt *gvt, unsigned int offset)
{
        gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}

/**
 * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's command write
 * needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if a GPU command write to the MMIO should be patched.
 */
static inline bool intel_gvt_mmio_is_cmd_write_patch(
                        struct intel_gvt *gvt, unsigned int offset)
{
        return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}

/**
 * intel_gvt_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
                void *buf, unsigned long len)
{
        if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}

/**
 * intel_gvt_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
                unsigned long gpa, void *buf, unsigned long len)
{
        if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
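
/*
 * Illustrative (sketch) guest memory round trip: read a guest-written
 * descriptor, then write a status back ("desc" and its layout are
 * hypothetical).
 *
 *	u64 desc;
 *
 *	if (intel_gvt_read_gpa(vgpu, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 *	desc |= BIT_ULL(0);	// mark as handled
 *	return intel_gvt_write_gpa(vgpu, gpa, &desc, sizeof(desc));
 */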

void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                                 unsigned long size, dma_addr_t *dma_addr);
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
                                    dma_addr_t dma_addr);

#include "trace.h"

#endif