| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
| 3 | * Kernel-based Virtual Machine driver for Linux |
| 4 | * |
| 5 | * This header defines architecture specific interfaces, x86 version |
| 6 | */ |
| 7 | |
| 8 | #ifndef _ASM_X86_KVM_HOST_H |
| 9 | #define _ASM_X86_KVM_HOST_H |
| 10 | |
| 11 | #include <linux/types.h> |
| 12 | #include <linux/mm.h> |
| 13 | #include <linux/mmu_notifier.h> |
| 14 | #include <linux/tracepoint.h> |
| 15 | #include <linux/cpumask.h> |
| 16 | #include <linux/irq_work.h> |
| 17 | #include <linux/irq.h> |
| 18 | #include <linux/workqueue.h> |
| 19 | |
| 20 | #include <linux/kvm.h> |
| 21 | #include <linux/kvm_para.h> |
| 22 | #include <linux/kvm_types.h> |
| 23 | #include <linux/perf_event.h> |
| 24 | #include <linux/pvclock_gtod.h> |
| 25 | #include <linux/clocksource.h> |
| 26 | #include <linux/irqbypass.h> |
| 27 | #include <linux/kfifo.h> |
| 28 | #include <linux/sched/vhost_task.h> |
| 29 | #include <linux/call_once.h> |
| 30 | #include <linux/atomic.h> |
| 31 | |
| 32 | #include <asm/apic.h> |
| 33 | #include <asm/pvclock-abi.h> |
| 34 | #include <asm/debugreg.h> |
| 35 | #include <asm/desc.h> |
| 36 | #include <asm/mtrr.h> |
| 37 | #include <asm/msr-index.h> |
| 38 | #include <asm/msr.h> |
| 39 | #include <asm/asm.h> |
| 40 | #include <asm/irq_remapping.h> |
| 41 | #include <asm/kvm_page_track.h> |
| 42 | #include <asm/kvm_vcpu_regs.h> |
| 43 | #include <asm/reboot.h> |
| 44 | #include <hyperv/hvhdk.h> |
| 45 | |
| 46 | #define __KVM_HAVE_ARCH_VCPU_DEBUGFS |
| 47 | |
| 48 | /* |
| 49 | * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if |
| 50 | * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS). |
| 51 | */ |
| 52 | #ifdef CONFIG_KVM_MAX_NR_VCPUS |
| 53 | #define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS |
| 54 | #else |
| 55 | #define KVM_MAX_VCPUS 1024 |
| 56 | #endif |
| 57 | |
| 58 | /* |
| 59 | * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs |
| 60 | * might be larger than the actual number of VCPUs because the |
| 61 | * APIC ID encodes CPU topology information. |
| 62 | * |
| 63 | * In the worst case, we'll need less than one extra bit for the |
| 64 | * Core ID, and less than one extra bit for the Package (Die) ID, |
| 65 | * so ratio of 4 should be enough. |
| 66 | */ |
| 67 | #define KVM_VCPU_ID_RATIO 4 |
| 68 | #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO) |
| 69 | |
| 70 | /* memory slots that are not exposed to userspace */ |
| 71 | #define KVM_INTERNAL_MEM_SLOTS 3 |
| 72 | |
| 73 | #define KVM_HALT_POLL_NS_DEFAULT 200000 |
| 74 | |
| 75 | #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS |
| 76 | |
| 77 | #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ |
| 78 | KVM_DIRTY_LOG_INITIALLY_SET) |
| 79 | |
| 80 | #define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \ |
| 81 | KVM_BUS_LOCK_DETECTION_EXIT) |
| 82 | |
| 83 | #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \ |
| 84 | KVM_X86_NOTIFY_VMEXIT_USER) |
| 85 | |
| 86 | /* x86-specific vcpu->requests bit members */ |
| 87 | #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0) |
| 88 | #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1) |
| 89 | #define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2) |
| 90 | #define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3) |
| 91 | #define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4) |
| 92 | #define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5) |
| 93 | #define KVM_REQ_EVENT KVM_ARCH_REQ(6) |
| 94 | #define KVM_REQ_APF_HALT KVM_ARCH_REQ(7) |
| 95 | #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8) |
| 96 | #define KVM_REQ_NMI KVM_ARCH_REQ(9) |
| 97 | #define KVM_REQ_PMU KVM_ARCH_REQ(10) |
| 98 | #define KVM_REQ_PMI KVM_ARCH_REQ(11) |
| 99 | #ifdef CONFIG_KVM_SMM |
| 100 | #define KVM_REQ_SMI KVM_ARCH_REQ(12) |
| 101 | #endif |
| 102 | #define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13) |
| 103 | #define KVM_REQ_MCLOCK_INPROGRESS \ |
| 104 | KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 105 | #define KVM_REQ_SCAN_IOAPIC \ |
| 106 | KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 107 | #define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16) |
| 108 | #define KVM_REQ_APIC_PAGE_RELOAD \ |
| 109 | KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 110 | #define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18) |
| 111 | #define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19) |
| 112 | #define KVM_REQ_HV_RESET KVM_ARCH_REQ(20) |
| 113 | #define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21) |
| 114 | #define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22) |
| 115 | #define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23) |
| 116 | #define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24) |
| 117 | #define KVM_REQ_APICV_UPDATE \ |
| 118 | KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 119 | #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26) |
| 120 | #define KVM_REQ_TLB_FLUSH_GUEST \ |
| 121 | KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 122 | #define KVM_REQ_APF_READY KVM_ARCH_REQ(28) |
| 123 | #define KVM_REQ_RECALC_INTERCEPTS KVM_ARCH_REQ(29) |
| 124 | #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \ |
| 125 | KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 126 | #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \ |
| 127 | KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 128 | #define KVM_REQ_HV_TLB_FLUSH \ |
| 129 | KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
| 130 | #define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \ |
| 131 | KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT) |
| 132 | |
| 133 | #define CR0_RESERVED_BITS \ |
| 134 | (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ |
| 135 | | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ |
| 136 | | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) |
| 137 | |
| 138 | #define CR4_RESERVED_BITS \ |
| 139 | (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ |
| 140 | | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ |
| 141 | | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ |
| 142 | | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ |
| 143 | | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ |
| 144 | | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \ |
| 145 | | X86_CR4_LAM_SUP | X86_CR4_CET)) |
| 146 | |
| 147 | #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) |
| 148 | |
| 149 | |
| 150 | |
| 151 | #define INVALID_PAGE (~(hpa_t)0) |
| 152 | #define VALID_PAGE(x) ((x) != INVALID_PAGE) |
| 153 | |
| 154 | /* KVM Hugepage definitions for x86 */ |
| 155 | #define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G |
| 156 | #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1) |
| 157 | #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) |
| 158 | #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) |
| 159 | #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) |
| 160 | #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) |
| 161 | #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) |
| 162 | |
| 163 | #define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50 |
| 164 | #define KVM_MIN_ALLOC_MMU_PAGES 64UL |
| 165 | #define KVM_MMU_HASH_SHIFT 12 |
| 166 | #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) |
| 167 | #define KVM_MIN_FREE_MMU_PAGES 5 |
| 168 | #define KVM_REFILL_PAGES 25 |
| 169 | #define KVM_MAX_CPUID_ENTRIES 256 |
| 170 | #define KVM_NR_VAR_MTRR 8 |
| 171 | |
| 172 | #define ASYNC_PF_PER_VCPU 64 |
| 173 | |
| 174 | enum kvm_reg { |
| 175 | VCPU_REGS_RAX = __VCPU_REGS_RAX, |
| 176 | VCPU_REGS_RCX = __VCPU_REGS_RCX, |
| 177 | VCPU_REGS_RDX = __VCPU_REGS_RDX, |
| 178 | VCPU_REGS_RBX = __VCPU_REGS_RBX, |
| 179 | VCPU_REGS_RSP = __VCPU_REGS_RSP, |
| 180 | VCPU_REGS_RBP = __VCPU_REGS_RBP, |
| 181 | VCPU_REGS_RSI = __VCPU_REGS_RSI, |
| 182 | VCPU_REGS_RDI = __VCPU_REGS_RDI, |
| 183 | #ifdef CONFIG_X86_64 |
| 184 | VCPU_REGS_R8 = __VCPU_REGS_R8, |
| 185 | VCPU_REGS_R9 = __VCPU_REGS_R9, |
| 186 | VCPU_REGS_R10 = __VCPU_REGS_R10, |
| 187 | VCPU_REGS_R11 = __VCPU_REGS_R11, |
| 188 | VCPU_REGS_R12 = __VCPU_REGS_R12, |
| 189 | VCPU_REGS_R13 = __VCPU_REGS_R13, |
| 190 | VCPU_REGS_R14 = __VCPU_REGS_R14, |
| 191 | VCPU_REGS_R15 = __VCPU_REGS_R15, |
| 192 | #endif |
| 193 | VCPU_REGS_RIP, |
| 194 | NR_VCPU_REGS, |
| 195 | |
| 196 | VCPU_EXREG_PDPTR = NR_VCPU_REGS, |
| 197 | VCPU_EXREG_CR0, |
| 198 | VCPU_EXREG_CR3, |
| 199 | VCPU_EXREG_CR4, |
| 200 | VCPU_EXREG_RFLAGS, |
| 201 | VCPU_EXREG_SEGMENTS, |
| 202 | VCPU_EXREG_EXIT_INFO_1, |
| 203 | VCPU_EXREG_EXIT_INFO_2, |
| 204 | }; |
| 205 | |
| 206 | enum { |
| 207 | VCPU_SREG_ES, |
| 208 | VCPU_SREG_CS, |
| 209 | VCPU_SREG_SS, |
| 210 | VCPU_SREG_DS, |
| 211 | VCPU_SREG_FS, |
| 212 | VCPU_SREG_GS, |
| 213 | VCPU_SREG_TR, |
| 214 | VCPU_SREG_LDTR, |
| 215 | }; |
| 216 | |
| 217 | enum exit_fastpath_completion { |
| 218 | EXIT_FASTPATH_NONE, |
| 219 | EXIT_FASTPATH_REENTER_GUEST, |
| 220 | EXIT_FASTPATH_EXIT_HANDLED, |
| 221 | EXIT_FASTPATH_EXIT_USERSPACE, |
| 222 | }; |
| 223 | typedef enum exit_fastpath_completion fastpath_t; |
| 224 | |
| 225 | struct x86_emulate_ctxt; |
| 226 | struct x86_exception; |
| 227 | union kvm_smram; |
| 228 | enum x86_intercept; |
| 229 | enum x86_intercept_stage; |
| 230 | |
| 231 | #define KVM_NR_DB_REGS 4 |
| 232 | |
| 233 | #define DR6_BUS_LOCK (1 << 11) |
| 234 | #define DR6_BD (1 << 13) |
| 235 | #define DR6_BS (1 << 14) |
| 236 | #define DR6_BT (1 << 15) |
| 237 | #define DR6_RTM (1 << 16) |
| 238 | /* |
| 239 | * DR6_ACTIVE_LOW combines fixed-1 and active-low bits. |
| 240 | * We can regard all the bits in DR6_FIXED_1 as active_low bits; |
| 241 | * they will never be 0 for now, but when they are defined |
| 242 | * in the future it will require no code change. |
| 243 | * |
| 244 | * DR6_ACTIVE_LOW is also used as the init/reset value for DR6. |
| 245 | */ |
| 246 | #define DR6_ACTIVE_LOW 0xffff0ff0 |
| 247 | #define DR6_VOLATILE 0x0001e80f |
| 248 | #define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE) |
| 249 | |
| 250 | #define DR7_BP_EN_MASK 0x000000ff |
| 251 | #define DR7_GE (1 << 9) |
| 252 | #define DR7_GD (1 << 13) |
| 253 | #define DR7_VOLATILE 0xffff2bff |
| 254 | |
| 255 | #define KVM_GUESTDBG_VALID_MASK \ |
| 256 | (KVM_GUESTDBG_ENABLE | \ |
| 257 | KVM_GUESTDBG_SINGLESTEP | \ |
| 258 | KVM_GUESTDBG_USE_HW_BP | \ |
| 259 | KVM_GUESTDBG_USE_SW_BP | \ |
| 260 | KVM_GUESTDBG_INJECT_BP | \ |
| 261 | KVM_GUESTDBG_INJECT_DB | \ |
| 262 | KVM_GUESTDBG_BLOCKIRQ) |
| 263 | |
| 264 | #define PFERR_PRESENT_MASK BIT(0) |
| 265 | #define PFERR_WRITE_MASK BIT(1) |
| 266 | #define PFERR_USER_MASK BIT(2) |
| 267 | #define PFERR_RSVD_MASK BIT(3) |
| 268 | #define PFERR_FETCH_MASK BIT(4) |
| 269 | #define PFERR_PK_MASK BIT(5) |
| 270 | #define PFERR_SS_MASK BIT(6) |
| 271 | #define PFERR_SGX_MASK BIT(15) |
| 272 | #define PFERR_GUEST_RMP_MASK BIT_ULL(31) |
| 273 | #define PFERR_GUEST_FINAL_MASK BIT_ULL(32) |
| 274 | #define PFERR_GUEST_PAGE_MASK BIT_ULL(33) |
| 275 | #define PFERR_GUEST_ENC_MASK BIT_ULL(34) |
| 276 | #define PFERR_GUEST_SIZEM_MASK BIT_ULL(35) |
| 277 | #define PFERR_GUEST_VMPL_MASK BIT_ULL(36) |
| 278 | |
| 279 | /* |
| 280 | * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks |
| 281 | * when emulating instructions that triggers implicit access. |
| 282 | */ |
| 283 | #define PFERR_IMPLICIT_ACCESS BIT_ULL(48) |
| 284 | /* |
| 285 | * PRIVATE_ACCESS is a KVM-defined flag us to indicate that a fault occurred |
| 286 | * when the guest was accessing private memory. |
| 287 | */ |
| 288 | #define PFERR_PRIVATE_ACCESS BIT_ULL(49) |
| 289 | #define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS) |
| 290 | |
| 291 | /* apic attention bits */ |
| 292 | #define KVM_APIC_CHECK_VAPIC 0 |
| 293 | /* |
| 294 | * The following bit is set with PV-EOI, unset on EOI. |
| 295 | * We detect PV-EOI changes by guest by comparing |
| 296 | * this bit with PV-EOI in guest memory. |
| 297 | * See the implementation in apic_update_pv_eoi. |
| 298 | */ |
| 299 | #define KVM_APIC_PV_EOI_PENDING 1 |
| 300 | |
| 301 | struct kvm_kernel_irqfd; |
| 302 | struct kvm_kernel_irq_routing_entry; |
| 303 | |
| 304 | /* |
| 305 | * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page |
| 306 | * also includes TDP pages) to determine whether or not a page can be used in |
| 307 | * the given MMU context. This is a subset of the overall kvm_cpu_role to |
| 308 | * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows |
| 309 | * allocating 2 bytes per gfn instead of 4 bytes per gfn. |
| 310 | * |
| 311 | * Upper-level shadow pages having gptes are tracked for write-protection via |
| 312 | * gfn_write_track. As above, gfn_write_track is a 16 bit counter, so KVM must |
| 313 | * not create more than 2^16-1 upper-level shadow pages at a single gfn, |
| 314 | * otherwise gfn_write_track will overflow and explosions will ensue. |
| 315 | * |
| 316 | * A unique shadow page (SP) for a gfn is created if and only if an existing SP |
| 317 | * cannot be reused. The ability to reuse a SP is tracked by its role, which |
| 318 | * incorporates various mode bits and properties of the SP. Roughly speaking, |
| 319 | * the number of unique SPs that can theoretically be created is 2^n, where n |
| 320 | * is the number of bits that are used to compute the role. |
| 321 | * |
| 322 | * But, even though there are 20 bits in the mask below, not all combinations |
| 323 | * of modes and flags are possible: |
| 324 | * |
| 325 | * - invalid shadow pages are not accounted, mirror pages are not shadowed, |
| 326 | * so the bits are effectively 18. |
| 327 | * |
| 328 | * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging); |
| 329 | * execonly and ad_disabled are only used for nested EPT which has |
| 330 | * has_4_byte_gpte=0. Therefore, 2 bits are always unused. |
| 331 | * |
| 332 | * - the 4 bits of level are effectively limited to the values 2/3/4/5, |
| 333 | * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE |
| 334 | * paging has exactly one upper level, making level completely redundant |
| 335 | * when has_4_byte_gpte=1. |
| 336 | * |
| 337 | * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if |
| 338 | * cr0_wp=0, therefore these three bits only give rise to 5 possibilities. |
| 339 | * |
| 340 | * Therefore, the maximum number of possible upper-level shadow pages for a |
| 341 | * single gfn is a bit less than 2^13. |
| 342 | */ |
| 343 | union kvm_mmu_page_role { |
| 344 | u32 word; |
| 345 | struct { |
| 346 | unsigned level:4; |
| 347 | unsigned has_4_byte_gpte:1; |
| 348 | unsigned quadrant:2; |
| 349 | unsigned direct:1; |
| 350 | unsigned access:3; |
| 351 | unsigned invalid:1; |
| 352 | unsigned efer_nx:1; |
| 353 | unsigned cr0_wp:1; |
| 354 | unsigned smep_andnot_wp:1; |
| 355 | unsigned smap_andnot_wp:1; |
| 356 | unsigned ad_disabled:1; |
| 357 | unsigned guest_mode:1; |
| 358 | unsigned passthrough:1; |
| 359 | unsigned is_mirror:1; |
| 360 | unsigned :4; |
| 361 | |
| 362 | /* |
| 363 | * This is left at the top of the word so that |
| 364 | * kvm_memslots_for_spte_role can extract it with a |
| 365 | * simple shift. While there is room, give it a whole |
| 366 | * byte so it is also faster to load it from memory. |
| 367 | */ |
| 368 | unsigned smm:8; |
| 369 | }; |
| 370 | }; |
| 371 | |
| 372 | /* |
| 373 | * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties |
| 374 | * relevant to the current MMU configuration. When loading CR0, CR4, or EFER, |
| 375 | * including on nested transitions, if nothing in the full role changes then |
| 376 | * MMU re-configuration can be skipped. @valid bit is set on first usage so we |
| 377 | * don't treat all-zero structure as valid data. |
| 378 | * |
| 379 | * The properties that are tracked in the extended role but not the page role |
| 380 | * are for things that either (a) do not affect the validity of the shadow page |
| 381 | * or (b) are indirectly reflected in the shadow page's role. For example, |
| 382 | * CR4.PKE only affects permission checks for software walks of the guest page |
| 383 | * tables (because KVM doesn't support Protection Keys with shadow paging), and |
| 384 | * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level. |
| 385 | * |
| 386 | * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role. |
| 387 | * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and |
| 388 | * SMAP, but the MMU's permission checks for software walks need to be SMEP and |
| 389 | * SMAP aware regardless of CR0.WP. |
| 390 | */ |
| 391 | union kvm_mmu_extended_role { |
| 392 | u32 word; |
| 393 | struct { |
| 394 | unsigned int valid:1; |
| 395 | unsigned int execonly:1; |
| 396 | unsigned int cr4_pse:1; |
| 397 | unsigned int cr4_pke:1; |
| 398 | unsigned int cr4_smap:1; |
| 399 | unsigned int cr4_smep:1; |
| 400 | unsigned int cr4_la57:1; |
| 401 | unsigned int efer_lma:1; |
| 402 | }; |
| 403 | }; |
| 404 | |
| 405 | union kvm_cpu_role { |
| 406 | u64 as_u64; |
| 407 | struct { |
| 408 | union kvm_mmu_page_role base; |
| 409 | union kvm_mmu_extended_role ext; |
| 410 | }; |
| 411 | }; |
| 412 | |
| 413 | struct kvm_rmap_head { |
| 414 | atomic_long_t val; |
| 415 | }; |
| 416 | |
| 417 | struct kvm_pio_request { |
| 418 | unsigned long count; |
| 419 | int in; |
| 420 | int port; |
| 421 | int size; |
| 422 | }; |
| 423 | |
| 424 | #define PT64_ROOT_MAX_LEVEL 5 |
| 425 | |
| 426 | struct rsvd_bits_validate { |
| 427 | u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL]; |
| 428 | u64 bad_mt_xwr; |
| 429 | }; |
| 430 | |
| 431 | struct kvm_mmu_root_info { |
| 432 | gpa_t pgd; |
| 433 | hpa_t hpa; |
| 434 | }; |
| 435 | |
| 436 | #define KVM_MMU_ROOT_INFO_INVALID \ |
| 437 | ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE }) |
| 438 | |
| 439 | #define KVM_MMU_NUM_PREV_ROOTS 3 |
| 440 | |
| 441 | #define KVM_MMU_ROOT_CURRENT BIT(0) |
| 442 | #define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i) |
| 443 | #define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1) |
| 444 | |
| 445 | #define KVM_HAVE_MMU_RWLOCK |
| 446 | |
| 447 | struct kvm_mmu_page; |
| 448 | struct kvm_page_fault; |
| 449 | |
| 450 | /* |
| 451 | * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit, |
| 452 | * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the |
| 453 | * current mmu mode. |
| 454 | */ |
| 455 | struct kvm_mmu { |
| 456 | unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu); |
| 457 | u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index); |
| 458 | int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); |
| 459 | void (*inject_page_fault)(struct kvm_vcpu *vcpu, |
| 460 | struct x86_exception *fault); |
| 461 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 462 | gpa_t gva_or_gpa, u64 access, |
| 463 | struct x86_exception *exception); |
| 464 | int (*sync_spte)(struct kvm_vcpu *vcpu, |
| 465 | struct kvm_mmu_page *sp, int i); |
| 466 | struct kvm_mmu_root_info root; |
| 467 | hpa_t mirror_root_hpa; |
| 468 | union kvm_cpu_role cpu_role; |
| 469 | union kvm_mmu_page_role root_role; |
| 470 | |
| 471 | /* |
| 472 | * The pkru_mask indicates if protection key checks are needed. It |
| 473 | * consists of 16 domains indexed by page fault error code bits [4:1], |
| 474 | * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables. |
| 475 | * Each domain has 2 bits which are ANDed with AD and WD from PKRU. |
| 476 | */ |
| 477 | u32 pkru_mask; |
| 478 | |
| 479 | struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS]; |
| 480 | |
| 481 | /* |
| 482 | * Bitmap; bit set = permission fault |
| 483 | * Byte index: page fault error code [4:1] |
| 484 | * Bit index: pte permissions in ACC_* format |
| 485 | */ |
| 486 | u8 permissions[16]; |
| 487 | |
| 488 | u64 *pae_root; |
| 489 | u64 *pml4_root; |
| 490 | u64 *pml5_root; |
| 491 | |
| 492 | /* |
| 493 | * check zero bits on shadow page table entries, these |
| 494 | * bits include not only hardware reserved bits but also |
| 495 | * the bits spte never used. |
| 496 | */ |
| 497 | struct rsvd_bits_validate shadow_zero_check; |
| 498 | |
| 499 | struct rsvd_bits_validate guest_rsvd_check; |
| 500 | |
| 501 | u64 pdptrs[4]; /* pae */ |
| 502 | }; |
| 503 | |
| 504 | enum pmc_type { |
| 505 | KVM_PMC_GP = 0, |
| 506 | KVM_PMC_FIXED, |
| 507 | }; |
| 508 | |
| 509 | struct kvm_pmc { |
| 510 | enum pmc_type type; |
| 511 | u8 idx; |
| 512 | bool is_paused; |
| 513 | bool intr; |
| 514 | /* |
| 515 | * Base value of the PMC counter, relative to the *consumed* count in |
| 516 | * the associated perf_event. This value includes counter updates from |
| 517 | * the perf_event and emulated_count since the last time the counter |
| 518 | * was reprogrammed, but it is *not* the current value as seen by the |
| 519 | * guest or userspace. |
| 520 | * |
| 521 | * The count is relative to the associated perf_event so that KVM |
| 522 | * doesn't need to reprogram the perf_event every time the guest writes |
| 523 | * to the counter. |
| 524 | */ |
| 525 | u64 counter; |
| 526 | /* |
| 527 | * PMC events triggered by KVM emulation that haven't been fully |
| 528 | * processed, i.e. haven't undergone overflow detection. |
| 529 | */ |
| 530 | u64 emulated_counter; |
| 531 | u64 eventsel; |
| 532 | struct perf_event *perf_event; |
| 533 | struct kvm_vcpu *vcpu; |
| 534 | /* |
| 535 | * only for creating or reusing perf_event, |
| 536 | * eventsel value for general purpose counters, |
| 537 | * ctrl value for fixed counters. |
| 538 | */ |
| 539 | u64 current_config; |
| 540 | }; |
| 541 | |
| 542 | /* More counters may conflict with other existing Architectural MSRs */ |
| 543 | #define KVM_MAX(a, b) ((a) >= (b) ? (a) : (b)) |
| 544 | #define KVM_MAX_NR_INTEL_GP_COUNTERS 8 |
| 545 | #define KVM_MAX_NR_AMD_GP_COUNTERS 6 |
| 546 | #define KVM_MAX_NR_GP_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \ |
| 547 | KVM_MAX_NR_AMD_GP_COUNTERS) |
| 548 | |
| 549 | #define KVM_MAX_NR_INTEL_FIXED_COUNTERS 3 |
| 550 | #define KVM_MAX_NR_AMD_FIXED_COUNTERS 0 |
| 551 | #define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUNTERS, \ |
| 552 | KVM_MAX_NR_AMD_FIXED_COUNTERS) |
| 553 | |
| 554 | struct kvm_pmu { |
| 555 | u8 version; |
| 556 | unsigned nr_arch_gp_counters; |
| 557 | unsigned nr_arch_fixed_counters; |
| 558 | unsigned available_event_types; |
| 559 | u64 fixed_ctr_ctrl; |
| 560 | u64 fixed_ctr_ctrl_rsvd; |
| 561 | u64 global_ctrl; |
| 562 | u64 global_status; |
| 563 | u64 counter_bitmask[2]; |
| 564 | u64 global_ctrl_rsvd; |
| 565 | u64 global_status_rsvd; |
| 566 | u64 reserved_bits; |
| 567 | u64 raw_event_mask; |
| 568 | struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS]; |
| 569 | struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS]; |
| 570 | |
| 571 | /* |
| 572 | * Overlay the bitmap with a 64-bit atomic so that all bits can be |
| 573 | * set in a single access, e.g. to reprogram all counters when the PMU |
| 574 | * filter changes. |
| 575 | */ |
| 576 | union { |
| 577 | DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX); |
| 578 | atomic64_t __reprogram_pmi; |
| 579 | }; |
| 580 | DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX); |
| 581 | DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX); |
| 582 | |
| 583 | DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX); |
| 584 | DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX); |
| 585 | |
| 586 | u64 ds_area; |
| 587 | u64 pebs_enable; |
| 588 | u64 pebs_enable_rsvd; |
| 589 | u64 pebs_data_cfg; |
| 590 | u64 pebs_data_cfg_rsvd; |
| 591 | |
| 592 | /* |
| 593 | * If a guest counter is cross-mapped to host counter with different |
| 594 | * index, its PEBS capability will be temporarily disabled. |
| 595 | * |
| 596 | * The user should make sure that this mask is updated |
| 597 | * after disabling interrupts and before perf_guest_get_msrs(); |
| 598 | */ |
| 599 | u64 host_cross_mapped_mask; |
| 600 | |
| 601 | /* |
| 602 | * The gate to release perf_events not marked in |
| 603 | * pmc_in_use only once in a vcpu time slice. |
| 604 | */ |
| 605 | bool need_cleanup; |
| 606 | |
| 607 | /* |
| 608 | * The total number of programmed perf_events and it helps to avoid |
| 609 | * redundant check before cleanup if guest don't use vPMU at all. |
| 610 | */ |
| 611 | u8 event_count; |
| 612 | }; |
| 613 | |
| 614 | struct kvm_pmu_ops; |
| 615 | |
| 616 | enum { |
| 617 | KVM_DEBUGREG_BP_ENABLED = BIT(0), |
| 618 | KVM_DEBUGREG_WONT_EXIT = BIT(1), |
| 619 | /* |
| 620 | * Guest debug registers (DR0-3, DR6 and DR7) are saved/restored by |
| 621 | * hardware on exit from or enter to guest. KVM needn't switch them. |
| 622 | * DR0-3, DR6 and DR7 are set to their architectural INIT value on VM |
| 623 | * exit, host values need to be restored. |
| 624 | */ |
| 625 | KVM_DEBUGREG_AUTO_SWITCH = BIT(2), |
| 626 | }; |
| 627 | |
| 628 | struct kvm_mtrr { |
| 629 | u64 var[KVM_NR_VAR_MTRR * 2]; |
| 630 | u64 fixed_64k; |
| 631 | u64 fixed_16k[2]; |
| 632 | u64 fixed_4k[8]; |
| 633 | u64 deftype; |
| 634 | }; |
| 635 | |
| 636 | /* Hyper-V SynIC timer */ |
| 637 | struct kvm_vcpu_hv_stimer { |
| 638 | struct hrtimer timer; |
| 639 | int index; |
| 640 | union hv_stimer_config config; |
| 641 | u64 count; |
| 642 | u64 exp_time; |
| 643 | struct hv_message msg; |
| 644 | bool msg_pending; |
| 645 | }; |
| 646 | |
| 647 | /* Hyper-V synthetic interrupt controller (SynIC)*/ |
| 648 | struct kvm_vcpu_hv_synic { |
| 649 | u64 version; |
| 650 | u64 control; |
| 651 | u64 msg_page; |
| 652 | u64 evt_page; |
| 653 | atomic64_t sint[HV_SYNIC_SINT_COUNT]; |
| 654 | atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT]; |
| 655 | DECLARE_BITMAP(auto_eoi_bitmap, 256); |
| 656 | DECLARE_BITMAP(vec_bitmap, 256); |
| 657 | bool active; |
| 658 | bool dont_zero_synic_pages; |
| 659 | }; |
| 660 | |
| 661 | /* The maximum number of entries on the TLB flush fifo. */ |
| 662 | #define KVM_HV_TLB_FLUSH_FIFO_SIZE (16) |
| 663 | /* |
| 664 | * Note: the following 'magic' entry is made up by KVM to avoid putting |
| 665 | * anything besides GVA on the TLB flush fifo. It is theoretically possible |
| 666 | * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000 |
| 667 | * which will look identical. KVM's action to 'flush everything' instead of |
| 668 | * flushing these particular addresses is, however, fully legitimate as |
| 669 | * flushing more than requested is always OK. |
| 670 | */ |
| 671 | #define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1) |
| 672 | |
| 673 | enum hv_tlb_flush_fifos { |
| 674 | HV_L1_TLB_FLUSH_FIFO, |
| 675 | HV_L2_TLB_FLUSH_FIFO, |
| 676 | HV_NR_TLB_FLUSH_FIFOS, |
| 677 | }; |
| 678 | |
| 679 | struct kvm_vcpu_hv_tlb_flush_fifo { |
| 680 | spinlock_t write_lock; |
| 681 | DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE); |
| 682 | }; |
| 683 | |
| 684 | /* Hyper-V per vcpu emulation context */ |
| 685 | struct kvm_vcpu_hv { |
| 686 | struct kvm_vcpu *vcpu; |
| 687 | u32 vp_index; |
| 688 | u64 hv_vapic; |
| 689 | s64 runtime_offset; |
| 690 | struct kvm_vcpu_hv_synic synic; |
| 691 | struct kvm_hyperv_exit exit; |
| 692 | struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT]; |
| 693 | DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); |
| 694 | bool enforce_cpuid; |
| 695 | struct { |
| 696 | u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */ |
| 697 | u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */ |
| 698 | u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */ |
| 699 | u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */ |
| 700 | u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */ |
| 701 | u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */ |
| 702 | u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */ |
| 703 | u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */ |
| 704 | } cpuid_cache; |
| 705 | |
| 706 | struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS]; |
| 707 | |
| 708 | /* |
| 709 | * Preallocated buffers for handling hypercalls that pass sparse vCPU |
| 710 | * sets (for high vCPU counts, they're too large to comfortably fit on |
| 711 | * the stack). |
| 712 | */ |
| 713 | u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS]; |
| 714 | DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); |
| 715 | |
| 716 | struct hv_vp_assist_page vp_assist_page; |
| 717 | |
| 718 | struct { |
| 719 | u64 pa_page_gpa; |
| 720 | u64 vm_id; |
| 721 | u32 vp_id; |
| 722 | } nested; |
| 723 | }; |
| 724 | |
| 725 | struct kvm_hypervisor_cpuid { |
| 726 | u32 base; |
| 727 | u32 limit; |
| 728 | }; |
| 729 | |
| 730 | #ifdef CONFIG_KVM_XEN |
| 731 | /* Xen HVM per vcpu emulation context */ |
| 732 | struct kvm_vcpu_xen { |
| 733 | u64 hypercall_rip; |
| 734 | u32 current_runstate; |
| 735 | u8 upcall_vector; |
| 736 | struct gfn_to_pfn_cache vcpu_info_cache; |
| 737 | struct gfn_to_pfn_cache vcpu_time_info_cache; |
| 738 | struct gfn_to_pfn_cache runstate_cache; |
| 739 | struct gfn_to_pfn_cache runstate2_cache; |
| 740 | u64 last_steal; |
| 741 | u64 runstate_entry_time; |
| 742 | u64 runstate_times[4]; |
| 743 | unsigned long evtchn_pending_sel; |
| 744 | u32 vcpu_id; /* The Xen / ACPI vCPU ID */ |
| 745 | u32 timer_virq; |
| 746 | u64 timer_expires; /* In guest epoch */ |
| 747 | atomic_t timer_pending; |
| 748 | struct hrtimer timer; |
| 749 | int poll_evtchn; |
| 750 | struct timer_list poll_timer; |
| 751 | struct kvm_hypervisor_cpuid cpuid; |
| 752 | }; |
| 753 | #endif |
| 754 | |
| 755 | struct kvm_queued_exception { |
| 756 | bool pending; |
| 757 | bool injected; |
| 758 | bool has_error_code; |
| 759 | u8 vector; |
| 760 | u32 error_code; |
| 761 | unsigned long payload; |
| 762 | bool has_payload; |
| 763 | }; |
| 764 | |
| 765 | /* |
| 766 | * Hardware-defined CPUID leafs that are either scattered by the kernel or are |
| 767 | * unknown to the kernel, but need to be directly used by KVM. Note, these |
| 768 | * word values conflict with the kernel's "bug" caps, but KVM doesn't use those. |
| 769 | */ |
| 770 | enum kvm_only_cpuid_leafs { |
| 771 | CPUID_12_EAX = NCAPINTS, |
| 772 | CPUID_7_1_EDX, |
| 773 | CPUID_8000_0007_EDX, |
| 774 | CPUID_8000_0022_EAX, |
| 775 | CPUID_7_2_EDX, |
| 776 | CPUID_24_0_EBX, |
| 777 | CPUID_8000_0021_ECX, |
| 778 | CPUID_7_1_ECX, |
| 779 | NR_KVM_CPU_CAPS, |
| 780 | |
| 781 | NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, |
| 782 | }; |
| 783 | |
| 784 | struct kvm_vcpu_arch { |
| 785 | /* |
| 786 | * rip and regs accesses must go through |
| 787 | * kvm_{register,rip}_{read,write} functions. |
| 788 | */ |
| 789 | unsigned long regs[NR_VCPU_REGS]; |
| 790 | u32 regs_avail; |
| 791 | u32 regs_dirty; |
| 792 | |
| 793 | unsigned long cr0; |
| 794 | unsigned long cr0_guest_owned_bits; |
| 795 | unsigned long cr2; |
| 796 | unsigned long cr3; |
| 797 | unsigned long cr4; |
| 798 | unsigned long cr4_guest_owned_bits; |
| 799 | unsigned long cr4_guest_rsvd_bits; |
| 800 | unsigned long cr8; |
| 801 | u32 host_pkru; |
| 802 | u32 pkru; |
| 803 | u32 hflags; |
| 804 | u64 efer; |
| 805 | u64 host_debugctl; |
| 806 | u64 apic_base; |
| 807 | struct kvm_lapic *apic; /* kernel irqchip context */ |
| 808 | bool load_eoi_exitmap_pending; |
| 809 | DECLARE_BITMAP(ioapic_handled_vectors, 256); |
| 810 | unsigned long apic_attention; |
| 811 | int32_t apic_arb_prio; |
| 812 | int mp_state; |
| 813 | u64 ia32_misc_enable_msr; |
| 814 | u64 smbase; |
| 815 | u64 smi_count; |
| 816 | bool at_instruction_boundary; |
| 817 | bool tpr_access_reporting; |
| 818 | bool xfd_no_write_intercept; |
| 819 | u64 microcode_version; |
| 820 | u64 arch_capabilities; |
| 821 | u64 perf_capabilities; |
| 822 | |
| 823 | /* |
| 824 | * Paging state of the vcpu |
| 825 | * |
| 826 | * If the vcpu runs in guest mode with two level paging this still saves |
| 827 | * the paging mode of the l1 guest. This context is always used to |
| 828 | * handle faults. |
| 829 | */ |
| 830 | struct kvm_mmu *mmu; |
| 831 | |
| 832 | /* Non-nested MMU for L1 */ |
| 833 | struct kvm_mmu root_mmu; |
| 834 | |
| 835 | /* L1 MMU when running nested */ |
| 836 | struct kvm_mmu guest_mmu; |
| 837 | |
| 838 | /* |
| 839 | * Paging state of an L2 guest (used for nested npt) |
| 840 | * |
| 841 | * This context will save all necessary information to walk page tables |
| 842 | * of an L2 guest. This context is only initialized for page table |
| 843 | * walking and not for faulting since we never handle l2 page faults on |
| 844 | * the host. |
| 845 | */ |
| 846 | struct kvm_mmu nested_mmu; |
| 847 | |
| 848 | /* |
| 849 | * Pointer to the mmu context currently used for |
| 850 | * gva_to_gpa translations. |
| 851 | */ |
| 852 | struct kvm_mmu *walk_mmu; |
| 853 | |
| 854 | struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; |
| 855 | struct kvm_mmu_memory_cache mmu_shadow_page_cache; |
| 856 | struct kvm_mmu_memory_cache mmu_shadowed_info_cache; |
| 857 | struct kvm_mmu_memory_cache ; |
| 858 | /* |
| 859 | * This cache is to allocate external page table. E.g. private EPT used |
| 860 | * by the TDX module. |
| 861 | */ |
| 862 | struct kvm_mmu_memory_cache mmu_external_spt_cache; |
| 863 | |
| 864 | /* |
| 865 | * QEMU userspace and the guest each have their own FPU state. |
| 866 | * In vcpu_run, we switch between the user and guest FPU contexts. |
| 867 | * While running a VCPU, the VCPU thread will have the guest FPU |
| 868 | * context. |
| 869 | * |
| 870 | * Note that while the PKRU state lives inside the fpu registers, |
| 871 | * it is switched out separately at VMENTER and VMEXIT time. The |
| 872 | * "guest_fpstate" state here contains the guest FPU context, with the |
| 873 | * host PRKU bits. |
| 874 | */ |
| 875 | struct fpu_guest guest_fpu; |
| 876 | |
| 877 | u64 xcr0; |
| 878 | u64 guest_supported_xcr0; |
| 879 | u64 ia32_xss; |
| 880 | u64 guest_supported_xss; |
| 881 | |
| 882 | struct kvm_pio_request pio; |
| 883 | void *pio_data; |
| 884 | void *sev_pio_data; |
| 885 | unsigned sev_pio_count; |
| 886 | |
| 887 | u8 event_exit_inst_len; |
| 888 | |
| 889 | bool exception_from_userspace; |
| 890 | |
| 891 | /* Exceptions to be injected to the guest. */ |
| 892 | struct kvm_queued_exception exception; |
| 893 | /* Exception VM-Exits to be synthesized to L1. */ |
| 894 | struct kvm_queued_exception exception_vmexit; |
| 895 | |
| 896 | struct kvm_queued_interrupt { |
| 897 | bool injected; |
| 898 | bool soft; |
| 899 | u8 nr; |
| 900 | } interrupt; |
| 901 | |
| 902 | int halt_request; /* real mode on Intel only */ |
| 903 | |
| 904 | int cpuid_nent; |
| 905 | struct kvm_cpuid_entry2 *cpuid_entries; |
| 906 | bool cpuid_dynamic_bits_dirty; |
| 907 | bool is_amd_compatible; |
| 908 | |
| 909 | /* |
| 910 | * cpu_caps holds the effective guest capabilities, i.e. the features |
| 911 | * the vCPU is allowed to use. Typically, but not always, features can |
| 912 | * be used by the guest if and only if both KVM and userspace want to |
| 913 | * expose the feature to the guest. |
| 914 | * |
| 915 | * A common exception is for virtualization holes, i.e. when KVM can't |
| 916 | * prevent the guest from using a feature, in which case the vCPU "has" |
| 917 | * the feature regardless of what KVM or userspace desires. |
| 918 | * |
| 919 | * Note, features that don't require KVM involvement in any way are |
| 920 | * NOT enforced/sanitized by KVM, i.e. are taken verbatim from the |
| 921 | * guest CPUID provided by userspace. |
| 922 | */ |
| 923 | u32 cpu_caps[NR_KVM_CPU_CAPS]; |
| 924 | |
| 925 | u64 reserved_gpa_bits; |
| 926 | int maxphyaddr; |
| 927 | |
| 928 | /* emulate context */ |
| 929 | |
| 930 | struct x86_emulate_ctxt *emulate_ctxt; |
| 931 | bool emulate_regs_need_sync_to_vcpu; |
| 932 | bool emulate_regs_need_sync_from_vcpu; |
| 933 | int (*complete_userspace_io)(struct kvm_vcpu *vcpu); |
| 934 | unsigned long cui_linear_rip; |
| 935 | int cui_rdmsr_imm_reg; |
| 936 | |
| 937 | gpa_t time; |
| 938 | s8 pvclock_tsc_shift; |
| 939 | u32 pvclock_tsc_mul; |
| 940 | unsigned int hw_tsc_khz; |
| 941 | struct gfn_to_pfn_cache pv_time; |
| 942 | /* set guest stopped flag in pvclock flags field */ |
| 943 | bool pvclock_set_guest_stopped_request; |
| 944 | |
| 945 | struct { |
| 946 | u8 preempted; |
| 947 | u64 msr_val; |
| 948 | u64 last_steal; |
| 949 | struct gfn_to_hva_cache cache; |
| 950 | } st; |
| 951 | |
| 952 | u64 l1_tsc_offset; |
| 953 | u64 tsc_offset; /* current tsc offset */ |
| 954 | u64 last_guest_tsc; |
| 955 | u64 last_host_tsc; |
| 956 | u64 tsc_offset_adjustment; |
| 957 | u64 this_tsc_nsec; |
| 958 | u64 this_tsc_write; |
| 959 | u64 this_tsc_generation; |
| 960 | bool tsc_catchup; |
| 961 | bool tsc_always_catchup; |
| 962 | s8 virtual_tsc_shift; |
| 963 | u32 virtual_tsc_mult; |
| 964 | u32 virtual_tsc_khz; |
| 965 | s64 ia32_tsc_adjust_msr; |
| 966 | u64 msr_ia32_power_ctl; |
| 967 | u64 l1_tsc_scaling_ratio; |
| 968 | u64 tsc_scaling_ratio; /* current scaling ratio */ |
| 969 | |
| 970 | atomic_t nmi_queued; /* unprocessed asynchronous NMIs */ |
| 971 | /* Number of NMIs pending injection, not including hardware vNMIs. */ |
| 972 | unsigned int nmi_pending; |
| 973 | bool nmi_injected; /* Trying to inject an NMI this entry */ |
| 974 | bool smi_pending; /* SMI queued after currently running handler */ |
| 975 | u8 handling_intr_from_guest; |
| 976 | |
| 977 | struct kvm_mtrr mtrr_state; |
| 978 | u64 pat; |
| 979 | |
| 980 | unsigned switch_db_regs; |
| 981 | unsigned long db[KVM_NR_DB_REGS]; |
| 982 | unsigned long dr6; |
| 983 | unsigned long dr7; |
| 984 | unsigned long eff_db[KVM_NR_DB_REGS]; |
| 985 | unsigned long guest_debug_dr7; |
| 986 | u64 msr_platform_info; |
| 987 | u64 msr_misc_features_enables; |
| 988 | |
| 989 | u64 mcg_cap; |
| 990 | u64 mcg_status; |
| 991 | u64 mcg_ctl; |
| 992 | u64 mcg_ext_ctl; |
| 993 | u64 *mce_banks; |
| 994 | u64 *mci_ctl2_banks; |
| 995 | |
| 996 | /* Cache MMIO info */ |
| 997 | u64 mmio_gva; |
| 998 | unsigned mmio_access; |
| 999 | gfn_t mmio_gfn; |
| 1000 | u64 mmio_gen; |
| 1001 | |
| 1002 | struct kvm_pmu pmu; |
| 1003 | |
| 1004 | /* used for guest single stepping over the given code position */ |
| 1005 | unsigned long singlestep_rip; |
| 1006 | |
| 1007 | #ifdef CONFIG_KVM_HYPERV |
| 1008 | bool hyperv_enabled; |
| 1009 | struct kvm_vcpu_hv *hyperv; |
| 1010 | #endif |
| 1011 | #ifdef CONFIG_KVM_XEN |
| 1012 | struct kvm_vcpu_xen xen; |
| 1013 | #endif |
| 1014 | cpumask_var_t wbinvd_dirty_mask; |
| 1015 | |
| 1016 | unsigned long last_retry_eip; |
| 1017 | unsigned long last_retry_addr; |
| 1018 | |
| 1019 | struct { |
| 1020 | bool halted; |
| 1021 | gfn_t gfns[ASYNC_PF_PER_VCPU]; |
| 1022 | struct gfn_to_hva_cache data; |
| 1023 | u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */ |
| 1024 | u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */ |
| 1025 | u16 vec; |
| 1026 | u32 id; |
| 1027 | u32 host_apf_flags; |
| 1028 | bool send_always; |
| 1029 | bool delivery_as_pf_vmexit; |
| 1030 | bool ; |
| 1031 | } apf; |
| 1032 | |
| 1033 | /* OSVW MSRs (AMD only) */ |
| 1034 | struct { |
| 1035 | u64 length; |
| 1036 | u64 status; |
| 1037 | } osvw; |
| 1038 | |
| 1039 | struct { |
| 1040 | u64 msr_val; |
| 1041 | struct gfn_to_hva_cache data; |
| 1042 | } pv_eoi; |
| 1043 | |
| 1044 | u64 msr_kvm_poll_control; |
| 1045 | |
| 1046 | /* pv related host specific info */ |
| 1047 | struct { |
| 1048 | bool pv_unhalted; |
| 1049 | } pv; |
| 1050 | |
| 1051 | int pending_ioapic_eoi; |
| 1052 | int pending_external_vector; |
| 1053 | int highest_stale_pending_ioapic_eoi; |
| 1054 | |
| 1055 | /* be preempted when it's in kernel-mode(cpl=0) */ |
| 1056 | bool preempted_in_kernel; |
| 1057 | |
| 1058 | /* Host CPU on which VM-entry was most recently attempted */ |
| 1059 | int last_vmentry_cpu; |
| 1060 | |
| 1061 | /* AMD MSRC001_0015 Hardware Configuration */ |
| 1062 | u64 msr_hwcr; |
| 1063 | |
| 1064 | /* pv related cpuid info */ |
| 1065 | struct { |
| 1066 | /* |
| 1067 | * value of the eax register in the KVM_CPUID_FEATURES CPUID |
| 1068 | * leaf. |
| 1069 | */ |
| 1070 | u32 features; |
| 1071 | |
| 1072 | /* |
| 1073 | * indicates whether pv emulation should be disabled if features |
| 1074 | * are not present in the guest's cpuid |
| 1075 | */ |
| 1076 | bool enforce; |
| 1077 | } pv_cpuid; |
| 1078 | |
| 1079 | /* Protected Guests */ |
| 1080 | bool guest_state_protected; |
| 1081 | bool guest_tsc_protected; |
| 1082 | |
| 1083 | /* |
| 1084 | * Set when PDPTS were loaded directly by the userspace without |
| 1085 | * reading the guest memory |
| 1086 | */ |
| 1087 | bool pdptrs_from_userspace; |
| 1088 | |
| 1089 | #if IS_ENABLED(CONFIG_HYPERV) |
| 1090 | hpa_t hv_root_tdp; |
| 1091 | #endif |
| 1092 | }; |
| 1093 | |
| 1094 | struct kvm_lpage_info { |
| 1095 | int disallow_lpage; |
| 1096 | }; |
| 1097 | |
| 1098 | struct kvm_arch_memory_slot { |
| 1099 | struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES]; |
| 1100 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; |
| 1101 | unsigned short *gfn_write_track; |
| 1102 | }; |
| 1103 | |
| 1104 | /* |
| 1105 | * Track the mode of the optimized logical map, as the rules for decoding the |
| 1106 | * destination vary per mode. Enabling the optimized logical map requires all |
| 1107 | * software-enabled local APIs to be in the same mode, each addressable APIC to |
| 1108 | * be mapped to only one MDA, and each MDA to map to at most one APIC. |
| 1109 | */ |
| 1110 | enum kvm_apic_logical_mode { |
| 1111 | /* All local APICs are software disabled. */ |
| 1112 | KVM_APIC_MODE_SW_DISABLED, |
| 1113 | /* All software enabled local APICs in xAPIC cluster addressing mode. */ |
| 1114 | KVM_APIC_MODE_XAPIC_CLUSTER, |
| 1115 | /* All software enabled local APICs in xAPIC flat addressing mode. */ |
| 1116 | KVM_APIC_MODE_XAPIC_FLAT, |
| 1117 | /* All software enabled local APICs in x2APIC mode. */ |
| 1118 | KVM_APIC_MODE_X2APIC, |
| 1119 | /* |
| 1120 | * Optimized map disabled, e.g. not all local APICs in the same logical |
| 1121 | * mode, same logical ID assigned to multiple APICs, etc. |
| 1122 | */ |
| 1123 | KVM_APIC_MODE_MAP_DISABLED, |
| 1124 | }; |
| 1125 | |
| 1126 | struct kvm_apic_map { |
| 1127 | struct rcu_head rcu; |
| 1128 | enum kvm_apic_logical_mode logical_mode; |
| 1129 | u32 max_apic_id; |
| 1130 | union { |
| 1131 | struct kvm_lapic *xapic_flat_map[8]; |
| 1132 | struct kvm_lapic *xapic_cluster_map[16][4]; |
| 1133 | }; |
| 1134 | struct kvm_lapic *phys_map[]; |
| 1135 | }; |
| 1136 | |
| 1137 | /* Hyper-V synthetic debugger (SynDbg)*/ |
| 1138 | struct kvm_hv_syndbg { |
| 1139 | struct { |
| 1140 | u64 control; |
| 1141 | u64 status; |
| 1142 | u64 send_page; |
| 1143 | u64 recv_page; |
| 1144 | u64 pending_page; |
| 1145 | } control; |
| 1146 | u64 options; |
| 1147 | }; |
| 1148 | |
| 1149 | /* Current state of Hyper-V TSC page clocksource */ |
| 1150 | enum hv_tsc_page_status { |
| 1151 | /* TSC page was not set up or disabled */ |
| 1152 | HV_TSC_PAGE_UNSET = 0, |
| 1153 | /* TSC page MSR was written by the guest, update pending */ |
| 1154 | HV_TSC_PAGE_GUEST_CHANGED, |
| 1155 | /* TSC page update was triggered from the host side */ |
| 1156 | HV_TSC_PAGE_HOST_CHANGED, |
| 1157 | /* TSC page was properly set up and is currently active */ |
| 1158 | HV_TSC_PAGE_SET, |
| 1159 | /* TSC page was set up with an inaccessible GPA */ |
| 1160 | HV_TSC_PAGE_BROKEN, |
| 1161 | }; |
| 1162 | |
| 1163 | #ifdef CONFIG_KVM_HYPERV |
| 1164 | /* Hyper-V emulation context */ |
| 1165 | struct kvm_hv { |
| 1166 | struct mutex hv_lock; |
| 1167 | u64 hv_guest_os_id; |
| 1168 | u64 hv_hypercall; |
| 1169 | u64 hv_tsc_page; |
| 1170 | enum hv_tsc_page_status hv_tsc_page_status; |
| 1171 | |
| 1172 | /* Hyper-v based guest crash (NT kernel bugcheck) parameters */ |
| 1173 | u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS]; |
| 1174 | u64 hv_crash_ctl; |
| 1175 | |
| 1176 | struct ms_hyperv_tsc_page tsc_ref; |
| 1177 | |
| 1178 | struct idr conn_to_evt; |
| 1179 | |
| 1180 | u64 hv_reenlightenment_control; |
| 1181 | u64 hv_tsc_emulation_control; |
| 1182 | u64 hv_tsc_emulation_status; |
| 1183 | u64 hv_invtsc_control; |
| 1184 | |
| 1185 | /* How many vCPUs have VP index != vCPU index */ |
| 1186 | atomic_t num_mismatched_vp_indexes; |
| 1187 | |
| 1188 | /* |
| 1189 | * How many SynICs use 'AutoEOI' feature |
| 1190 | * (protected by arch.apicv_update_lock) |
| 1191 | */ |
| 1192 | unsigned int synic_auto_eoi_used; |
| 1193 | |
| 1194 | struct kvm_hv_syndbg hv_syndbg; |
| 1195 | |
| 1196 | bool xsaves_xsavec_checked; |
| 1197 | }; |
| 1198 | #endif |
| 1199 | |
| 1200 | struct msr_bitmap_range { |
| 1201 | u32 flags; |
| 1202 | u32 nmsrs; |
| 1203 | u32 base; |
| 1204 | unsigned long *bitmap; |
| 1205 | }; |
| 1206 | |
| 1207 | #ifdef CONFIG_KVM_XEN |
| 1208 | /* Xen emulation context */ |
| 1209 | struct kvm_xen { |
| 1210 | struct mutex xen_lock; |
| 1211 | u32 xen_version; |
| 1212 | bool long_mode; |
| 1213 | bool runstate_update_flag; |
| 1214 | u8 upcall_vector; |
| 1215 | struct gfn_to_pfn_cache shinfo_cache; |
| 1216 | struct idr evtchn_ports; |
| 1217 | unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; |
| 1218 | |
| 1219 | struct kvm_xen_hvm_config hvm_config; |
| 1220 | }; |
| 1221 | #endif |
| 1222 | |
| 1223 | enum kvm_irqchip_mode { |
| 1224 | KVM_IRQCHIP_NONE, |
| 1225 | KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ |
| 1226 | KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ |
| 1227 | }; |
| 1228 | |
| 1229 | struct kvm_x86_msr_filter { |
| 1230 | u8 count; |
| 1231 | bool default_allow:1; |
| 1232 | struct msr_bitmap_range ranges[16]; |
| 1233 | }; |
| 1234 | |
| 1235 | struct kvm_x86_pmu_event_filter { |
| 1236 | __u32 action; |
| 1237 | __u32 nevents; |
| 1238 | __u32 fixed_counter_bitmap; |
| 1239 | __u32 flags; |
| 1240 | __u32 nr_includes; |
| 1241 | __u32 nr_excludes; |
| 1242 | __u64 *includes; |
| 1243 | __u64 *excludes; |
| 1244 | __u64 events[]; |
| 1245 | }; |
| 1246 | |
| 1247 | enum kvm_apicv_inhibit { |
| 1248 | |
| 1249 | /********************************************************************/ |
| 1250 | /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */ |
| 1251 | /********************************************************************/ |
| 1252 | |
| 1253 | /* |
| 1254 | * APIC acceleration is disabled by a module parameter |
| 1255 | * and/or not supported in hardware. |
| 1256 | */ |
| 1257 | APICV_INHIBIT_REASON_DISABLED, |
| 1258 | |
| 1259 | /* |
| 1260 | * APIC acceleration is inhibited because AutoEOI feature is |
| 1261 | * being used by a HyperV guest. |
| 1262 | */ |
| 1263 | APICV_INHIBIT_REASON_HYPERV, |
| 1264 | |
| 1265 | /* |
| 1266 | * APIC acceleration is inhibited because the userspace didn't yet |
| 1267 | * enable the kernel/split irqchip. |
| 1268 | */ |
| 1269 | APICV_INHIBIT_REASON_ABSENT, |
| 1270 | |
| 1271 | /* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ |
| 1272 | * (out of band, debug measure of blocking all interrupts on this vCPU) |
| 1273 | * was enabled, to avoid AVIC/APICv bypassing it. |
| 1274 | */ |
| 1275 | APICV_INHIBIT_REASON_BLOCKIRQ, |
| 1276 | |
| 1277 | /* |
| 1278 | * APICv is disabled because not all vCPUs have a 1:1 mapping between |
| 1279 | * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack. |
| 1280 | */ |
| 1281 | APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED, |
| 1282 | |
| 1283 | /* |
| 1284 | * For simplicity, the APIC acceleration is inhibited |
| 1285 | * first time either APIC ID or APIC base are changed by the guest |
| 1286 | * from their reset values. |
| 1287 | */ |
| 1288 | APICV_INHIBIT_REASON_APIC_ID_MODIFIED, |
| 1289 | APICV_INHIBIT_REASON_APIC_BASE_MODIFIED, |
| 1290 | |
| 1291 | /******************************************************/ |
| 1292 | /* INHIBITs that are relevant only to the AMD's AVIC. */ |
| 1293 | /******************************************************/ |
| 1294 | |
| 1295 | /* |
| 1296 | * AVIC is inhibited on a vCPU because it runs a nested guest. |
| 1297 | * |
| 1298 | * This is needed because unlike APICv, the peers of this vCPU |
| 1299 | * cannot use the doorbell mechanism to signal interrupts via AVIC when |
| 1300 | * a vCPU runs nested. |
| 1301 | */ |
| 1302 | APICV_INHIBIT_REASON_NESTED, |
| 1303 | |
| 1304 | /* |
| 1305 | * On SVM, the wait for the IRQ window is implemented with pending vIRQ, |
| 1306 | * which cannot be injected when the AVIC is enabled, thus AVIC |
| 1307 | * is inhibited while KVM waits for IRQ window. |
| 1308 | */ |
| 1309 | APICV_INHIBIT_REASON_IRQWIN, |
| 1310 | |
| 1311 | /* |
| 1312 | * PIT (i8254) 're-inject' mode, relies on EOI intercept, |
| 1313 | * which AVIC doesn't support for edge triggered interrupts. |
| 1314 | */ |
| 1315 | APICV_INHIBIT_REASON_PIT_REINJ, |
| 1316 | |
| 1317 | /* |
| 1318 | * AVIC is disabled because SEV doesn't support it. |
| 1319 | */ |
| 1320 | APICV_INHIBIT_REASON_SEV, |
| 1321 | |
| 1322 | /* |
| 1323 | * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1 |
| 1324 | * mapping between logical ID and vCPU. |
| 1325 | */ |
| 1326 | APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED, |
| 1327 | |
| 1328 | /* |
| 1329 | * AVIC is disabled because the vCPU's APIC ID is beyond the max |
| 1330 | * supported by AVIC/x2AVIC, i.e. the vCPU is unaddressable. |
| 1331 | */ |
| 1332 | APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG, |
| 1333 | |
| 1334 | NR_APICV_INHIBIT_REASONS, |
| 1335 | }; |
| 1336 | |
| 1337 | #define __APICV_INHIBIT_REASON(reason) \ |
| 1338 | { BIT(APICV_INHIBIT_REASON_##reason), #reason } |
| 1339 | |
| 1340 | #define APICV_INHIBIT_REASONS \ |
| 1341 | __APICV_INHIBIT_REASON(DISABLED), \ |
| 1342 | __APICV_INHIBIT_REASON(HYPERV), \ |
| 1343 | __APICV_INHIBIT_REASON(ABSENT), \ |
| 1344 | __APICV_INHIBIT_REASON(BLOCKIRQ), \ |
| 1345 | __APICV_INHIBIT_REASON(PHYSICAL_ID_ALIASED), \ |
| 1346 | __APICV_INHIBIT_REASON(APIC_ID_MODIFIED), \ |
| 1347 | __APICV_INHIBIT_REASON(APIC_BASE_MODIFIED), \ |
| 1348 | __APICV_INHIBIT_REASON(NESTED), \ |
| 1349 | __APICV_INHIBIT_REASON(IRQWIN), \ |
| 1350 | __APICV_INHIBIT_REASON(PIT_REINJ), \ |
| 1351 | __APICV_INHIBIT_REASON(SEV), \ |
| 1352 | __APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED), \ |
| 1353 | __APICV_INHIBIT_REASON(PHYSICAL_ID_TOO_BIG) |
| 1354 | |
| 1355 | struct kvm_possible_nx_huge_pages { |
| 1356 | /* |
| 1357 | * A list of kvm_mmu_page structs that, if zapped, could possibly be |
| 1358 | * replaced by an NX huge page. A shadow page is on this list if its |
| 1359 | * existence disallows an NX huge page (nx_huge_page_disallowed is set) |
| 1360 | * and there are no other conditions that prevent a huge page, e.g. |
| 1361 | * the backing host page is huge, dirtly logging is not enabled for its |
| 1362 | * memslot, etc... Note, zapping shadow pages on this list doesn't |
| 1363 | * guarantee an NX huge page will be created in its stead, e.g. if the |
| 1364 | * guest attempts to execute from the region then KVM obviously can't |
| 1365 | * create an NX huge page (without hanging the guest). |
| 1366 | */ |
| 1367 | struct list_head pages; |
| 1368 | u64 nr_pages; |
| 1369 | }; |
| 1370 | |
| 1371 | enum kvm_mmu_type { |
| 1372 | KVM_SHADOW_MMU, |
| 1373 | #ifdef CONFIG_X86_64 |
| 1374 | KVM_TDP_MMU, |
| 1375 | #endif |
| 1376 | KVM_NR_MMU_TYPES, |
| 1377 | }; |
| 1378 | |
| 1379 | struct kvm_arch { |
| 1380 | unsigned long n_used_mmu_pages; |
| 1381 | unsigned long n_requested_mmu_pages; |
| 1382 | unsigned long n_max_mmu_pages; |
| 1383 | unsigned int indirect_shadow_pages; |
| 1384 | u8 mmu_valid_gen; |
| 1385 | u8 vm_type; |
| 1386 | bool has_private_mem; |
| 1387 | bool has_protected_state; |
| 1388 | bool has_protected_eoi; |
| 1389 | bool pre_fault_allowed; |
| 1390 | struct hlist_head *mmu_page_hash; |
| 1391 | struct list_head active_mmu_pages; |
| 1392 | struct kvm_possible_nx_huge_pages possible_nx_huge_pages[KVM_NR_MMU_TYPES]; |
| 1393 | #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING |
| 1394 | struct kvm_page_track_notifier_head track_notifier_head; |
| 1395 | #endif |
| 1396 | /* |
| 1397 | * Protects marking pages unsync during page faults, as TDP MMU page |
| 1398 | * faults only take mmu_lock for read. For simplicity, the unsync |
| 1399 | * pages lock is always taken when marking pages unsync regardless of |
| 1400 | * whether mmu_lock is held for read or write. |
| 1401 | */ |
| 1402 | spinlock_t mmu_unsync_pages_lock; |
| 1403 | |
| 1404 | u64 shadow_mmio_value; |
| 1405 | |
| 1406 | #define __KVM_HAVE_ARCH_NONCOHERENT_DMA |
| 1407 | atomic_t noncoherent_dma_count; |
| 1408 | unsigned long nr_possible_bypass_irqs; |
| 1409 | |
| 1410 | #ifdef CONFIG_KVM_IOAPIC |
| 1411 | struct kvm_pic *vpic; |
| 1412 | struct kvm_ioapic *vioapic; |
| 1413 | struct kvm_pit *vpit; |
| 1414 | #endif |
| 1415 | atomic_t vapics_in_nmi_mode; |
| 1416 | struct mutex apic_map_lock; |
| 1417 | struct kvm_apic_map __rcu *apic_map; |
| 1418 | atomic_t apic_map_dirty; |
| 1419 | |
| 1420 | bool apic_access_memslot_enabled; |
| 1421 | bool apic_access_memslot_inhibited; |
| 1422 | |
| 1423 | /* Protects apicv_inhibit_reasons */ |
| 1424 | struct rw_semaphore apicv_update_lock; |
| 1425 | unsigned long apicv_inhibit_reasons; |
| 1426 | |
| 1427 | gpa_t wall_clock; |
| 1428 | |
| 1429 | u64 disabled_exits; |
| 1430 | |
| 1431 | s64 kvmclock_offset; |
| 1432 | |
| 1433 | /* |
| 1434 | * This also protects nr_vcpus_matched_tsc which is read from a |
| 1435 | * preemption-disabled region, so it must be a raw spinlock. |
| 1436 | */ |
| 1437 | raw_spinlock_t tsc_write_lock; |
| 1438 | u64 last_tsc_nsec; |
| 1439 | u64 last_tsc_write; |
| 1440 | u32 last_tsc_khz; |
| 1441 | u64 last_tsc_offset; |
| 1442 | u64 cur_tsc_nsec; |
| 1443 | u64 cur_tsc_write; |
| 1444 | u64 cur_tsc_offset; |
| 1445 | u64 cur_tsc_generation; |
| 1446 | int nr_vcpus_matched_tsc; |
| 1447 | |
| 1448 | u32 default_tsc_khz; |
| 1449 | bool user_set_tsc; |
| 1450 | u64 apic_bus_cycle_ns; |
| 1451 | |
| 1452 | seqcount_raw_spinlock_t pvclock_sc; |
| 1453 | bool use_master_clock; |
| 1454 | u64 master_kernel_ns; |
| 1455 | u64 master_cycle_now; |
| 1456 | |
| 1457 | #ifdef CONFIG_KVM_HYPERV |
| 1458 | struct kvm_hv hyperv; |
| 1459 | #endif |
| 1460 | |
| 1461 | #ifdef CONFIG_KVM_XEN |
| 1462 | struct kvm_xen xen; |
| 1463 | #endif |
| 1464 | |
| 1465 | bool backwards_tsc_observed; |
| 1466 | bool boot_vcpu_runs_old_kvmclock; |
| 1467 | u32 bsp_vcpu_id; |
| 1468 | |
| 1469 | u64 disabled_quirks; |
| 1470 | |
| 1471 | enum kvm_irqchip_mode irqchip_mode; |
| 1472 | u8 nr_reserved_ioapic_pins; |
| 1473 | |
| 1474 | bool disabled_lapic_found; |
| 1475 | |
| 1476 | bool x2apic_format; |
| 1477 | bool x2apic_broadcast_quirk_disabled; |
| 1478 | |
| 1479 | bool has_mapped_host_mmio; |
| 1480 | bool guest_can_read_msr_platform_info; |
| 1481 | bool exception_payload_enabled; |
| 1482 | |
| 1483 | bool triple_fault_event; |
| 1484 | |
| 1485 | bool bus_lock_detection_enabled; |
| 1486 | bool enable_pmu; |
| 1487 | |
| 1488 | u32 notify_window; |
| 1489 | u32 notify_vmexit_flags; |
| 1490 | /* |
| 1491 | * If exit_on_emulation_error is set, and the in-kernel instruction |
| 1492 | * emulator fails to emulate an instruction, allow userspace |
| 1493 | * the opportunity to look at it. |
| 1494 | */ |
| 1495 | bool exit_on_emulation_error; |
| 1496 | |
| 1497 | /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */ |
| 1498 | u32 user_space_msr_mask; |
| 1499 | struct kvm_x86_msr_filter __rcu *msr_filter; |
| 1500 | |
| 1501 | u32 hypercall_exit_enabled; |
| 1502 | |
| 1503 | /* Guest can access the SGX PROVISIONKEY. */ |
| 1504 | bool sgx_provisioning_allowed; |
| 1505 | |
| 1506 | struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter; |
| 1507 | struct vhost_task *nx_huge_page_recovery_thread; |
| 1508 | u64 nx_huge_page_last; |
| 1509 | struct once nx_once; |
| 1510 | |
| 1511 | #ifdef CONFIG_X86_64 |
| 1512 | #ifdef CONFIG_KVM_PROVE_MMU |
| 1513 | /* |
| 1514 | * The number of TDP MMU pages across all roots. Used only to sanity |
| 1515 | * check that KVM isn't leaking TDP MMU pages. |
| 1516 | */ |
| 1517 | atomic64_t tdp_mmu_pages; |
| 1518 | #endif |
| 1519 | |
| 1520 | /* |
| 1521 | * List of struct kvm_mmu_pages being used as roots. |
| 1522 | * All struct kvm_mmu_pages in the list should have |
| 1523 | * tdp_mmu_page set. |
| 1524 | * |
| 1525 | * For reads, this list is protected by: |
| 1526 | * RCU alone or |
| 1527 | * the MMU lock in read mode + RCU or |
| 1528 | * the MMU lock in write mode |
| 1529 | * |
| 1530 | * For writes, this list is protected by tdp_mmu_pages_lock; see |
| 1531 | * below for the details. |
| 1532 | * |
| 1533 | * Roots will remain in the list until their tdp_mmu_root_count |
| 1534 | * drops to zero, at which point the thread that decremented the |
| 1535 | * count to zero should removed the root from the list and clean |
| 1536 | * it up, freeing the root after an RCU grace period. |
| 1537 | */ |
| 1538 | struct list_head tdp_mmu_roots; |
| 1539 | |
| 1540 | /* |
| 1541 | * Protects accesses to the following fields when the MMU lock |
| 1542 | * is held in read mode: |
| 1543 | * - tdp_mmu_roots (above) |
| 1544 | * - the link field of kvm_mmu_page structs used by the TDP MMU |
| 1545 | * - possible_nx_huge_pages[KVM_TDP_MMU]; |
| 1546 | * - the possible_nx_huge_page_link field of kvm_mmu_page structs used |
| 1547 | * by the TDP MMU |
| 1548 | * Because the lock is only taken within the MMU lock, strictly |
| 1549 | * speaking it is redundant to acquire this lock when the thread |
| 1550 | * holds the MMU lock in write mode. However it often simplifies |
| 1551 | * the code to do so. |
| 1552 | */ |
| 1553 | spinlock_t tdp_mmu_pages_lock; |
| 1554 | #endif /* CONFIG_X86_64 */ |
| 1555 | |
| 1556 | /* |
| 1557 | * If set, at least one shadow root has been allocated. This flag |
| 1558 | * is used as one input when determining whether certain memslot |
| 1559 | * related allocations are necessary. |
| 1560 | */ |
| 1561 | bool shadow_root_allocated; |
| 1562 | |
| 1563 | #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING |
| 1564 | /* |
| 1565 | * If set, the VM has (or had) an external write tracking user, and |
| 1566 | * thus all write tracking metadata has been allocated, even if KVM |
| 1567 | * itself isn't using write tracking. |
| 1568 | */ |
| 1569 | bool external_write_tracking_enabled; |
| 1570 | #endif |
| 1571 | |
| 1572 | #if IS_ENABLED(CONFIG_HYPERV) |
| 1573 | hpa_t hv_root_tdp; |
| 1574 | spinlock_t hv_root_tdp_lock; |
| 1575 | struct hv_partition_assist_pg *hv_pa_pg; |
| 1576 | #endif |
| 1577 | /* |
| 1578 | * VM-scope maximum vCPU ID. Used to determine the size of structures |
| 1579 | * that increase along with the maximum vCPU ID, in which case, using |
| 1580 | * the global KVM_MAX_VCPU_IDS may lead to significant memory waste. |
| 1581 | */ |
| 1582 | u32 max_vcpu_ids; |
| 1583 | |
| 1584 | bool disable_nx_huge_pages; |
| 1585 | |
| 1586 | /* |
| 1587 | * Memory caches used to allocate shadow pages when performing eager |
| 1588 | * page splitting. No need for a shadowed_info_cache since eager page |
| 1589 | * splitting only allocates direct shadow pages. |
| 1590 | * |
| 1591 | * Protected by kvm->slots_lock. |
| 1592 | */ |
| 1593 | struct kvm_mmu_memory_cache split_shadow_page_cache; |
| 1594 | struct kvm_mmu_memory_cache ; |
| 1595 | |
| 1596 | /* |
| 1597 | * Memory cache used to allocate pte_list_desc structs while splitting |
| 1598 | * huge pages. In the worst case, to split one huge page, 512 |
| 1599 | * pte_list_desc structs are needed to add each lower level leaf sptep |
| 1600 | * to the rmap plus 1 to extend the parent_ptes rmap of the lower level |
| 1601 | * page table. |
| 1602 | * |
| 1603 | * Protected by kvm->slots_lock. |
| 1604 | */ |
| 1605 | #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1) |
| 1606 | struct kvm_mmu_memory_cache split_desc_cache; |
| 1607 | |
| 1608 | gfn_t gfn_direct_bits; |
| 1609 | |
| 1610 | /* |
| 1611 | * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A Zero |
| 1612 | * value indicates CPU dirty logging is unsupported or disabled in |
| 1613 | * current VM. |
| 1614 | */ |
| 1615 | int cpu_dirty_log_size; |
| 1616 | }; |
| 1617 | |
| 1618 | struct kvm_vm_stat { |
| 1619 | struct kvm_vm_stat_generic generic; |
| 1620 | u64 mmu_shadow_zapped; |
| 1621 | u64 mmu_pte_write; |
| 1622 | u64 mmu_pde_zapped; |
| 1623 | u64 mmu_flooded; |
| 1624 | u64 mmu_recycled; |
| 1625 | u64 mmu_cache_miss; |
| 1626 | u64 mmu_unsync; |
| 1627 | union { |
| 1628 | struct { |
| 1629 | atomic64_t pages_4k; |
| 1630 | atomic64_t pages_2m; |
| 1631 | atomic64_t pages_1g; |
| 1632 | }; |
| 1633 | atomic64_t pages[KVM_NR_PAGE_SIZES]; |
| 1634 | }; |
| 1635 | u64 nx_lpage_splits; |
| 1636 | u64 max_mmu_page_hash_collisions; |
| 1637 | u64 max_mmu_rmap_size; |
| 1638 | }; |
| 1639 | |
| 1640 | struct kvm_vcpu_stat { |
| 1641 | struct kvm_vcpu_stat_generic generic; |
| 1642 | u64 pf_taken; |
| 1643 | u64 pf_fixed; |
| 1644 | u64 pf_emulate; |
| 1645 | u64 pf_spurious; |
| 1646 | u64 pf_fast; |
| 1647 | u64 pf_mmio_spte_created; |
| 1648 | u64 pf_guest; |
| 1649 | u64 tlb_flush; |
| 1650 | u64 invlpg; |
| 1651 | |
| 1652 | u64 exits; |
| 1653 | u64 io_exits; |
| 1654 | u64 mmio_exits; |
| 1655 | u64 signal_exits; |
| 1656 | u64 irq_window_exits; |
| 1657 | u64 nmi_window_exits; |
| 1658 | u64 l1d_flush; |
| 1659 | u64 halt_exits; |
| 1660 | u64 request_irq_exits; |
| 1661 | u64 irq_exits; |
| 1662 | u64 host_state_reload; |
| 1663 | u64 fpu_reload; |
| 1664 | u64 insn_emulation; |
| 1665 | u64 insn_emulation_fail; |
| 1666 | u64 hypercalls; |
| 1667 | u64 irq_injections; |
| 1668 | u64 nmi_injections; |
| 1669 | u64 req_event; |
| 1670 | u64 nested_run; |
| 1671 | u64 directed_yield_attempted; |
| 1672 | u64 directed_yield_successful; |
| 1673 | u64 preemption_reported; |
| 1674 | u64 preemption_other; |
| 1675 | u64 guest_mode; |
| 1676 | u64 notify_window_exits; |
| 1677 | }; |
| 1678 | |
| 1679 | struct x86_instruction_info; |
| 1680 | |
| 1681 | struct msr_data { |
| 1682 | bool host_initiated; |
| 1683 | u32 index; |
| 1684 | u64 data; |
| 1685 | }; |
| 1686 | |
| 1687 | struct kvm_lapic_irq { |
| 1688 | u32 vector; |
| 1689 | u16 delivery_mode; |
| 1690 | u16 dest_mode; |
| 1691 | bool level; |
| 1692 | u16 trig_mode; |
| 1693 | u32 shorthand; |
| 1694 | u32 dest_id; |
| 1695 | bool msi_redir_hint; |
| 1696 | }; |
| 1697 | |
| 1698 | static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical) |
| 1699 | { |
| 1700 | return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL; |
| 1701 | } |
| 1702 | |
| 1703 | enum kvm_x86_run_flags { |
| 1704 | KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0), |
| 1705 | KVM_RUN_LOAD_GUEST_DR6 = BIT(1), |
| 1706 | KVM_RUN_LOAD_DEBUGCTL = BIT(2), |
| 1707 | }; |
| 1708 | |
| 1709 | struct kvm_x86_ops { |
| 1710 | const char *name; |
| 1711 | |
| 1712 | int (*check_processor_compatibility)(void); |
| 1713 | |
| 1714 | int (*enable_virtualization_cpu)(void); |
| 1715 | void (*disable_virtualization_cpu)(void); |
| 1716 | cpu_emergency_virt_cb *emergency_disable_virtualization_cpu; |
| 1717 | |
| 1718 | void (*hardware_unsetup)(void); |
| 1719 | bool (*has_emulated_msr)(struct kvm *kvm, u32 index); |
| 1720 | void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu); |
| 1721 | |
| 1722 | unsigned int vm_size; |
| 1723 | int (*vm_init)(struct kvm *kvm); |
| 1724 | void (*vm_destroy)(struct kvm *kvm); |
| 1725 | void (*vm_pre_destroy)(struct kvm *kvm); |
| 1726 | |
| 1727 | /* Create, but do not attach this VCPU */ |
| 1728 | int (*vcpu_precreate)(struct kvm *kvm); |
| 1729 | int (*vcpu_create)(struct kvm_vcpu *vcpu); |
| 1730 | void (*vcpu_free)(struct kvm_vcpu *vcpu); |
| 1731 | void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event); |
| 1732 | |
| 1733 | void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu); |
| 1734 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
| 1735 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
| 1736 | |
| 1737 | /* |
| 1738 | * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to |
| 1739 | * match the host's value even while the guest is active. |
| 1740 | */ |
| 1741 | const u64 HOST_OWNED_DEBUGCTL; |
| 1742 | |
| 1743 | void (*update_exception_bitmap)(struct kvm_vcpu *vcpu); |
| 1744 | int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 1745 | int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 1746 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
| 1747 | void (*get_segment)(struct kvm_vcpu *vcpu, |
| 1748 | struct kvm_segment *var, int seg); |
| 1749 | int (*get_cpl)(struct kvm_vcpu *vcpu); |
| 1750 | int (*get_cpl_no_cache)(struct kvm_vcpu *vcpu); |
| 1751 | void (*set_segment)(struct kvm_vcpu *vcpu, |
| 1752 | struct kvm_segment *var, int seg); |
| 1753 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); |
| 1754 | bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); |
| 1755 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); |
| 1756 | void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); |
| 1757 | bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); |
| 1758 | void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); |
| 1759 | int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); |
| 1760 | void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
| 1761 | void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
| 1762 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
| 1763 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
| 1764 | void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); |
| 1765 | void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); |
| 1766 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); |
| 1767 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); |
| 1768 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); |
| 1769 | bool (*get_if_flag)(struct kvm_vcpu *vcpu); |
| 1770 | |
| 1771 | void (*flush_tlb_all)(struct kvm_vcpu *vcpu); |
| 1772 | void (*flush_tlb_current)(struct kvm_vcpu *vcpu); |
| 1773 | #if IS_ENABLED(CONFIG_HYPERV) |
| 1774 | int (*flush_remote_tlbs)(struct kvm *kvm); |
| 1775 | int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn, |
| 1776 | gfn_t nr_pages); |
| 1777 | #endif |
| 1778 | |
| 1779 | /* |
| 1780 | * Flush any TLB entries associated with the given GVA. |
| 1781 | * Does not need to flush GPA->HPA mappings. |
| 1782 | * Can potentially get non-canonical addresses through INVLPGs, which |
| 1783 | * the implementation may choose to ignore if appropriate. |
| 1784 | */ |
| 1785 | void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr); |
| 1786 | |
| 1787 | /* |
| 1788 | * Flush any TLB entries created by the guest. Like tlb_flush_gva(), |
| 1789 | * does not need to flush GPA->HPA mappings. |
| 1790 | */ |
| 1791 | void (*flush_tlb_guest)(struct kvm_vcpu *vcpu); |
| 1792 | |
| 1793 | int (*vcpu_pre_run)(struct kvm_vcpu *vcpu); |
| 1794 | enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu, |
| 1795 | u64 run_flags); |
| 1796 | int (*handle_exit)(struct kvm_vcpu *vcpu, |
| 1797 | enum exit_fastpath_completion exit_fastpath); |
| 1798 | int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
| 1799 | void (*update_emulated_instruction)(struct kvm_vcpu *vcpu); |
| 1800 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
| 1801 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu); |
| 1802 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, |
| 1803 | unsigned char *hypercall_addr); |
| 1804 | void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected); |
| 1805 | void (*inject_nmi)(struct kvm_vcpu *vcpu); |
| 1806 | void (*inject_exception)(struct kvm_vcpu *vcpu); |
| 1807 | void (*cancel_injection)(struct kvm_vcpu *vcpu); |
| 1808 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection); |
| 1809 | int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); |
| 1810 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); |
| 1811 | void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); |
| 1812 | /* Whether or not a virtual NMI is pending in hardware. */ |
| 1813 | bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu); |
| 1814 | /* |
| 1815 | * Attempt to pend a virtual NMI in hardware. Returns %true on success |
| 1816 | * to allow using static_call_ret0 as the fallback. |
| 1817 | */ |
| 1818 | bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu); |
| 1819 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
| 1820 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
| 1821 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); |
| 1822 | |
| 1823 | const bool x2apic_icr_is_split; |
| 1824 | const unsigned long required_apicv_inhibits; |
| 1825 | bool allow_apicv_in_x2apic_without_x2apic_virtualization; |
| 1826 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); |
| 1827 | void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); |
| 1828 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); |
| 1829 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); |
| 1830 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu); |
| 1831 | void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode, |
| 1832 | int trig_mode, int vector); |
| 1833 | int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); |
| 1834 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
| 1835 | int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); |
| 1836 | u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); |
| 1837 | |
| 1838 | void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
| 1839 | int root_level); |
| 1840 | |
| 1841 | /* Update external mapping with page table link. */ |
| 1842 | int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level, |
| 1843 | void *external_spt); |
| 1844 | /* Update the external page table from spte getting set. */ |
| 1845 | int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level, |
| 1846 | u64 mirror_spte); |
| 1847 | |
| 1848 | /* Update external page tables for page table about to be freed. */ |
| 1849 | int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level, |
| 1850 | void *external_spt); |
| 1851 | |
| 1852 | /* Update external page table from spte getting removed, and flush TLB. */ |
| 1853 | void (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level, |
| 1854 | u64 mirror_spte); |
| 1855 | |
| 1856 | bool (*has_wbinvd_exit)(void); |
| 1857 | |
| 1858 | u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu); |
| 1859 | u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu); |
| 1860 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu); |
| 1861 | void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu); |
| 1862 | |
| 1863 | /* |
| 1864 | * Retrieve somewhat arbitrary exit/entry information. Intended to |
| 1865 | * be used only from within tracepoints or error paths. |
| 1866 | */ |
| 1867 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason, |
| 1868 | u64 *info1, u64 *info2, |
| 1869 | u32 *intr_info, u32 *error_code); |
| 1870 | |
| 1871 | void (*get_entry_info)(struct kvm_vcpu *vcpu, |
| 1872 | u32 *intr_info, u32 *error_code); |
| 1873 | |
| 1874 | int (*check_intercept)(struct kvm_vcpu *vcpu, |
| 1875 | struct x86_instruction_info *info, |
| 1876 | enum x86_intercept_stage stage, |
| 1877 | struct x86_exception *exception); |
| 1878 | void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu); |
| 1879 | |
| 1880 | void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu); |
| 1881 | |
| 1882 | const struct kvm_x86_nested_ops *nested_ops; |
| 1883 | |
| 1884 | void (*vcpu_blocking)(struct kvm_vcpu *vcpu); |
| 1885 | void (*vcpu_unblocking)(struct kvm_vcpu *vcpu); |
| 1886 | |
| 1887 | int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, |
| 1888 | unsigned int host_irq, uint32_t guest_irq, |
| 1889 | struct kvm_vcpu *vcpu, u32 vector); |
| 1890 | void (*pi_start_bypass)(struct kvm *kvm); |
| 1891 | void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu); |
| 1892 | void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); |
| 1893 | bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); |
| 1894 | bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu); |
| 1895 | |
| 1896 | int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, |
| 1897 | bool *expired); |
| 1898 | void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); |
| 1899 | |
| 1900 | void (*setup_mce)(struct kvm_vcpu *vcpu); |
| 1901 | |
| 1902 | #ifdef CONFIG_KVM_SMM |
| 1903 | int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); |
| 1904 | int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram); |
| 1905 | int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram); |
| 1906 | void (*enable_smi_window)(struct kvm_vcpu *vcpu); |
| 1907 | #endif |
| 1908 | |
| 1909 | int (*dev_get_attr)(u32 group, u64 attr, u64 *val); |
| 1910 | int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp); |
| 1911 | int (*vcpu_mem_enc_ioctl)(struct kvm_vcpu *vcpu, void __user *argp); |
| 1912 | int (*vcpu_mem_enc_unlocked_ioctl)(struct kvm_vcpu *vcpu, void __user *argp); |
| 1913 | int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
| 1914 | int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
| 1915 | int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); |
| 1916 | int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); |
| 1917 | void (*guest_memory_reclaimed)(struct kvm *kvm); |
| 1918 | |
| 1919 | int (*get_feature_msr)(u32 msr, u64 *data); |
| 1920 | |
| 1921 | int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type, |
| 1922 | void *insn, int insn_len); |
| 1923 | |
| 1924 | bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); |
| 1925 | int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu); |
| 1926 | |
| 1927 | void (*migrate_timers)(struct kvm_vcpu *vcpu); |
| 1928 | void (*recalc_intercepts)(struct kvm_vcpu *vcpu); |
| 1929 | int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err); |
| 1930 | |
| 1931 | void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector); |
| 1932 | |
| 1933 | /* |
| 1934 | * Returns vCPU specific APICv inhibit reasons |
| 1935 | */ |
| 1936 | unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); |
| 1937 | |
| 1938 | gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); |
| 1939 | void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu); |
| 1940 | int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); |
| 1941 | void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); |
| 1942 | int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); |
| 1943 | }; |
| 1944 | |
| 1945 | struct kvm_x86_nested_ops { |
| 1946 | void (*leave_nested)(struct kvm_vcpu *vcpu); |
| 1947 | bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector, |
| 1948 | u32 error_code); |
| 1949 | int (*check_events)(struct kvm_vcpu *vcpu); |
| 1950 | bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection); |
| 1951 | void (*triple_fault)(struct kvm_vcpu *vcpu); |
| 1952 | int (*get_state)(struct kvm_vcpu *vcpu, |
| 1953 | struct kvm_nested_state __user *user_kvm_nested_state, |
| 1954 | unsigned user_data_size); |
| 1955 | int (*set_state)(struct kvm_vcpu *vcpu, |
| 1956 | struct kvm_nested_state __user *user_kvm_nested_state, |
| 1957 | struct kvm_nested_state *kvm_state); |
| 1958 | bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu); |
| 1959 | int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); |
| 1960 | |
| 1961 | int (*enable_evmcs)(struct kvm_vcpu *vcpu, |
| 1962 | uint16_t *vmcs_version); |
| 1963 | uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); |
| 1964 | void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu); |
| 1965 | }; |
| 1966 | |
| 1967 | struct kvm_x86_init_ops { |
| 1968 | int (*hardware_setup)(void); |
| 1969 | unsigned int (*handle_intel_pt_intr)(void); |
| 1970 | |
| 1971 | struct kvm_x86_ops *runtime_ops; |
| 1972 | struct kvm_pmu_ops *pmu_ops; |
| 1973 | }; |
| 1974 | |
| 1975 | struct kvm_arch_async_pf { |
| 1976 | u32 token; |
| 1977 | gfn_t gfn; |
| 1978 | unsigned long cr3; |
| 1979 | bool direct_map; |
| 1980 | u64 error_code; |
| 1981 | }; |
| 1982 | |
| 1983 | extern u32 __read_mostly kvm_nr_uret_msrs; |
| 1984 | extern bool __read_mostly allow_smaller_maxphyaddr; |
| 1985 | extern bool __read_mostly enable_apicv; |
| 1986 | extern bool __read_mostly enable_ipiv; |
| 1987 | extern bool __read_mostly enable_device_posted_irqs; |
| 1988 | extern struct kvm_x86_ops kvm_x86_ops; |
| 1989 | |
| 1990 | #define kvm_x86_call(func) static_call(kvm_x86_##func) |
| 1991 | #define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func) |
| 1992 | |
| 1993 | #define KVM_X86_OP(func) \ |
| 1994 | DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); |
| 1995 | #define KVM_X86_OP_OPTIONAL KVM_X86_OP |
| 1996 | #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP |
| 1997 | #include <asm/kvm-x86-ops.h> |
| 1998 | |
| 1999 | int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops); |
| 2000 | void kvm_x86_vendor_exit(void); |
| 2001 | |
| 2002 | #define __KVM_HAVE_ARCH_VM_ALLOC |
| 2003 | static inline struct kvm *kvm_arch_alloc_vm(void) |
| 2004 | { |
| 2005 | return kvzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT); |
| 2006 | } |
| 2007 | |
| 2008 | #define __KVM_HAVE_ARCH_VM_FREE |
| 2009 | void kvm_arch_free_vm(struct kvm *kvm); |
| 2010 | |
| 2011 | #if IS_ENABLED(CONFIG_HYPERV) |
| 2012 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS |
| 2013 | static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) |
| 2014 | { |
| 2015 | if (kvm_x86_ops.flush_remote_tlbs && |
| 2016 | !kvm_x86_call(flush_remote_tlbs)(kvm)) |
| 2017 | return 0; |
| 2018 | else |
| 2019 | return -ENOTSUPP; |
| 2020 | } |
| 2021 | |
| 2022 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE |
| 2023 | static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, |
| 2024 | u64 nr_pages) |
| 2025 | { |
| 2026 | if (!kvm_x86_ops.flush_remote_tlbs_range) |
| 2027 | return -EOPNOTSUPP; |
| 2028 | |
| 2029 | return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages); |
| 2030 | } |
| 2031 | #endif /* CONFIG_HYPERV */ |
| 2032 | |
| 2033 | enum kvm_intr_type { |
| 2034 | /* Values are arbitrary, but must be non-zero. */ |
| 2035 | KVM_HANDLING_IRQ = 1, |
| 2036 | KVM_HANDLING_NMI, |
| 2037 | }; |
| 2038 | |
| 2039 | /* Enable perf NMI and timer modes to work, and minimise false positives. */ |
| 2040 | #define kvm_arch_pmi_in_guest(vcpu) \ |
| 2041 | ((vcpu) && (vcpu)->arch.handling_intr_from_guest && \ |
| 2042 | (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI))) |
| 2043 | |
| 2044 | void __init kvm_mmu_x86_module_init(void); |
| 2045 | int kvm_mmu_vendor_module_init(void); |
| 2046 | void kvm_mmu_vendor_module_exit(void); |
| 2047 | |
| 2048 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu); |
| 2049 | int kvm_mmu_create(struct kvm_vcpu *vcpu); |
| 2050 | int kvm_mmu_init_vm(struct kvm *kvm); |
| 2051 | void kvm_mmu_uninit_vm(struct kvm *kvm); |
| 2052 | |
| 2053 | void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm, |
| 2054 | struct kvm_memory_slot *slot); |
| 2055 | |
| 2056 | void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu); |
| 2057 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
| 2058 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
| 2059 | const struct kvm_memory_slot *memslot, |
| 2060 | int start_level); |
| 2061 | void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, |
| 2062 | const struct kvm_memory_slot *memslot, |
| 2063 | int target_level); |
| 2064 | void kvm_mmu_try_split_huge_pages(struct kvm *kvm, |
| 2065 | const struct kvm_memory_slot *memslot, |
| 2066 | u64 start, u64 end, |
| 2067 | int target_level); |
| 2068 | void kvm_mmu_recover_huge_pages(struct kvm *kvm, |
| 2069 | const struct kvm_memory_slot *memslot); |
| 2070 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
| 2071 | const struct kvm_memory_slot *memslot); |
| 2072 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); |
| 2073 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); |
| 2074 | void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end); |
| 2075 | |
| 2076 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
| 2077 | |
| 2078 | int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 2079 | const void *val, int bytes); |
| 2080 | |
| 2081 | extern bool tdp_enabled; |
| 2082 | |
| 2083 | u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu); |
| 2084 | |
| 2085 | /* |
| 2086 | * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing |
| 2087 | * userspace I/O) to indicate that the emulation context |
| 2088 | * should be reused as is, i.e. skip initialization of |
| 2089 | * emulation context, instruction fetch and decode. |
| 2090 | * |
| 2091 | * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware. |
| 2092 | * Indicates that only select instructions (tagged with |
| 2093 | * EmulateOnUD) should be emulated (to minimize the emulator |
| 2094 | * attack surface). See also EMULTYPE_TRAP_UD_FORCED. |
| 2095 | * |
| 2096 | * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to |
| 2097 | * decode the instruction length. For use *only* by |
| 2098 | * kvm_x86_ops.skip_emulated_instruction() implementations if |
| 2099 | * EMULTYPE_COMPLETE_USER_EXIT is not set. |
| 2100 | * |
| 2101 | * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to |
| 2102 | * retry native execution under certain conditions, |
| 2103 | * Can only be set in conjunction with EMULTYPE_PF. |
| 2104 | * |
| 2105 | * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was |
| 2106 | * triggered by KVM's magic "force emulation" prefix, |
| 2107 | * which is opt in via module param (off by default). |
| 2108 | * Bypasses EmulateOnUD restriction despite emulating |
| 2109 | * due to an intercepted #UD (see EMULTYPE_TRAP_UD). |
| 2110 | * Used to test the full emulator from userspace. |
| 2111 | * |
| 2112 | * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware |
| 2113 | * backdoor emulation, which is opt in via module param. |
| 2114 | * VMware backdoor emulation handles select instructions |
| 2115 | * and reinjects the #GP for all other cases. |
| 2116 | * |
| 2117 | * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case |
| 2118 | * the CR2/GPA value pass on the stack is valid. |
| 2119 | * |
| 2120 | * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility |
| 2121 | * state and inject single-step #DBs after skipping |
| 2122 | * an instruction (after completing userspace I/O). |
| 2123 | * |
| 2124 | * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that |
| 2125 | * is attempting to write a gfn that contains one or |
| 2126 | * more of the PTEs used to translate the write itself, |
| 2127 | * and the owning page table is being shadowed by KVM. |
| 2128 | * If emulation of the faulting instruction fails and |
| 2129 | * this flag is set, KVM will exit to userspace instead |
| 2130 | * of retrying emulation as KVM cannot make forward |
| 2131 | * progress. |
| 2132 | * |
| 2133 | * If emulation fails for a write to guest page tables, |
| 2134 | * KVM unprotects (zaps) the shadow page for the target |
| 2135 | * gfn and resumes the guest to retry the non-emulatable |
| 2136 | * instruction (on hardware). Unprotecting the gfn |
| 2137 | * doesn't allow forward progress for a self-changing |
| 2138 | * access because doing so also zaps the translation for |
| 2139 | * the gfn, i.e. retrying the instruction will hit a |
| 2140 | * !PRESENT fault, which results in a new shadow page |
| 2141 | * and sends KVM back to square one. |
| 2142 | * |
| 2143 | * EMULTYPE_SKIP_SOFT_INT - Set in combination with EMULTYPE_SKIP to only skip |
| 2144 | * an instruction if it could generate a given software |
| 2145 | * interrupt, which must be encoded via |
| 2146 | * EMULTYPE_SET_SOFT_INT_VECTOR(). |
| 2147 | */ |
| 2148 | #define EMULTYPE_NO_DECODE (1 << 0) |
| 2149 | #define EMULTYPE_TRAP_UD (1 << 1) |
| 2150 | #define EMULTYPE_SKIP (1 << 2) |
| 2151 | #define EMULTYPE_ALLOW_RETRY_PF (1 << 3) |
| 2152 | #define EMULTYPE_TRAP_UD_FORCED (1 << 4) |
| 2153 | #define EMULTYPE_VMWARE_GP (1 << 5) |
| 2154 | #define EMULTYPE_PF (1 << 6) |
| 2155 | #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7) |
| 2156 | #define EMULTYPE_WRITE_PF_TO_SP (1 << 8) |
| 2157 | #define EMULTYPE_SKIP_SOFT_INT (1 << 9) |
| 2158 | |
| 2159 | #define EMULTYPE_SET_SOFT_INT_VECTOR(v) ((u32)((v) & 0xff) << 16) |
| 2160 | #define EMULTYPE_GET_SOFT_INT_VECTOR(e) (((e) >> 16) & 0xff) |
| 2161 | |
| 2162 | static inline bool kvm_can_emulate_event_vectoring(int emul_type) |
| 2163 | { |
| 2164 | return !(emul_type & EMULTYPE_PF); |
| 2165 | } |
| 2166 | |
| 2167 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); |
| 2168 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, |
| 2169 | void *insn, int insn_len); |
| 2170 | void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, |
| 2171 | u64 *data, u8 ndata); |
| 2172 | void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu); |
| 2173 | |
| 2174 | void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa); |
| 2175 | void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason); |
| 2176 | |
| 2177 | void kvm_enable_efer_bits(u64); |
| 2178 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); |
| 2179 | int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data); |
| 2180 | int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data); |
| 2181 | int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data); |
| 2182 | int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data); |
| 2183 | int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data); |
| 2184 | int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data); |
| 2185 | int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu); |
| 2186 | int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg); |
| 2187 | int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu); |
| 2188 | int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg); |
| 2189 | int kvm_emulate_as_nop(struct kvm_vcpu *vcpu); |
| 2190 | int kvm_emulate_invd(struct kvm_vcpu *vcpu); |
| 2191 | int kvm_emulate_mwait(struct kvm_vcpu *vcpu); |
| 2192 | int kvm_handle_invalid_op(struct kvm_vcpu *vcpu); |
| 2193 | int kvm_emulate_monitor(struct kvm_vcpu *vcpu); |
| 2194 | |
| 2195 | int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in); |
| 2196 | int kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
| 2197 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); |
| 2198 | int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu); |
| 2199 | int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu); |
| 2200 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); |
| 2201 | |
| 2202 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
| 2203 | void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
| 2204 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); |
| 2205 | void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); |
| 2206 | |
| 2207 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, |
| 2208 | int reason, bool has_error_code, u32 error_code); |
| 2209 | |
| 2210 | void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0); |
| 2211 | void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4); |
| 2212 | int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
| 2213 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); |
| 2214 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
| 2215 | int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); |
| 2216 | int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); |
| 2217 | unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr); |
| 2218 | unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); |
| 2219 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); |
| 2220 | int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); |
| 2221 | int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); |
| 2222 | |
| 2223 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 2224 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); |
| 2225 | |
| 2226 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); |
| 2227 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); |
| 2228 | int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu); |
| 2229 | |
| 2230 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
| 2231 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
| 2232 | void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload); |
| 2233 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr, |
| 2234 | bool has_error_code, u32 error_code); |
| 2235 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); |
| 2236 | void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, |
| 2237 | struct x86_exception *fault); |
| 2238 | bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); |
| 2239 | bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr); |
| 2240 | |
| 2241 | static inline int __kvm_irq_line_state(unsigned long *irq_state, |
| 2242 | int irq_source_id, int level) |
| 2243 | { |
| 2244 | /* Logical OR for level trig interrupt */ |
| 2245 | if (level) |
| 2246 | __set_bit(irq_source_id, irq_state); |
| 2247 | else |
| 2248 | __clear_bit(irq_source_id, irq_state); |
| 2249 | |
| 2250 | return !!(*irq_state); |
| 2251 | } |
| 2252 | |
| 2253 | void kvm_inject_nmi(struct kvm_vcpu *vcpu); |
| 2254 | int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu); |
| 2255 | |
| 2256 | void kvm_update_dr7(struct kvm_vcpu *vcpu); |
| 2257 | |
| 2258 | bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 2259 | bool always_retry); |
| 2260 | |
| 2261 | static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, |
| 2262 | gpa_t cr2_or_gpa) |
| 2263 | { |
| 2264 | return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, always_retry: false); |
| 2265 | } |
| 2266 | |
| 2267 | void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, |
| 2268 | ulong roots_to_free); |
| 2269 | void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu); |
| 2270 | gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, |
| 2271 | struct x86_exception *exception); |
| 2272 | gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, |
| 2273 | struct x86_exception *exception); |
| 2274 | gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, |
| 2275 | struct x86_exception *exception); |
| 2276 | |
| 2277 | bool kvm_apicv_activated(struct kvm *kvm); |
| 2278 | bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu); |
| 2279 | void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu); |
| 2280 | void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, |
| 2281 | enum kvm_apicv_inhibit reason, bool set); |
| 2282 | void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, |
| 2283 | enum kvm_apicv_inhibit reason, bool set); |
| 2284 | |
| 2285 | static inline void kvm_set_apicv_inhibit(struct kvm *kvm, |
| 2286 | enum kvm_apicv_inhibit reason) |
| 2287 | { |
| 2288 | kvm_set_or_clear_apicv_inhibit(kvm, reason, set: true); |
| 2289 | } |
| 2290 | |
| 2291 | static inline void kvm_clear_apicv_inhibit(struct kvm *kvm, |
| 2292 | enum kvm_apicv_inhibit reason) |
| 2293 | { |
| 2294 | kvm_set_or_clear_apicv_inhibit(kvm, reason, set: false); |
| 2295 | } |
| 2296 | |
| 2297 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, |
| 2298 | void *insn, int insn_len); |
| 2299 | void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg); |
| 2300 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); |
| 2301 | void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 2302 | u64 addr, unsigned long roots); |
| 2303 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); |
| 2304 | void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd); |
| 2305 | |
| 2306 | void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, |
| 2307 | int tdp_max_root_level, int tdp_huge_page_level); |
| 2308 | |
| 2309 | |
| 2310 | #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES |
| 2311 | #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) |
| 2312 | #endif |
| 2313 | |
| 2314 | #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state) |
| 2315 | |
| 2316 | static inline u16 kvm_read_ldt(void) |
| 2317 | { |
| 2318 | u16 ldt; |
| 2319 | asm("sldt %0" : "=g" (ldt)); |
| 2320 | return ldt; |
| 2321 | } |
| 2322 | |
| 2323 | static inline void kvm_load_ldt(u16 sel) |
| 2324 | { |
| 2325 | asm("lldt %0" : : "rm" (sel)); |
| 2326 | } |
| 2327 | |
| 2328 | #ifdef CONFIG_X86_64 |
| 2329 | static inline unsigned long read_msr(unsigned long msr) |
| 2330 | { |
| 2331 | u64 value; |
| 2332 | |
| 2333 | rdmsrq(msr, value); |
| 2334 | return value; |
| 2335 | } |
| 2336 | #endif |
| 2337 | |
| 2338 | static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) |
| 2339 | { |
| 2340 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
| 2341 | } |
| 2342 | |
| 2343 | #define TSS_IOPB_BASE_OFFSET 0x66 |
| 2344 | #define TSS_BASE_SIZE 0x68 |
| 2345 | #define TSS_IOPB_SIZE (65536 / 8) |
| 2346 | #define TSS_REDIRECTION_SIZE (256 / 8) |
| 2347 | #define RMODE_TSS_SIZE \ |
| 2348 | (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) |
| 2349 | |
| 2350 | enum { |
| 2351 | TASK_SWITCH_CALL = 0, |
| 2352 | TASK_SWITCH_IRET = 1, |
| 2353 | TASK_SWITCH_JMP = 2, |
| 2354 | TASK_SWITCH_GATE = 3, |
| 2355 | }; |
| 2356 | |
| 2357 | #define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */ |
| 2358 | |
| 2359 | #ifdef CONFIG_KVM_SMM |
| 2360 | #define HF_SMM_MASK (1 << 1) |
| 2361 | #define HF_SMM_INSIDE_NMI_MASK (1 << 2) |
| 2362 | |
| 2363 | # define KVM_MAX_NR_ADDRESS_SPACES 2 |
| 2364 | /* SMM is currently unsupported for guests with private memory. */ |
| 2365 | # define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2) |
| 2366 | # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) |
| 2367 | # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) |
| 2368 | #else |
| 2369 | # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) |
| 2370 | #endif |
| 2371 | |
| 2372 | int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); |
| 2373 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
| 2374 | int kvm_cpu_has_extint(struct kvm_vcpu *v); |
| 2375 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
| 2376 | int kvm_cpu_get_extint(struct kvm_vcpu *v); |
| 2377 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
| 2378 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); |
| 2379 | |
| 2380 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, |
| 2381 | unsigned long ipi_bitmap_high, u32 min, |
| 2382 | unsigned long icr, int op_64_bit); |
| 2383 | |
| 2384 | int kvm_add_user_return_msr(u32 msr); |
| 2385 | int kvm_find_user_return_msr(u32 msr); |
| 2386 | int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask); |
| 2387 | u64 kvm_get_user_return_msr(unsigned int slot); |
| 2388 | |
| 2389 | static inline bool kvm_is_supported_user_return_msr(u32 msr) |
| 2390 | { |
| 2391 | return kvm_find_user_return_msr(msr) >= 0; |
| 2392 | } |
| 2393 | |
| 2394 | u64 kvm_scale_tsc(u64 tsc, u64 ratio); |
| 2395 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc); |
| 2396 | u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier); |
| 2397 | u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier); |
| 2398 | |
| 2399 | unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu); |
| 2400 | bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); |
| 2401 | |
| 2402 | void kvm_make_scan_ioapic_request(struct kvm *kvm); |
| 2403 | void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, |
| 2404 | unsigned long *vcpu_bitmap); |
| 2405 | |
| 2406 | bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, |
| 2407 | struct kvm_async_pf *work); |
| 2408 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, |
| 2409 | struct kvm_async_pf *work); |
| 2410 | void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, |
| 2411 | struct kvm_async_pf *work); |
| 2412 | void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu); |
| 2413 | bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu); |
| 2414 | extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
| 2415 | |
| 2416 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); |
| 2417 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); |
| 2418 | |
| 2419 | void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, |
| 2420 | u32 size); |
| 2421 | bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); |
| 2422 | bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); |
| 2423 | |
| 2424 | static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) |
| 2425 | { |
| 2426 | /* We can only post Fixed and LowPrio IRQs */ |
| 2427 | return (irq->delivery_mode == APIC_DM_FIXED || |
| 2428 | irq->delivery_mode == APIC_DM_LOWEST); |
| 2429 | } |
| 2430 | |
| 2431 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
| 2432 | { |
| 2433 | kvm_x86_call(vcpu_blocking)(vcpu); |
| 2434 | } |
| 2435 | |
| 2436 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) |
| 2437 | { |
| 2438 | kvm_x86_call(vcpu_unblocking)(vcpu); |
| 2439 | } |
| 2440 | |
| 2441 | static inline int kvm_cpu_get_apicid(int mps_cpu) |
| 2442 | { |
| 2443 | #ifdef CONFIG_X86_LOCAL_APIC |
| 2444 | return default_cpu_present_to_apicid(mps_cpu); |
| 2445 | #else |
| 2446 | WARN_ON_ONCE(1); |
| 2447 | return BAD_APICID; |
| 2448 | #endif |
| 2449 | } |
| 2450 | |
| 2451 | int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); |
| 2452 | |
| 2453 | #define KVM_CLOCK_VALID_FLAGS \ |
| 2454 | (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) |
| 2455 | |
| 2456 | #define KVM_X86_VALID_QUIRKS \ |
| 2457 | (KVM_X86_QUIRK_LINT0_REENABLED | \ |
| 2458 | KVM_X86_QUIRK_CD_NW_CLEARED | \ |
| 2459 | KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \ |
| 2460 | KVM_X86_QUIRK_OUT_7E_INC_RIP | \ |
| 2461 | KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \ |
| 2462 | KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \ |
| 2463 | KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \ |
| 2464 | KVM_X86_QUIRK_SLOT_ZAP_ALL | \ |
| 2465 | KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \ |
| 2466 | KVM_X86_QUIRK_IGNORE_GUEST_PAT) |
| 2467 | |
| 2468 | #define KVM_X86_CONDITIONAL_QUIRKS \ |
| 2469 | (KVM_X86_QUIRK_CD_NW_CLEARED | \ |
| 2470 | KVM_X86_QUIRK_IGNORE_GUEST_PAT) |
| 2471 | |
| 2472 | /* |
| 2473 | * KVM previously used a u32 field in kvm_run to indicate the hypercall was |
| 2474 | * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the |
| 2475 | * remaining 31 lower bits must be 0 to preserve ABI. |
| 2476 | */ |
| 2477 | #define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1) |
| 2478 | |
| 2479 | static inline bool kvm_arch_has_irq_bypass(void) |
| 2480 | { |
| 2481 | return enable_device_posted_irqs; |
| 2482 | } |
| 2483 | |
| 2484 | #endif /* _ASM_X86_KVM_HOST_H */ |
| 2485 | |