/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
	UCODE_TIMEOUT,
	UCODE_OFFLINE,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on a target CPU when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state	(*apply_microcode)(int cpu);
	void			(*stage_microcode)(void);
	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void			(*finalize_late_load)(int result);
	unsigned int		nmi_safe	: 1,
				use_nmi		: 1,
				use_staging	: 1;
};
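
/*
 * Illustrative sketch only (not part of this header): a vendor driver
 * typically fills in a static instance of the ops above and returns it
 * from its init_<vendor>_microcode() hook. All fake_* names below are
 * hypothetical.
 *
 *	static enum ucode_state fake_apply(int cpu)
 *	{
 *		// microcode_core guarantees this runs on @cpu.
 *		return UCODE_UPDATED;
 *	}
 *
 *	static struct microcode_ops fake_ucode_ops = {
 *		.request_microcode_fw	= fake_request_fw,
 *		.collect_cpu_info	= fake_collect_cpu_info,
 *		.apply_microcode	= fake_apply,
 *		.nmi_safe		= 1,
 *	};
 */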

struct early_load_data {
	u32 old_rev;
	u32 new_rev;
};

extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
extern u32 microcode_rev[NR_CPUS];
extern u32 base_rev;

struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx) \
	(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
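
/*
 * Worked example: CPUID leaf 0 returns the vendor string in EBX, EDX and
 * ECX (in that order). For "GenuineIntel", EBX holds "Genu", i.e.
 * QCHAR('G', 'e', 'n', 'u') == 0x756e6547, EDX holds "ineI" and ECX holds
 * "ntel". CPUID_IS() XORs each register with its expected quadruplet and
 * ORs the results, so the expression is zero (and the ! makes it true)
 * only when all three registers match.
 */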

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data has
 * not been set up yet, so x86_cpuid_vendor() reads the vendor ID straight
 * from CPUID.
 *
 * On 32-bit, an AP would need a linear address to access boot_cpu_data.
 * To keep the code simple, x86_cpuid_vendor() is used for the APs as well.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#define ucode_dbg(fmt, ...)					\
({								\
	if (IS_ENABLED(CONFIG_MICROCODE_DBG))			\
		pr_info(fmt, ##__VA_ARGS__);			\
})
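
/*
 * Example use (illustrative; the pr_info() is compiled out unless
 * CONFIG_MICROCODE_DBG is enabled):
 *
 *	ucode_dbg("CPU%d: updated to revision 0x%x\n", cpu, rev);
 */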

#endif /* _X86_MICROCODE_INTERNAL_H */