/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>
#include <asm/desc.h>

extern atomic64_t last_mm_ctx_id;

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
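
/*
 * Illustrative sketch of the two-slot dance (simplified from the
 * write_ldt() path in arch/x86/kernel/ldt.c, not literal code): the new
 * LDT is aliased at whichever slot the old LDT is NOT using, so the old
 * alias stays valid until the switch is complete:
 *
 *	map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
 *	install_ldt(mm, new_ldt);	// CPUs start using the new alias
 *	free_ldt_struct(old_ldt);	// old alias was live until here
 */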

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	/*
	 * When switch_mm_irqs_off() is called for a kthread, it may race with
	 * LAM enablement. switch_mm_irqs_off() uses the LAM mask to do two
	 * things: populate CR3 and populate 'cpu_tlbstate.lam'. Make sure it
	 * reads a single value for both.
	 */
	return READ_ONCE(mm->context.lam_cr3_mask);
}
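
/*
 * Illustrative sketch (not the literal switch_mm_irqs_off() body) of why
 * a single snapshot matters; both consumers must use the same value even
 * if LAM is being enabled concurrently:
 *
 *	unsigned long lam = mm_lam_cr3_mask(next);	// read exactly once
 *
 *	write_cr3(build_cr3(next->pgd, asid, lam));	// consumer 1: CR3
 *	this_cpu_write(cpu_tlbstate.lam,		// consumer 2: per-CPU
 *		       lam >> X86_CR3_LAM_U57_BIT);	//   LAM state
 *
 * Reading mm->context.lam_cr3_mask twice could hand one consumer 0 and
 * the other the LAM bits.
 */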

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
	mm->context.untag_mask = oldmm->context.untag_mask;
}

#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return mm->context.untag_mask;
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	mm->context.untag_mask = -1UL;
}

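/*
 * With LAM enabled, userspace may legitimately pass tagged pointers, which
 * a device sharing the page tables (SVA) cannot untag; such an mm is only
 * considered DMA-compatible if userspace has opted in via
 * ARCH_FORCE_TAGGED_SVA and promises to hand devices untagged addresses.
 */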
#define arch_pgtable_dma_compat arch_pgtable_dma_compat
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return !mm_lam_cr3_mask(mm) ||
		test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
}
#else

static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

#define mm_init_global_asid mm_init_global_asid
extern void mm_init_global_asid(struct mm_struct *mm);

extern void mm_free_global_asid(struct mm_struct *mm);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);
	mm->context.next_trim_cpumask = jiffies + HZ;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif

	mm_init_global_asid(mm);
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}
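
/*
 * Lifecycle sketch (illustrative; the callers live in kernel/fork.c):
 *
 *	mm_init(mm, ...)	-> init_new_context(tsk, mm)
 *	... mm is live, switch_mm() may run on it ...
 *	__mmdrop(mm)		-> destroy_context(mm)
 *
 * so every resource taken above needs a matching release in
 * destroy_context() below.
 */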

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
	mm_free_global_asid(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)				\
do {							\
	paravirt_enter_mmap(next);			\
	switch_mm_irqs_off((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline bool is_notrack_mm(struct mm_struct *mm)
{
	return test_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
}

static inline void set_notrack_mm(struct mm_struct *mm)
{
	set_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
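
/*
 * Illustrative consequence of the rules above (userspace view, not kernel
 * code): a thread that revokes write access for a pkey it assigned,
 *
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	// WRPKRU then sets the write-disable bit for 'pkey'
 *
 * has its own writes to 'addr' refused here, while a foreign access such
 * as get_user_pages_remote() from another process passes the check,
 * because that caller's PKRU is meaningless for this mm.
 */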

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

extern struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm);
extern void unuse_temporary_mm(struct mm_struct *prev_mm);
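
/*
 * Pairing sketch (illustrative): a temporary mm is switched to for a short
 * critical section (e.g. text poking) and the caller must restore the mm
 * that use_temporary_mm() returned:
 *
 *	struct mm_struct *prev;
 *
 *	prev = use_temporary_mm(temp_mm);
 *	// ... touch mappings that exist only in temp_mm ...
 *	unuse_temporary_mm(prev);
 */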

#endif /* _ASM_X86_MMU_CONTEXT_H */