/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLER__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
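/*
 * 64-bit kernels run on init_top_pgt (swapper_pg_dir) directly, so
 * there is no separate early page table to synchronize; this is a
 * no-op, unlike the 32-bit implementation.
 */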
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

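/*
 * With 4-level paging the p4d level is folded into the pgd, so an mm
 * does not account a separate page-table page for it.
 */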
#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

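/*
 * On SMP the clear must be a single atomic read-modify-write (xchg) so
 * that a concurrent hardware Accessed/Dirty bit update cannot be lost
 * between reading the entry and clearing it.
 */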
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/* native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

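/*
 * With 4-level paging the p4d is folded into the pgd, so a p4d write
 * is really a pgd write and, under page table isolation, must also be
 * propagated to the user-space copy of the page tables via
 * pti_set_user_pgtbl().
 */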
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled() ||
	    !IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION)) {
		WRITE_ONCE(*p4dp, p4d);
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* PGD - Level 4 access */

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| E|F|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD Bits 1-4 are not used in non-present format and available for
 * special use described below:
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
 *
 * F (2) in swp entry is used to record when a pagetable is
 * writeprotected by userfaultfd WP support.
 *
 * E (3) in swp entry is used to remember PG_anon_exclusive.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x)		((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x)		(~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
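/*
 * Worked example (a sketch, assuming _PAGE_BIT_PROTNONE == 8, so
 * SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14):
 *
 *	__swp_entry(1, 2) stores ~2 in bits 9-58 and the type 1 in
 *	bits 59-63.  __swp_type() recovers the type with val >> 59;
 *	__swp_offset() recovers the offset with ~val << 5 >> 14, which
 *	drops the type bits and undoes the inversion.
 *
 * Because .val is unsigned long, the double shift in __swp_offset()
 * also zero-extends the result rather than sign-extending it.
 */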

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		(__pte((x).val))
#define __swp_entry_to_pmd(x)		(__pmd((x).val))

extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
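/*
 * Example (a sketch, assuming a 4-level __VIRTUAL_MASK_SHIFT of 47):
 * kc_vaddr_to_offset(0xffff888000000000) masks off the sign-extended
 * upper bits, giving 0x0000088000000000; kc_offset_to_vaddr() ORs them
 * back in, so the round trip reproduces the canonical kernel address.
 */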

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

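/*
 * Fast GUP must not walk page tables for addresses that reach into the
 * kernel half or the non-canonical hole, so reject any range whose end
 * has bits above __VIRTUAL_MASK_SHIFT set.
 */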
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#include <asm/pgtable-invert.h>

#else /* __ASSEMBLER__ */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
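/*
 * Usage sketch (hypothetical invocation; the real callers live in
 * head_64.S): mapping the first 1 GiB with 2 MiB large pages would be
 *
 *	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 *
 * which expands to 512 .quad entries, each 1 << PMD_SHIFT (2 MiB) apart.
 */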

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PGTABLE_64_H */