| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * Based on arch/arm/mm/mmu.c |
| 4 | * |
| 5 | * Copyright (C) 1995-2005 Russell King |
| 6 | * Copyright (C) 2012 ARM Ltd. |
| 7 | */ |
| 8 | |
| 9 | #include <linux/cache.h> |
| 10 | #include <linux/export.h> |
| 11 | #include <linux/kernel.h> |
| 12 | #include <linux/errno.h> |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/ioport.h> |
| 15 | #include <linux/kexec.h> |
| 16 | #include <linux/libfdt.h> |
| 17 | #include <linux/mman.h> |
| 18 | #include <linux/nodemask.h> |
| 19 | #include <linux/memblock.h> |
| 20 | #include <linux/memremap.h> |
| 21 | #include <linux/memory.h> |
| 22 | #include <linux/fs.h> |
| 23 | #include <linux/io.h> |
| 24 | #include <linux/mm.h> |
| 25 | #include <linux/vmalloc.h> |
| 26 | #include <linux/set_memory.h> |
| 27 | #include <linux/kfence.h> |
| 28 | #include <linux/pkeys.h> |
| 29 | #include <linux/mm_inline.h> |
| 30 | #include <linux/pagewalk.h> |
| 31 | #include <linux/stop_machine.h> |
| 32 | |
| 33 | #include <asm/barrier.h> |
| 34 | #include <asm/cputype.h> |
| 35 | #include <asm/fixmap.h> |
| 36 | #include <asm/kasan.h> |
| 37 | #include <asm/kernel-pgtable.h> |
| 38 | #include <asm/sections.h> |
| 39 | #include <asm/setup.h> |
| 40 | #include <linux/sizes.h> |
| 41 | #include <asm/tlb.h> |
| 42 | #include <asm/mmu_context.h> |
| 43 | #include <asm/ptdump.h> |
| 44 | #include <asm/tlbflush.h> |
| 45 | #include <asm/pgalloc.h> |
| 46 | #include <asm/kfence.h> |
| 47 | |
| 48 | #define NO_BLOCK_MAPPINGS BIT(0) |
| 49 | #define NO_CONT_MAPPINGS BIT(1) |
| 50 | #define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ |
| 51 | |
| 52 | DEFINE_STATIC_KEY_FALSE(arm64_ptdump_lock_key); |
| 53 | |
| 54 | u64 kimage_voffset __ro_after_init; |
| 55 | EXPORT_SYMBOL(kimage_voffset); |
| 56 | |
| 57 | u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 }; |
| 58 | |
| 59 | static bool rodata_is_rw __ro_after_init = true; |
| 60 | |
| 61 | /* |
| 62 | * The booting CPU updates the failed status @__early_cpu_boot_status |
| 63 | * with the MMU turned off. |
| 64 | */ |
| 65 | long __section(".mmuoff.data.write") __early_cpu_boot_status; |
| 66 | |
| 67 | /* |
| 68 | * Empty_zero_page is a special page that is used for zero-initialized data |
| 69 | * and COW. |
| 70 | */ |
| 71 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; |
| 72 | EXPORT_SYMBOL(empty_zero_page); |
| 73 | |
| 74 | static DEFINE_SPINLOCK(swapper_pgdir_lock); |
| 75 | static DEFINE_MUTEX(fixmap_lock); |
| 76 | |
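| | /* |
| |  * Update an entry in swapper_pg_dir. Once the kernel mapping of |
| |  * swapper_pg_dir has been made read-only, the write is done through a |
| |  * temporary fixmap slot, serialised by swapper_pgdir_lock. |
| |  */ |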
| 77 | void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) |
| 78 | { |
| 79 | pgd_t *fixmap_pgdp; |
| 80 | |
| 81 | /* |
| 82 | * Don't bother with the fixmap if swapper_pg_dir is still mapped |
| 83 | * writable in the kernel mapping. |
| 84 | */ |
| 85 | if (rodata_is_rw) { |
| 86 | WRITE_ONCE(*pgdp, pgd); |
| 87 | dsb(ishst); |
| 88 | isb(); |
| 89 | return; |
| 90 | } |
| 91 | |
| 92 | spin_lock(&swapper_pgdir_lock); |
| 93 | fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp)); |
| 94 | WRITE_ONCE(*fixmap_pgdp, pgd); |
| 95 | /* |
| 96 | * We need dsb(ishst) here to ensure the page-table-walker sees |
| 97 | * our new entry before set_p?d() returns. The fixmap's |
| 98 | * flush_tlb_kernel_range() via clear_fixmap() does this for us. |
| 99 | */ |
| 100 | pgd_clear_fixmap(); |
| 101 | spin_unlock(&swapper_pgdir_lock); |
| 102 | } |
| 103 | |
| 104 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
| 105 | unsigned long size, pgprot_t vma_prot) |
| 106 | { |
| 107 | if (!pfn_is_map_memory(pfn)) |
| 108 | return pgprot_noncached(vma_prot); |
| 109 | else if (file->f_flags & O_SYNC) |
| 110 | return pgprot_writecombine(vma_prot); |
| 111 | return vma_prot; |
| 112 | } |
| 113 | EXPORT_SYMBOL(phys_mem_access_prot); |
| 114 | |
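| | /* |
| |  * Allocate a page-table page from memblock. Only used while the initial |
| |  * kernel page tables are built, before the buddy allocator is available. |
| |  */ |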
| 115 | static phys_addr_t __init early_pgtable_alloc(enum pgtable_type pgtable_type) |
| 116 | { |
| 117 | phys_addr_t phys; |
| 118 | |
| 119 | phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, |
| 120 | MEMBLOCK_ALLOC_NOLEAKTRACE); |
| 121 | if (!phys) |
| 122 | panic("Failed to allocate page table page\n"); |
| 123 | |
| 124 | return phys; |
| 125 | } |
| 126 | |
| 127 | bool pgattr_change_is_safe(pteval_t old, pteval_t new) |
| 128 | { |
| 129 | /* |
| 130 | * The following mapping attributes may be updated in live |
| 131 | * kernel mappings without the need for break-before-make. |
| 132 | */ |
| 133 | pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG | |
| 134 | PTE_SWBITS_MASK; |
| 135 | |
| 136 | /* creating or taking down mappings is always safe */ |
| 137 | if (!pte_valid(__pte(old)) || !pte_valid(__pte(new))) |
| 138 | return true; |
| 139 | |
| 140 | /* A live entry's pfn should not change */ |
| 141 | if (pte_pfn(__pte(old)) != pte_pfn(__pte(new))) |
| 142 | return false; |
| 143 | |
| 144 | /* live contiguous mappings may not be manipulated at all */ |
| 145 | if ((old | new) & PTE_CONT) |
| 146 | return false; |
| 147 | |
| 148 | /* Transitioning from Non-Global to Global is unsafe */ |
| 149 | if (old & ~new & PTE_NG) |
| 150 | return false; |
| 151 | |
| 152 | /* |
| 153 | * Changing the memory type between Normal and Normal-Tagged is safe |
| 154 | * since Tagged is considered a permission attribute from the |
| 155 | * mismatched attribute aliases perspective. |
| 156 | */ |
| 157 | if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || |
| 158 | (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) && |
| 159 | ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || |
| 160 | (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED))) |
| 161 | mask |= PTE_ATTRINDX_MASK; |
| 162 | |
| 163 | return ((old ^ new) & ~mask) == 0; |
| 164 | } |
| 165 | |
| 166 | static void init_clear_pgtable(void *table) |
| 167 | { |
| 168 | clear_page(table); |
| 169 | |
| 170 | /* Ensure the zeroing is observed by page table walks. */ |
| 171 | dsb(ishst); |
| 172 | } |
| 173 | |
| 174 | static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end, |
| 175 | phys_addr_t phys, pgprot_t prot) |
| 176 | { |
| 177 | do { |
| 178 | pte_t old_pte = __ptep_get(ptep); |
| 179 | |
| 180 | /* |
| 181 | * Required barriers to make this visible to the table walker |
| 182 | * are deferred to the end of alloc_init_cont_pte(). |
| 183 | */ |
| 184 | __set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot)); |
| 185 | |
| 186 | /* |
| 187 | * After the PTE entry has been populated once, we |
| 188 | * only allow updates to the permission attributes. |
| 189 | */ |
| 190 | BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), |
| 191 | pte_val(__ptep_get(ptep)))); |
| 192 | |
| 193 | phys += PAGE_SIZE; |
| 194 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
| 195 | } |
| 196 | |
| 197 | static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, |
| 198 | unsigned long end, phys_addr_t phys, |
| 199 | pgprot_t prot, |
| 200 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 201 | int flags) |
| 202 | { |
| 203 | unsigned long next; |
| 204 | pmd_t pmd = READ_ONCE(*pmdp); |
| 205 | pte_t *ptep; |
| 206 | |
| 207 | BUG_ON(pmd_sect(pmd)); |
| 208 | if (pmd_none(pmd)) { |
| 209 | pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF; |
| 210 | phys_addr_t pte_phys; |
| 211 | |
| 212 | if (flags & NO_EXEC_MAPPINGS) |
| 213 | pmdval |= PMD_TABLE_PXN; |
| 214 | BUG_ON(!pgtable_alloc); |
| 215 | pte_phys = pgtable_alloc(TABLE_PTE); |
| 216 | if (pte_phys == INVALID_PHYS_ADDR) |
| 217 | return -ENOMEM; |
| 218 | ptep = pte_set_fixmap(pte_phys); |
| 219 | init_clear_pgtable(ptep); |
| 220 | ptep += pte_index(addr); |
| 221 | __pmd_populate(pmdp, pte_phys, pmdval); |
| 222 | } else { |
| 223 | BUG_ON(pmd_bad(pmd)); |
| 224 | ptep = pte_set_fixmap_offset(pmdp, addr); |
| 225 | } |
| 226 | |
| 227 | do { |
| 228 | pgprot_t __prot = prot; |
| 229 | |
| 230 | next = pte_cont_addr_end(addr, end); |
| 231 | |
| 232 | /* use a contiguous mapping if the range is suitably aligned */ |
| 233 | if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && |
| 234 | (flags & NO_CONT_MAPPINGS) == 0) |
| 235 | __prot = __pgprot(pgprot_val(prot) | PTE_CONT); |
| 236 | |
| 237 | init_pte(ptep, addr, next, phys, __prot); |
| 238 | |
| 239 | ptep += pte_index(next) - pte_index(addr); |
| 240 | phys += next - addr; |
| 241 | } while (addr = next, addr != end); |
| 242 | |
| 243 | /* |
| 244 | * Note: barriers and maintenance necessary to clear the fixmap slot |
| 245 | * ensure that all previous pgtable writes are visible to the table |
| 246 | * walker. |
| 247 | */ |
| 248 | pte_clear_fixmap(); |
| 249 | |
| 250 | return 0; |
| 251 | } |
| 252 | |
| 253 | static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, |
| 254 | phys_addr_t phys, pgprot_t prot, |
| 255 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) |
| 256 | { |
| 257 | unsigned long next; |
| 258 | |
| 259 | do { |
| 260 | pmd_t old_pmd = READ_ONCE(*pmdp); |
| 261 | |
| 262 | next = pmd_addr_end(addr, end); |
| 263 | |
| 264 | /* try section mapping first */ |
| 265 | if (((addr | next | phys) & ~PMD_MASK) == 0 && |
| 266 | (flags & NO_BLOCK_MAPPINGS) == 0) { |
| 267 | pmd_set_huge(pmdp, phys, prot); |
| 268 | |
| 269 | /* |
| 270 | * After the PMD entry has been populated once, we |
| 271 | * only allow updates to the permission attributes. |
| 272 | */ |
| 273 | BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), |
| 274 | READ_ONCE(pmd_val(*pmdp)))); |
| 275 | } else { |
| 276 | int ret; |
| 277 | |
| 278 | ret = alloc_init_cont_pte(pmdp, addr, next, phys, prot, |
| 279 | pgtable_alloc, flags); |
| 280 | if (ret) |
| 281 | return ret; |
| 282 | |
| 283 | BUG_ON(pmd_val(old_pmd) != 0 && |
| 284 | pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp))); |
| 285 | } |
| 286 | phys += next - addr; |
| 287 | } while (pmdp++, addr = next, addr != end); |
| 288 | |
| 289 | return 0; |
| 290 | } |
| 291 | |
| 292 | static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, |
| 293 | unsigned long end, phys_addr_t phys, |
| 294 | pgprot_t prot, |
| 295 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 296 | int flags) |
| 297 | { |
| 298 | int ret; |
| 299 | unsigned long next; |
| 300 | pud_t pud = READ_ONCE(*pudp); |
| 301 | pmd_t *pmdp; |
| 302 | |
| 303 | /* |
| 304 | * Check for initial section mappings in the pgd/pud. |
| 305 | */ |
| 306 | BUG_ON(pud_sect(pud)); |
| 307 | if (pud_none(pud)) { |
| 308 | pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF; |
| 309 | phys_addr_t pmd_phys; |
| 310 | |
| 311 | if (flags & NO_EXEC_MAPPINGS) |
| 312 | pudval |= PUD_TABLE_PXN; |
| 313 | BUG_ON(!pgtable_alloc); |
| 314 | pmd_phys = pgtable_alloc(TABLE_PMD); |
| 315 | if (pmd_phys == INVALID_PHYS_ADDR) |
| 316 | return -ENOMEM; |
| 317 | pmdp = pmd_set_fixmap(pmd_phys); |
| 318 | init_clear_pgtable(pmdp); |
| 319 | pmdp += pmd_index(addr); |
| 320 | __pud_populate(pudp, pmd_phys, pudval); |
| 321 | } else { |
| 322 | BUG_ON(pud_bad(pud)); |
| 323 | pmdp = pmd_set_fixmap_offset(pudp, addr); |
| 324 | } |
| 325 | |
| 326 | do { |
| 327 | pgprot_t __prot = prot; |
| 328 | |
| 329 | next = pmd_cont_addr_end(addr, end); |
| 330 | |
| 331 | /* use a contiguous mapping if the range is suitably aligned */ |
| 332 | if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) && |
| 333 | (flags & NO_CONT_MAPPINGS) == 0) |
| 334 | __prot = __pgprot(pgprot_val(prot) | PTE_CONT); |
| 335 | |
| 336 | ret = init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags); |
| 337 | if (ret) |
| 338 | goto out; |
| 339 | |
| 340 | pmdp += pmd_index(next) - pmd_index(addr); |
| 341 | phys += next - addr; |
| 342 | } while (addr = next, addr != end); |
| 343 | |
| 344 | out: |
| 345 | pmd_clear_fixmap(); |
| 346 | |
| 347 | return ret; |
| 348 | } |
| 349 | |
| 350 | static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, |
| 351 | phys_addr_t phys, pgprot_t prot, |
| 352 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 353 | int flags) |
| 354 | { |
| 355 | int ret = 0; |
| 356 | unsigned long next; |
| 357 | p4d_t p4d = READ_ONCE(*p4dp); |
| 358 | pud_t *pudp; |
| 359 | |
| 360 | if (p4d_none(p4d)) { |
| 361 | p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF; |
| 362 | phys_addr_t pud_phys; |
| 363 | |
| 364 | if (flags & NO_EXEC_MAPPINGS) |
| 365 | p4dval |= P4D_TABLE_PXN; |
| 366 | BUG_ON(!pgtable_alloc); |
| 367 | pud_phys = pgtable_alloc(TABLE_PUD); |
| 368 | if (pud_phys == INVALID_PHYS_ADDR) |
| 369 | return -ENOMEM; |
| 370 | pudp = pud_set_fixmap(pud_phys); |
| 371 | init_clear_pgtable(pudp); |
| 372 | pudp += pud_index(addr); |
| 373 | __p4d_populate(p4dp, pud_phys, p4dval); |
| 374 | } else { |
| 375 | BUG_ON(p4d_bad(p4d)); |
| 376 | pudp = pud_set_fixmap_offset(p4dp, addr); |
| 377 | } |
| 378 | |
| 379 | do { |
| 380 | pud_t old_pud = READ_ONCE(*pudp); |
| 381 | |
| 382 | next = pud_addr_end(addr, end); |
| 383 | |
| 384 | /* |
| 385 | * For 4K granule only, attempt to put down a 1GB block |
| 386 | */ |
| 387 | if (pud_sect_supported() && |
| 388 | ((addr | next | phys) & ~PUD_MASK) == 0 && |
| 389 | (flags & NO_BLOCK_MAPPINGS) == 0) { |
| 390 | pud_set_huge(pudp, phys, prot); |
| 391 | |
| 392 | /* |
| 393 | * After the PUD entry has been populated once, we |
| 394 | * only allow updates to the permission attributes. |
| 395 | */ |
| 396 | BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), |
| 397 | READ_ONCE(pud_val(*pudp)))); |
| 398 | } else { |
| 399 | ret = alloc_init_cont_pmd(pudp, addr, next, phys, prot, |
| 400 | pgtable_alloc, flags); |
| 401 | if (ret) |
| 402 | goto out; |
| 403 | |
| 404 | BUG_ON(pud_val(old_pud) != 0 && |
| 405 | pud_val(old_pud) != READ_ONCE(pud_val(*pudp))); |
| 406 | } |
| 407 | phys += next - addr; |
| 408 | } while (pudp++, addr = next, addr != end); |
| 409 | |
| 410 | out: |
| 411 | pud_clear_fixmap(); |
| 412 | |
| 413 | return ret; |
| 414 | } |
| 415 | |
| 416 | static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, |
| 417 | phys_addr_t phys, pgprot_t prot, |
| 418 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 419 | int flags) |
| 420 | { |
| 421 | int ret; |
| 422 | unsigned long next; |
| 423 | pgd_t pgd = READ_ONCE(*pgdp); |
| 424 | p4d_t *p4dp; |
| 425 | |
| 426 | if (pgd_none(pgd)) { |
| 427 | pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF; |
| 428 | phys_addr_t p4d_phys; |
| 429 | |
| 430 | if (flags & NO_EXEC_MAPPINGS) |
| 431 | pgdval |= PGD_TABLE_PXN; |
| 432 | BUG_ON(!pgtable_alloc); |
| 433 | p4d_phys = pgtable_alloc(TABLE_P4D); |
| 434 | if (p4d_phys == INVALID_PHYS_ADDR) |
| 435 | return -ENOMEM; |
| 436 | p4dp = p4d_set_fixmap(p4d_phys); |
| 437 | init_clear_pgtable(p4dp); |
| 438 | p4dp += p4d_index(addr); |
| 439 | __pgd_populate(pgdp, p4d_phys, pgdval); |
| 440 | } else { |
| 441 | BUG_ON(pgd_bad(pgd)); |
| 442 | p4dp = p4d_set_fixmap_offset(pgdp, addr); |
| 443 | } |
| 444 | |
| 445 | do { |
| 446 | p4d_t old_p4d = READ_ONCE(*p4dp); |
| 447 | |
| 448 | next = p4d_addr_end(addr, end); |
| 449 | |
| 450 | ret = alloc_init_pud(p4dp, addr, next, phys, prot, |
| 451 | pgtable_alloc, flags); |
| 452 | if (ret) |
| 453 | goto out; |
| 454 | |
| 455 | BUG_ON(p4d_val(old_p4d) != 0 && |
| 456 | p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp))); |
| 457 | |
| 458 | phys += next - addr; |
| 459 | } while (p4dp++, addr = next, addr != end); |
| 460 | |
| 461 | out: |
| 462 | p4d_clear_fixmap(); |
| 463 | |
| 464 | return ret; |
| 465 | } |
| 466 | |
| 467 | static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, |
| 468 | unsigned long virt, phys_addr_t size, |
| 469 | pgprot_t prot, |
| 470 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 471 | int flags) |
| 472 | { |
| 473 | int ret; |
| 474 | unsigned long addr, end, next; |
| 475 | pgd_t *pgdp = pgd_offset_pgd(pgdir, virt); |
| 476 | |
| 477 | /* |
| 478 | * If the virtual and physical address don't have the same offset |
| 479 | * within a page, we cannot map the region as the caller expects. |
| 480 | */ |
| 481 | if (WARN_ON((phys ^ virt) & ~PAGE_MASK)) |
| 482 | return -EINVAL; |
| 483 | |
| 484 | phys &= PAGE_MASK; |
| 485 | addr = virt & PAGE_MASK; |
| 486 | end = PAGE_ALIGN(virt + size); |
| 487 | |
| 488 | do { |
| 489 | next = pgd_addr_end(addr, end); |
| 490 | ret = alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc, |
| 491 | flags); |
| 492 | if (ret) |
| 493 | return ret; |
| 494 | phys += next - addr; |
| 495 | } while (pgdp++, addr = next, addr != end); |
| 496 | |
| 497 | return 0; |
| 498 | } |
| 499 | |
| 500 | static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, |
| 501 | unsigned long virt, phys_addr_t size, |
| 502 | pgprot_t prot, |
| 503 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 504 | int flags) |
| 505 | { |
| 506 | int ret; |
| 507 | |
| 508 | mutex_lock(&fixmap_lock); |
| 509 | ret = __create_pgd_mapping_locked(pgdir, phys, virt, size, prot, |
| 510 | pgtable_alloc, flags); |
| 511 | mutex_unlock(&fixmap_lock); |
| 512 | |
| 513 | return ret; |
| 514 | } |
| 515 | |
| 516 | static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, |
| 517 | unsigned long virt, phys_addr_t size, |
| 518 | pgprot_t prot, |
| 519 | phys_addr_t (*pgtable_alloc)(enum pgtable_type), |
| 520 | int flags) |
| 521 | { |
| 522 | int ret; |
| 523 | |
| 524 | ret = __create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, |
| 525 | flags); |
| 526 | if (ret) |
| 527 | panic("Failed to create page tables\n"); |
| 528 | } |
| 529 | |
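| | /* |
| |  * Runtime page-table page allocation: get a page via the ptdesc API and |
| |  * run the constructor for the requested table level. Returns the physical |
| |  * address of the new table, or INVALID_PHYS_ADDR on failure. |
| |  */ |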
| 530 | static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp, |
| 531 | enum pgtable_type pgtable_type) |
| 532 | { |
| 533 | /* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */ |
| 534 | struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0); |
| 535 | phys_addr_t pa; |
| 536 | |
| 537 | if (!ptdesc) |
| 538 | return INVALID_PHYS_ADDR; |
| 539 | |
| 540 | pa = page_to_phys(ptdesc_page(ptdesc)); |
| 541 | |
| 542 | switch (pgtable_type) { |
| 543 | case TABLE_PTE: |
| 544 | BUG_ON(!pagetable_pte_ctor(mm, ptdesc)); |
| 545 | break; |
| 546 | case TABLE_PMD: |
| 547 | BUG_ON(!pagetable_pmd_ctor(mm, ptdesc)); |
| 548 | break; |
| 549 | case TABLE_PUD: |
| 550 | pagetable_pud_ctor(ptdesc); |
| 551 | break; |
| 552 | case TABLE_P4D: |
| 553 | pagetable_p4d_ctor(ptdesc); |
| 554 | break; |
| 555 | } |
| 556 | |
| 557 | return pa; |
| 558 | } |
| 559 | |
| 560 | static phys_addr_t |
| 561 | pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp) |
| 562 | { |
| 563 | return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type); |
| 564 | } |
| 565 | |
| 566 | static phys_addr_t __maybe_unused |
| 567 | pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type) |
| 568 | { |
| 569 | return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL); |
| 570 | } |
| 571 | |
| 572 | static phys_addr_t |
| 573 | pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type) |
| 574 | { |
| 575 | return __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type); |
| 576 | } |
| 577 | |
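| | /* |
| |  * Clear the contiguous bit on every PTE of the contpte block containing |
| |  * @ptep. Callers guarantee the CPU can repaint live mappings safely |
| |  * (BBML2_NOABORT), so no break-before-make sequence is required. |
| |  */ |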
| 578 | static void split_contpte(pte_t *ptep) |
| 579 | { |
| 580 | int i; |
| 581 | |
| 582 | ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES); |
| 583 | for (i = 0; i < CONT_PTES; i++, ptep++) |
| 584 | __set_pte(ptep, pte_mknoncont(__ptep_get(ptep))); |
| 585 | } |
| 586 | |
| 587 | static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont) |
| 588 | { |
| 589 | pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF; |
| 590 | unsigned long pfn = pmd_pfn(pmd); |
| 591 | pgprot_t prot = pmd_pgprot(pmd); |
| 592 | phys_addr_t pte_phys; |
| 593 | pte_t *ptep; |
| 594 | int i; |
| 595 | |
| 596 | pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp); |
| 597 | if (pte_phys == INVALID_PHYS_ADDR) |
| 598 | return -ENOMEM; |
| 599 | ptep = (pte_t *)phys_to_virt(pte_phys); |
| 600 | |
| 601 | if (pgprot_val(prot) & PMD_SECT_PXN) |
| 602 | tableprot |= PMD_TABLE_PXN; |
| 603 | |
| 604 | prot = __pgprot((pgprot_val(prot) & ~PTE_TYPE_MASK) | PTE_TYPE_PAGE); |
| 605 | prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); |
| 606 | if (to_cont) |
| 607 | prot = __pgprot(pgprot_val(prot) | PTE_CONT); |
| 608 | |
| 609 | for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++) |
| 610 | __set_pte(ptep, pfn_pte(pfn, prot)); |
| 611 | |
| 612 | /* |
| 613 | * Ensure the pte entries are visible to the table walker by the time |
| 614 | * the pmd entry that points to the ptes is visible. |
| 615 | */ |
| 616 | dsb(ishst); |
| 617 | __pmd_populate(pmdp, pte_phys, tableprot); |
| 618 | |
| 619 | return 0; |
| 620 | } |
| 621 | |
| 622 | static void split_contpmd(pmd_t *pmdp) |
| 623 | { |
| 624 | int i; |
| 625 | |
| 626 | pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS); |
| 627 | for (i = 0; i < CONT_PMDS; i++, pmdp++) |
| 628 | set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp))); |
| 629 | } |
| 630 | |
| 631 | static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont) |
| 632 | { |
| 633 | pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF; |
| 634 | unsigned int step = PMD_SIZE >> PAGE_SHIFT; |
| 635 | unsigned long pfn = pud_pfn(pud); |
| 636 | pgprot_t prot = pud_pgprot(pud); |
| 637 | phys_addr_t pmd_phys; |
| 638 | pmd_t *pmdp; |
| 639 | int i; |
| 640 | |
| 641 | pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp); |
| 642 | if (pmd_phys == INVALID_PHYS_ADDR) |
| 643 | return -ENOMEM; |
| 644 | pmdp = (pmd_t *)phys_to_virt(pmd_phys); |
| 645 | |
| 646 | if (pgprot_val(prot) & PMD_SECT_PXN) |
| 647 | tableprot |= PUD_TABLE_PXN; |
| 648 | |
| 649 | prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT); |
| 650 | prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); |
| 651 | if (to_cont) |
| 652 | prot = __pgprot(pgprot_val(prot) | PTE_CONT); |
| 653 | |
| 654 | for (i = 0; i < PTRS_PER_PMD; i++, pmdp++, pfn += step) |
| 655 | set_pmd(pmdp, pfn_pmd(pfn, prot)); |
| 656 | |
| 657 | /* |
| 658 | * Ensure the pmd entries are visible to the table walker by the time |
| 659 | * the pud entry that points to the pmds is visible. |
| 660 | */ |
| 661 | dsb(ishst); |
| 662 | __pud_populate(pudp, pmd_phys, tableprot); |
| 663 | |
| 664 | return 0; |
| 665 | } |
| 666 | |
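| | /* |
| |  * Walk the kernel page table at @addr and split any leaf mapping that |
| |  * spans it (pud -> contpmd -> pmd -> contpte -> pte) until @addr lands on |
| |  * a leaf boundary. Levels that are already aligned or not present are |
| |  * left untouched. |
| |  */ |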
| 667 | static int split_kernel_leaf_mapping_locked(unsigned long addr) |
| 668 | { |
| 669 | pgd_t *pgdp, pgd; |
| 670 | p4d_t *p4dp, p4d; |
| 671 | pud_t *pudp, pud; |
| 672 | pmd_t *pmdp, pmd; |
| 673 | pte_t *ptep, pte; |
| 674 | int ret = 0; |
| 675 | |
| 676 | /* |
| 677 | * PGD: If addr is PGD aligned then addr already describes a leaf |
| 678 | * boundary. If not present then there is nothing to split. |
| 679 | */ |
| 680 | if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr) |
| 681 | goto out; |
| 682 | pgdp = pgd_offset_k(addr); |
| 683 | pgd = pgdp_get(pgdp); |
| 684 | if (!pgd_present(pgd)) |
| 685 | goto out; |
| 686 | |
| 687 | /* |
| 688 | * P4D: If addr is P4D aligned then addr already describes a leaf |
| 689 | * boundary. If not present then there is nothing to split. |
| 690 | */ |
| 691 | if (ALIGN_DOWN(addr, P4D_SIZE) == addr) |
| 692 | goto out; |
| 693 | p4dp = p4d_offset(pgdp, addr); |
| 694 | p4d = p4dp_get(p4dp); |
| 695 | if (!p4d_present(p4d)) |
| 696 | goto out; |
| 697 | |
| 698 | /* |
| 699 | * PUD: If addr is PUD aligned then addr already describes a leaf |
| 700 | * boundary. If not present then there is nothing to split. Otherwise, |
| 701 | * if we have a pud leaf, split to contpmd. |
| 702 | */ |
| 703 | if (ALIGN_DOWN(addr, PUD_SIZE) == addr) |
| 704 | goto out; |
| 705 | pudp = pud_offset(p4dp, addr); |
| 706 | pud = pudp_get(pudp); |
| 707 | if (!pud_present(pud)) |
| 708 | goto out; |
| 709 | if (pud_leaf(pud)) { |
| 710 | ret = split_pud(pudp, pud, GFP_PGTABLE_KERNEL, true); |
| 711 | if (ret) |
| 712 | goto out; |
| 713 | } |
| 714 | |
| 715 | /* |
| 716 | * CONTPMD: If addr is CONTPMD aligned then addr already describes a |
| 717 | * leaf boundary. If not present then there is nothing to split. |
| 718 | * Otherwise, if we have a contpmd leaf, split to pmd. |
| 719 | */ |
| 720 | if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr) |
| 721 | goto out; |
| 722 | pmdp = pmd_offset(pudp, addr); |
| 723 | pmd = pmdp_get(pmdp); |
| 724 | if (!pmd_present(pmd)) |
| 725 | goto out; |
| 726 | if (pmd_leaf(pmd)) { |
| 727 | if (pmd_cont(pmd)) |
| 728 | split_contpmd(pmdp); |
| 729 | /* |
| 730 | * PMD: If addr is PMD aligned then addr already describes a |
| 731 | * leaf boundary. Otherwise, split to contpte. |
| 732 | */ |
| 733 | if (ALIGN_DOWN(addr, PMD_SIZE) == addr) |
| 734 | goto out; |
| 735 | ret = split_pmd(pmdp, pmd, GFP_PGTABLE_KERNEL, true); |
| 736 | if (ret) |
| 737 | goto out; |
| 738 | } |
| 739 | |
| 740 | /* |
| 741 | * CONTPTE: If addr is CONTPTE aligned then addr already describes a |
| 742 | * leaf boundary. If not present then there is nothing to split. |
| 743 | * Otherwise, if we have a contpte leaf, split to pte. |
| 744 | */ |
| 745 | if (ALIGN_DOWN(addr, CONT_PTE_SIZE) == addr) |
| 746 | goto out; |
| 747 | ptep = pte_offset_kernel(pmdp, addr); |
| 748 | pte = __ptep_get(ptep); |
| 749 | if (!pte_present(pte)) |
| 750 | goto out; |
| 751 | if (pte_cont(pte)) |
| 752 | split_contpte(ptep); |
| 753 | |
| 754 | out: |
| 755 | return ret; |
| 756 | } |
| 757 | |
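| | /* |
| |  * Decide whether the linear map must be mapped at page granularity. |
| |  * debug_pagealloc always requires it; otherwise block mappings are fine |
| |  * when the CPU can split them live (BBML2_NOABORT), and are avoided when |
| |  * rodata_full, KFENCE or a realm guest needs per-page permission changes. |
| |  */ |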
| 758 | static inline bool force_pte_mapping(void) |
| 759 | { |
| 760 | const bool bbml2 = system_capabilities_finalized() ? |
| 761 | system_supports_bbml2_noabort() : cpu_supports_bbml2_noabort(); |
| 762 | |
| 763 | if (debug_pagealloc_enabled()) |
| 764 | return true; |
| 765 | if (bbml2) |
| 766 | return false; |
| 767 | return rodata_full || arm64_kfence_can_set_direct_map() || is_realm_world(); |
| 768 | } |
| 769 | |
| 770 | static DEFINE_MUTEX(pgtable_split_lock); |
| 771 | |
| 772 | int split_kernel_leaf_mapping(unsigned long start, unsigned long end) |
| 773 | { |
| 774 | int ret; |
| 775 | |
| 776 | /* |
| 777 | * !BBML2_NOABORT systems should not be trying to change permissions on |
| 778 | * anything that is not pte-mapped in the first place. Just return early |
| 779 | * and let the permission change code raise a warning if not already |
| 780 | * pte-mapped. |
| 781 | */ |
| 782 | if (!system_supports_bbml2_noabort()) |
| 783 | return 0; |
| 784 | |
| 785 | /* |
| 786 | * If the region is within a pte-mapped area, there is no need to try to |
| 787 | * split. Additionally, CONFIG_DEBUG_PAGEALLOC and CONFIG_KFENCE may |
| 788 | * change permissions from atomic context so for those cases (which are |
| 789 | * always pte-mapped), we must not go any further because taking the |
| 790 | * mutex below may sleep. |
| 791 | */ |
| 792 | if (force_pte_mapping() || is_kfence_address((void *)start)) |
| 793 | return 0; |
| 794 | |
| 795 | /* |
| 796 | * Ensure start and end are at least page-aligned since this is the |
| 797 | * finest granularity we can split to. |
| 798 | */ |
| 799 | if (start != PAGE_ALIGN(start) || end != PAGE_ALIGN(end)) |
| 800 | return -EINVAL; |
| 801 | |
| 802 | mutex_lock(&pgtable_split_lock); |
| 803 | arch_enter_lazy_mmu_mode(); |
| 804 | |
| 805 | /* |
| 806 | * split_kernel_leaf_mapping_locked() may sleep; this is not a problem |
| 807 | * on arm64 because its lazy MMU implementation allows sleeping. |
| 809 | * |
| 810 | * Optimize for the common case of splitting out a single page from a |
| 811 | * larger mapping. Here we can just split on the "least aligned" of |
| 812 | * start and end and this will guarantee that there must also be a split |
| 813 | * on the more aligned address since the both addresses must be in the |
| 814 | * same contpte block and it must have been split to ptes. |
| 815 | */ |
| 816 | if (end - start == PAGE_SIZE) { |
| 817 | start = __ffs(start) < __ffs(end) ? start : end; |
| 818 | ret = split_kernel_leaf_mapping_locked(start); |
| 819 | } else { |
| 820 | ret = split_kernel_leaf_mapping_locked(start); |
| 821 | if (!ret) |
| 822 | ret = split_kernel_leaf_mapping_locked(end); |
| 823 | } |
| 824 | |
| 825 | arch_leave_lazy_mmu_mode(); |
| 826 | mutex_unlock(&pgtable_split_lock); |
| 827 | return ret; |
| 828 | } |
| 829 | |
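| | /* |
| |  * Page-table walk callbacks used by range_split_to_ptes() to demote every |
| |  * block and contiguous mapping in a range down to individual PTEs. |
| |  */ |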
| 830 | static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr, |
| 831 | unsigned long next, struct mm_walk *walk) |
| 832 | { |
| 833 | gfp_t gfp = *(gfp_t *)walk->private; |
| 834 | pud_t pud = pudp_get(pudp); |
| 835 | int ret = 0; |
| 836 | |
| 837 | if (pud_leaf(pud)) |
| 838 | ret = split_pud(pudp, pud, gfp, false); |
| 839 | |
| 840 | return ret; |
| 841 | } |
| 842 | |
| 843 | static int split_to_ptes_pmd_entry(pmd_t *pmdp, unsigned long addr, |
| 844 | unsigned long next, struct mm_walk *walk) |
| 845 | { |
| 846 | gfp_t gfp = *(gfp_t *)walk->private; |
| 847 | pmd_t pmd = pmdp_get(pmdp); |
| 848 | int ret = 0; |
| 849 | |
| 850 | if (pmd_leaf(pmd)) { |
| 851 | if (pmd_cont(pmd)) |
| 852 | split_contpmd(pmdp); |
| 853 | ret = split_pmd(pmdp, pmd, gfp, false); |
| 854 | |
| 855 | /* |
| 856 | * We have split the pmd directly to ptes so there is no need to |
| 857 | * visit each pte to check if they are contpte. |
| 858 | */ |
| 859 | walk->action = ACTION_CONTINUE; |
| 860 | } |
| 861 | |
| 862 | return ret; |
| 863 | } |
| 864 | |
| 865 | static int split_to_ptes_pte_entry(pte_t *ptep, unsigned long addr, |
| 866 | unsigned long next, struct mm_walk *walk) |
| 867 | { |
| 868 | pte_t pte = __ptep_get(ptep); |
| 869 | |
| 870 | if (pte_cont(pte)) |
| 871 | split_contpte(ptep); |
| 872 | |
| 873 | return 0; |
| 874 | } |
| 875 | |
| 876 | static const struct mm_walk_ops split_to_ptes_ops = { |
| 877 | .pud_entry = split_to_ptes_pud_entry, |
| 878 | .pmd_entry = split_to_ptes_pmd_entry, |
| 879 | .pte_entry = split_to_ptes_pte_entry, |
| 880 | }; |
| 881 | |
| 882 | static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp) |
| 883 | { |
| 884 | int ret; |
| 885 | |
| 886 | arch_enter_lazy_mmu_mode(); |
| 887 | ret = walk_kernel_page_table_range_lockless(start, end, |
| 888 | &split_to_ptes_ops, NULL, &gfp); |
| 889 | arch_leave_lazy_mmu_mode(); |
| 890 | |
| 891 | return ret; |
| 892 | } |
| 893 | |
| 894 | static bool linear_map_requires_bbml2 __initdata; |
| 895 | |
| 896 | u32 idmap_kpti_bbml2_flag; |
| 897 | |
| 898 | static void __init init_idmap_kpti_bbml2_flag(void) |
| 899 | { |
| 900 | WRITE_ONCE(idmap_kpti_bbml2_flag, 1); |
| 901 | /* Must be visible to other CPUs before stop_machine() is called. */ |
| 902 | smp_mb(); |
| 903 | } |
| 904 | |
| 905 | static int __init linear_map_split_to_ptes(void *__unused) |
| 906 | { |
| 907 | /* |
| 908 | * Repainting the linear map must be done by CPU0 (the boot CPU) because |
| 909 | * that's the only CPU that we know supports BBML2. The other CPUs will |
| 910 | * be held in a waiting area with the idmap active. |
| 911 | */ |
| 912 | if (!smp_processor_id()) { |
| 913 | unsigned long lstart = _PAGE_OFFSET(vabits_actual); |
| 914 | unsigned long lend = PAGE_END; |
| 915 | unsigned long kstart = (unsigned long)lm_alias(_stext); |
| 916 | unsigned long kend = (unsigned long)lm_alias(__init_begin); |
| 917 | int ret; |
| 918 | |
| 919 | /* |
| 920 | * Wait for all secondary CPUs to be put into the waiting area. |
| 921 | */ |
| 922 | smp_cond_load_acquire(&idmap_kpti_bbml2_flag, VAL == num_online_cpus()); |
| 923 | |
| 924 | /* |
| 925 | * Walk all of the linear map [lstart, lend), except the kernel |
| 926 | * linear map alias [kstart, kend), and split all mappings to |
| 927 | * PTE. The kernel alias remains static throughout runtime so |
| 928 | * can continue to be safely mapped with large mappings. |
| 929 | */ |
| 930 | ret = range_split_to_ptes(lstart, kstart, GFP_ATOMIC); |
| 931 | if (!ret) |
| 932 | ret = range_split_to_ptes(kend, lend, GFP_ATOMIC); |
| 933 | if (ret) |
| 934 | panic("Failed to split linear map\n"); |
| 935 | flush_tlb_kernel_range(lstart, lend); |
| 936 | |
| 937 | /* |
| 938 | * Relies on dsb in flush_tlb_kernel_range() to avoid reordering |
| 939 | * before any page table split operations. |
| 940 | */ |
| 941 | WRITE_ONCE(idmap_kpti_bbml2_flag, 0); |
| 942 | } else { |
| 943 | typedef void (wait_split_fn)(void); |
| 944 | extern wait_split_fn wait_linear_map_split_to_ptes; |
| 945 | wait_split_fn *wait_fn; |
| 946 | |
| 947 | wait_fn = (void *)__pa_symbol(wait_linear_map_split_to_ptes); |
| 948 | |
| 949 | /* |
| 950 | * At least one secondary CPU doesn't support BBML2 so cannot |
| 951 | * tolerate the size of the live mappings changing. So have the |
| 952 | * secondary CPUs wait for the boot CPU to make the changes |
| 953 | * with the idmap active and init_mm inactive. |
| 954 | */ |
| 955 | cpu_install_idmap(); |
| 956 | wait_fn(); |
| 957 | cpu_uninstall_idmap(); |
| 958 | } |
| 959 | |
| 960 | return 0; |
| 961 | } |
| 962 | |
| 963 | void __init linear_map_maybe_split_to_ptes(void) |
| 964 | { |
| 965 | if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort()) { |
| 966 | init_idmap_kpti_bbml2_flag(); |
| 967 | stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask); |
| 968 | } |
| 969 | } |
| 970 | |
| 971 | /* |
| 972 | * This function can only be used to modify existing table entries, |
| 973 | * without allocating new levels of table. Note that this permits the |
| 974 | * creation of new section or page entries. |
| 975 | */ |
| 976 | void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, |
| 977 | phys_addr_t size, pgprot_t prot) |
| 978 | { |
| 979 | if (virt < PAGE_OFFSET) { |
| 980 | pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n" , |
| 981 | &phys, virt); |
| 982 | return; |
| 983 | } |
| 984 | early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, |
| 985 | NO_CONT_MAPPINGS); |
| 986 | } |
| 987 | |
| 988 | void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, |
| 989 | unsigned long virt, phys_addr_t size, |
| 990 | pgprot_t prot, bool page_mappings_only) |
| 991 | { |
| 992 | int flags = 0; |
| 993 | |
| 994 | BUG_ON(mm == &init_mm); |
| 995 | |
| 996 | if (page_mappings_only) |
| 997 | flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; |
| 998 | |
| 999 | early_create_pgd_mapping(mm->pgd, phys, virt, size, prot, |
| 1000 | pgd_pgtable_alloc_special_mm, flags); |
| 1001 | } |
| 1002 | |
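| | /* |
| |  * Change the attributes of an existing kernel mapping without allocating |
| |  * new table levels, then flush the TLB for the updated range. |
| |  */ |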
| 1003 | static void update_mapping_prot(phys_addr_t phys, unsigned long virt, |
| 1004 | phys_addr_t size, pgprot_t prot) |
| 1005 | { |
| 1006 | if (virt < PAGE_OFFSET) { |
| 1007 | pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n" , |
| 1008 | &phys, virt); |
| 1009 | return; |
| 1010 | } |
| 1011 | |
| 1012 | early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, |
| 1013 | NO_CONT_MAPPINGS); |
| 1014 | |
| 1015 | /* flush the TLBs after updating live kernel mappings */ |
| 1016 | flush_tlb_kernel_range(virt, virt + size); |
| 1017 | } |
| 1018 | |
| 1019 | static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, |
| 1020 | phys_addr_t end, pgprot_t prot, int flags) |
| 1021 | { |
| 1022 | early_create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, |
| 1023 | prot, early_pgtable_alloc, flags); |
| 1024 | } |
| 1025 | |
| 1026 | void __init mark_linear_text_alias_ro(void) |
| 1027 | { |
| 1028 | /* |
| 1029 | * Remove the write permissions from the linear alias of .text/.rodata |
| 1030 | */ |
| 1031 | update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), |
| 1032 | (unsigned long)__init_begin - (unsigned long)_text, |
| 1033 | PAGE_KERNEL_RO); |
| 1034 | } |
| 1035 | |
| 1036 | #ifdef CONFIG_KFENCE |
| 1037 | |
| 1038 | bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; |
| 1039 | |
| 1040 | /* early_param() will be parsed before map_mem() below. */ |
| 1041 | static int __init parse_kfence_early_init(char *arg) |
| 1042 | { |
| 1043 | int val; |
| 1044 | |
| 1045 | if (get_option(&arg, &val)) |
| 1046 | kfence_early_init = !!val; |
| 1047 | return 0; |
| 1048 | } |
| 1049 | early_param("kfence.sample_interval" , parse_kfence_early_init); |
| 1050 | |
| 1051 | static phys_addr_t __init arm64_kfence_alloc_pool(void) |
| 1052 | { |
| 1053 | phys_addr_t kfence_pool; |
| 1054 | |
| 1055 | if (!kfence_early_init) |
| 1056 | return 0; |
| 1057 | |
| 1058 | kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); |
| 1059 | if (!kfence_pool) { |
| 1060 | pr_err("failed to allocate kfence pool\n" ); |
| 1061 | kfence_early_init = false; |
| 1062 | return 0; |
| 1063 | } |
| 1064 | |
| 1065 | /* Temporarily mark as NOMAP. */ |
| 1066 | memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); |
| 1067 | |
| 1068 | return kfence_pool; |
| 1069 | } |
| 1070 | |
| 1071 | static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) |
| 1072 | { |
| 1073 | if (!kfence_pool) |
| 1074 | return; |
| 1075 | |
| 1076 | /* KFENCE pool needs page-level mapping. */ |
| 1077 | __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, |
| 1078 | pgprot_tagged(PAGE_KERNEL), |
| 1079 | NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); |
| 1080 | memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); |
| 1081 | __kfence_pool = phys_to_virt(kfence_pool); |
| 1082 | } |
| 1083 | |
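| | /* |
| |  * Called by the KFENCE core before the pool is used: ensure the linear |
| |  * map covering the pool is mapped at page granularity so that individual |
| |  * guard pages can be protected later. |
| |  */ |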
| 1084 | bool arch_kfence_init_pool(void) |
| 1085 | { |
| 1086 | unsigned long start = (unsigned long)__kfence_pool; |
| 1087 | unsigned long end = start + KFENCE_POOL_SIZE; |
| 1088 | int ret; |
| 1089 | |
| 1090 | /* Exit early if we know the linear map is already pte-mapped. */ |
| 1091 | if (force_pte_mapping()) |
| 1092 | return true; |
| 1093 | |
| 1094 | /* Kfence pool is already pte-mapped for the early init case. */ |
| 1095 | if (kfence_early_init) |
| 1096 | return true; |
| 1097 | |
| 1098 | mutex_lock(&pgtable_split_lock); |
| 1099 | ret = range_split_to_ptes(start, end, GFP_PGTABLE_KERNEL); |
| 1100 | mutex_unlock(&pgtable_split_lock); |
| 1101 | |
| 1102 | /* |
| 1103 | * Since the system supports bbml2_noabort, tlb invalidation is not |
| 1104 | * required here; the pgtable mappings have been split to pte but larger |
| 1105 | * entries may safely linger in the TLB. |
| 1106 | */ |
| 1107 | |
| 1108 | return !ret; |
| 1109 | } |
| 1110 | #else /* CONFIG_KFENCE */ |
| 1111 | |
| 1112 | static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; } |
| 1113 | static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { } |
| 1114 | |
| 1115 | #endif /* CONFIG_KFENCE */ |
| 1116 | |
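| | /* |
| |  * Create the linear map for all memory banks known to memblock. The |
| |  * alias of the kernel text/rodata is mapped separately (non-executable, |
| |  * made read-only after alternative patching) and the early KFENCE pool |
| |  * is mapped at page granularity. |
| |  */ |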
| 1117 | static void __init map_mem(pgd_t *pgdp) |
| 1118 | { |
| 1119 | static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); |
| 1120 | phys_addr_t kernel_start = __pa_symbol(_text); |
| 1121 | phys_addr_t kernel_end = __pa_symbol(__init_begin); |
| 1122 | phys_addr_t start, end; |
| 1123 | phys_addr_t early_kfence_pool; |
| 1124 | int flags = NO_EXEC_MAPPINGS; |
| 1125 | u64 i; |
| 1126 | |
| 1127 | /* |
| 1128 | * Setting hierarchical PXNTable attributes on table entries covering |
| 1129 | * the linear region is only possible if it is guaranteed that no table |
| 1130 | * entries at any level are being shared between the linear region and |
| 1131 | * the vmalloc region. Check whether this is true for the PGD level, in |
| 1132 | * which case it is guaranteed to be true for all other levels as well. |
| 1133 | * (Unless we are running with support for LPA2, in which case the |
| 1134 | * entire reduced VA space is covered by a single pgd_t which will have |
| 1135 | * been populated without the PXNTable attribute by the time we get here.) |
| 1136 | */ |
| 1137 | BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) && |
| 1138 | pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1); |
| 1139 | |
| 1140 | early_kfence_pool = arm64_kfence_alloc_pool(); |
| 1141 | |
| 1142 | linear_map_requires_bbml2 = !force_pte_mapping() && can_set_direct_map(); |
| 1143 | |
| 1144 | if (force_pte_mapping()) |
| 1145 | flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; |
| 1146 | |
| 1147 | /* |
| 1148 | * Take care not to create a writable alias for the |
| 1149 | * read-only text and rodata sections of the kernel image. |
| 1150 | * So temporarily mark them as NOMAP to skip mappings in |
| 1151 | * the following for-loop |
| 1152 | */ |
| 1153 | memblock_mark_nomap(kernel_start, kernel_end - kernel_start); |
| 1154 | |
| 1155 | /* map all the memory banks */ |
| 1156 | for_each_mem_range(i, &start, &end) { |
| 1157 | if (start >= end) |
| 1158 | break; |
| 1159 | /* |
| 1160 | * The linear map must allow allocation tags reading/writing |
| 1161 | * if MTE is present. Otherwise, it has the same attributes as |
| 1162 | * PAGE_KERNEL. |
| 1163 | */ |
| 1164 | __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL), |
| 1165 | flags); |
| 1166 | } |
| 1167 | |
| 1168 | /* |
| 1169 | * Map the linear alias of the [_text, __init_begin) interval |
| 1170 | * as non-executable now, and remove the write permission in |
| 1171 | * mark_linear_text_alias_ro() below (which will be called after |
| 1172 | * alternative patching has completed). This makes the contents |
| 1173 | * of the region accessible to subsystems such as hibernate, |
| 1174 | * but protects it from inadvertent modification or execution. |
| 1175 | * Note that contiguous mappings cannot be remapped in this way, |
| 1176 | * so we should avoid them here. |
| 1177 | */ |
| 1178 | __map_memblock(pgdp, kernel_start, kernel_end, |
| 1179 | PAGE_KERNEL, NO_CONT_MAPPINGS); |
| 1180 | memblock_clear_nomap(kernel_start, kernel_end - kernel_start); |
| 1181 | arm64_kfence_map_pool(early_kfence_pool, pgdp); |
| 1182 | } |
| 1183 | |
| 1184 | void mark_rodata_ro(void) |
| 1185 | { |
| 1186 | unsigned long section_size; |
| 1187 | |
| 1188 | /* |
| 1189 | * mark .rodata as read only. Use __init_begin rather than __end_rodata |
| 1190 | * to cover NOTES and EXCEPTION_TABLE. |
| 1191 | */ |
| 1192 | section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; |
| 1193 | WRITE_ONCE(rodata_is_rw, false); |
| 1194 | update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, |
| 1195 | section_size, PAGE_KERNEL_RO); |
| 1196 | /* mark the range between _text and _stext as read only. */ |
| 1197 | update_mapping_prot(__pa_symbol(_text), (unsigned long)_text, |
| 1198 | (unsigned long)_stext - (unsigned long)_text, |
| 1199 | PAGE_KERNEL_RO); |
| 1200 | } |
| 1201 | |
| 1202 | static void __init declare_vma(struct vm_struct *vma, |
| 1203 | void *va_start, void *va_end, |
| 1204 | unsigned long vm_flags) |
| 1205 | { |
| 1206 | phys_addr_t pa_start = __pa_symbol(va_start); |
| 1207 | unsigned long size = va_end - va_start; |
| 1208 | |
| 1209 | BUG_ON(!PAGE_ALIGNED(pa_start)); |
| 1210 | BUG_ON(!PAGE_ALIGNED(size)); |
| 1211 | |
| 1212 | if (!(vm_flags & VM_NO_GUARD)) |
| 1213 | size += PAGE_SIZE; |
| 1214 | |
| 1215 | vma->addr = va_start; |
| 1216 | vma->phys_addr = pa_start; |
| 1217 | vma->size = size; |
| 1218 | vma->flags = VM_MAP | vm_flags; |
| 1219 | vma->caller = __builtin_return_address(0); |
| 1220 | |
| 1221 | vm_area_add_early(vma); |
| 1222 | } |
| 1223 | |
| 1224 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
| 1225 | #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT)) |
| 1226 | |
| 1227 | static phys_addr_t kpti_ng_temp_alloc __initdata; |
| 1228 | |
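| | /* |
| |  * Hand out pages, in descending order, from the block preallocated by |
| |  * __kpti_install_ng_mappings() for the temporary KPTI page tables. |
| |  */ |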
| 1229 | static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type) |
| 1230 | { |
| 1231 | kpti_ng_temp_alloc -= PAGE_SIZE; |
| 1232 | return kpti_ng_temp_alloc; |
| 1233 | } |
| 1234 | |
| 1235 | static int __init __kpti_install_ng_mappings(void *__unused) |
| 1236 | { |
| 1237 | typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long); |
| 1238 | extern kpti_remap_fn idmap_kpti_install_ng_mappings; |
| 1239 | kpti_remap_fn *remap_fn; |
| 1240 | |
| 1241 | int cpu = smp_processor_id(); |
| 1242 | int levels = CONFIG_PGTABLE_LEVELS; |
| 1243 | int order = order_base_2(levels); |
| 1244 | u64 kpti_ng_temp_pgd_pa = 0; |
| 1245 | pgd_t *kpti_ng_temp_pgd; |
| 1246 | u64 alloc = 0; |
| 1247 | |
| 1248 | if (levels == 5 && !pgtable_l5_enabled()) |
| 1249 | levels = 4; |
| 1250 | else if (levels == 4 && !pgtable_l4_enabled()) |
| 1251 | levels = 3; |
| 1252 | |
| 1253 | remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); |
| 1254 | |
| 1255 | if (!cpu) { |
| 1256 | int ret; |
| 1257 | |
| 1258 | alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); |
| 1259 | kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); |
| 1260 | kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd); |
| 1261 | |
| 1262 | // |
| 1263 | // Create a minimal page table hierarchy that permits us to map |
| 1264 | // the swapper page tables temporarily as we traverse them. |
| 1265 | // |
| 1266 | // The physical pages are laid out as follows: |
| 1267 | // |
| 1268 | // +--------+-/-------+-/------ +-/------ +-\\\--------+ |
| 1269 | // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] : |
| 1270 | // +--------+-\-------+-\------ +-\------ +-///--------+ |
| 1271 | // ^ |
| 1272 | // The first page is mapped into this hierarchy at a PMD_SHIFT |
| 1273 | // aligned virtual address, so that we can manipulate the PTE |
| 1274 | // level entries while the mapping is active. The first entry |
| 1275 | // covers the PTE[] page itself, the remaining entries are free |
| 1276 | // to be used as an ad-hoc fixmap. |
| 1277 | // |
| 1278 | ret = __create_pgd_mapping_locked(kpti_ng_temp_pgd, __pa(alloc), |
| 1279 | KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, |
| 1280 | kpti_ng_pgd_alloc, 0); |
| 1281 | if (ret) |
| 1282 | panic("Failed to create page tables\n" ); |
| 1283 | } |
| 1284 | |
| 1285 | cpu_install_idmap(); |
| 1286 | remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA); |
| 1287 | cpu_uninstall_idmap(); |
| 1288 | |
| 1289 | if (!cpu) { |
| 1290 | free_pages(alloc, order); |
| 1291 | arm64_use_ng_mappings = true; |
| 1292 | } |
| 1293 | |
| 1294 | return 0; |
| 1295 | } |
| 1296 | |
| 1297 | void __init kpti_install_ng_mappings(void) |
| 1298 | { |
| 1299 | /* Check whether KPTI is going to be used */ |
| 1300 | if (!arm64_kernel_unmapped_at_el0()) |
| 1301 | return; |
| 1302 | |
| 1303 | /* |
| 1304 | * We don't need to rewrite the page-tables if either we've done |
| 1305 | * it already or we have KASLR enabled and therefore have not |
| 1306 | * created any global mappings at all. |
| 1307 | */ |
| 1308 | if (arm64_use_ng_mappings) |
| 1309 | return; |
| 1310 | |
| 1311 | init_idmap_kpti_bbml2_flag(); |
| 1312 | stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask); |
| 1313 | } |
| 1314 | |
| 1315 | static pgprot_t __init kernel_exec_prot(void) |
| 1316 | { |
| 1317 | return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; |
| 1318 | } |
| 1319 | |
| 1320 | static int __init map_entry_trampoline(void) |
| 1321 | { |
| 1322 | int i; |
| 1323 | |
| 1324 | if (!arm64_kernel_unmapped_at_el0()) |
| 1325 | return 0; |
| 1326 | |
| 1327 | pgprot_t prot = kernel_exec_prot(); |
| 1328 | phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); |
| 1329 | |
| 1330 | /* The trampoline is always mapped and can therefore be global */ |
| 1331 | pgprot_val(prot) &= ~PTE_NG; |
| 1332 | |
| 1333 | /* Map only the text into the trampoline page table */ |
| 1334 | memset(tramp_pg_dir, 0, PGD_SIZE); |
| 1335 | early_create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, |
| 1336 | entry_tramp_text_size(), prot, |
| 1337 | pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS); |
| 1338 | |
| 1339 | /* Map both the text and data into the kernel page table */ |
| 1340 | for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) |
| 1341 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, |
| 1342 | pa_start + i * PAGE_SIZE, prot); |
| 1343 | |
| 1344 | if (IS_ENABLED(CONFIG_RELOCATABLE)) |
| 1345 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, |
| 1346 | pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO); |
| 1347 | |
| 1348 | return 0; |
| 1349 | } |
| 1350 | core_initcall(map_entry_trampoline); |
| 1351 | #endif |
| 1352 | |
| 1353 | /* |
| 1354 | * Declare the VMA areas for the kernel |
| 1355 | */ |
| 1356 | static void __init declare_kernel_vmas(void) |
| 1357 | { |
| 1358 | static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT]; |
| 1359 | |
| 1360 | declare_vma(&vmlinux_seg[0], _text, _etext, VM_NO_GUARD); |
| 1361 | declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD); |
| 1362 | declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD); |
| 1363 | declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD); |
| 1364 | declare_vma(&vmlinux_seg[4], _data, _end, 0); |
| 1365 | } |
| 1366 | |
| 1367 | void __pi_map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa, |
| 1368 | pgprot_t prot, int level, pte_t *tbl, bool may_use_cont, |
| 1369 | u64 va_offset); |
| 1370 | |
| 1371 | static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init, |
| 1372 | kpti_bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init; |
| 1373 | |
| 1374 | static void __init create_idmap(void) |
| 1375 | { |
| 1376 | phys_addr_t start = __pa_symbol(__idmap_text_start); |
| 1377 | phys_addr_t end = __pa_symbol(__idmap_text_end); |
| 1378 | phys_addr_t ptep = __pa_symbol(idmap_ptes); |
| 1379 | |
| 1380 | __pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX, |
| 1381 | IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, |
| 1382 | __phys_to_virt(ptep) - ptep); |
| 1383 | |
| 1384 | if (linear_map_requires_bbml2 || |
| 1385 | (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings)) { |
| 1386 | phys_addr_t pa = __pa_symbol(&idmap_kpti_bbml2_flag); |
| 1387 | |
| 1388 | /* |
| 1389 | * The KPTI G-to-nG conversion code needs a read-write mapping |
| 1390 | * of its synchronization flag in the ID map. This is also used |
| 1391 | * when splitting the linear map to ptes if a secondary CPU |
| 1392 | * doesn't support bbml2. |
| 1393 | */ |
| 1394 | ptep = __pa_symbol(kpti_bbml2_ptes); |
| 1395 | __pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL, |
| 1396 | IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, |
| 1397 | __phys_to_virt(ptep) - ptep); |
| 1398 | } |
| 1399 | } |
| 1400 | |
| 1401 | void __init paging_init(void) |
| 1402 | { |
| 1403 | map_mem(swapper_pg_dir); |
| 1404 | |
| 1405 | memblock_allow_resize(); |
| 1406 | |
| 1407 | create_idmap(); |
| 1408 | declare_kernel_vmas(); |
| 1409 | } |
| 1410 | |
| 1411 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 1412 | static void free_hotplug_page_range(struct page *page, size_t size, |
| 1413 | struct vmem_altmap *altmap) |
| 1414 | { |
| 1415 | if (altmap) { |
| 1416 | vmem_altmap_free(altmap, size >> PAGE_SHIFT); |
| 1417 | } else { |
| 1418 | WARN_ON(PageReserved(page)); |
| 1419 | __free_pages(page, get_order(size)); |
| 1420 | } |
| 1421 | } |
| 1422 | |
| 1423 | static void free_hotplug_pgtable_page(struct page *page) |
| 1424 | { |
| 1425 | free_hotplug_page_range(page, PAGE_SIZE, NULL); |
| 1426 | } |
| 1427 | |
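| | /* |
| |  * Check whether the naturally aligned region (per @mask) containing |
| |  * [start, end) lies entirely within the @floor/@ceiling bounds, in which |
| |  * case the corresponding page-table page may be freed. |
| |  */ |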
| 1428 | static bool pgtable_range_aligned(unsigned long start, unsigned long end, |
| 1429 | unsigned long floor, unsigned long ceiling, |
| 1430 | unsigned long mask) |
| 1431 | { |
| 1432 | start &= mask; |
| 1433 | if (start < floor) |
| 1434 | return false; |
| 1435 | |
| 1436 | if (ceiling) { |
| 1437 | ceiling &= mask; |
| 1438 | if (!ceiling) |
| 1439 | return false; |
| 1440 | } |
| 1441 | |
| 1442 | if (end - 1 > ceiling - 1) |
| 1443 | return false; |
| 1444 | return true; |
| 1445 | } |
| 1446 | |
| 1447 | static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, |
| 1448 | unsigned long end, bool free_mapped, |
| 1449 | struct vmem_altmap *altmap) |
| 1450 | { |
| 1451 | pte_t *ptep, pte; |
| 1452 | |
| 1453 | do { |
| 1454 | ptep = pte_offset_kernel(pmdp, addr); |
| 1455 | pte = __ptep_get(ptep); |
| 1456 | if (pte_none(pte)) |
| 1457 | continue; |
| 1458 | |
| 1459 | WARN_ON(!pte_present(pte)); |
| 1460 | __pte_clear(&init_mm, addr, ptep); |
| 1461 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); |
| 1462 | if (free_mapped) |
| 1463 | free_hotplug_page_range(pte_page(pte), |
| 1464 | PAGE_SIZE, altmap); |
| 1465 | } while (addr += PAGE_SIZE, addr < end); |
| 1466 | } |
| 1467 | |
| 1468 | static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, |
| 1469 | unsigned long end, bool free_mapped, |
| 1470 | struct vmem_altmap *altmap) |
| 1471 | { |
| 1472 | unsigned long next; |
| 1473 | pmd_t *pmdp, pmd; |
| 1474 | |
| 1475 | do { |
| 1476 | next = pmd_addr_end(addr, end); |
| 1477 | pmdp = pmd_offset(pudp, addr); |
| 1478 | pmd = READ_ONCE(*pmdp); |
| 1479 | if (pmd_none(pmd)) |
| 1480 | continue; |
| 1481 | |
| 1482 | WARN_ON(!pmd_present(pmd)); |
| 1483 | if (pmd_sect(pmd)) { |
| 1484 | pmd_clear(pmdp); |
| 1485 | |
| 1486 | /* |
| 1487 | * One TLBI should be sufficient here as the PMD_SIZE |
| 1488 | * range is mapped with a single block entry. |
| 1489 | */ |
| 1490 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); |
| 1491 | if (free_mapped) |
| 1492 | free_hotplug_page_range(pmd_page(pmd), |
| 1493 | PMD_SIZE, altmap); |
| 1494 | continue; |
| 1495 | } |
| 1496 | WARN_ON(!pmd_table(pmd)); |
| 1497 | unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap); |
| 1498 | } while (addr = next, addr < end); |
| 1499 | } |
| 1500 | |
| 1501 | static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, |
| 1502 | unsigned long end, bool free_mapped, |
| 1503 | struct vmem_altmap *altmap) |
| 1504 | { |
| 1505 | unsigned long next; |
| 1506 | pud_t *pudp, pud; |
| 1507 | |
| 1508 | do { |
| 1509 | next = pud_addr_end(addr, end); |
| 1510 | pudp = pud_offset(p4dp, addr); |
| 1511 | pud = READ_ONCE(*pudp); |
| 1512 | if (pud_none(pud)) |
| 1513 | continue; |
| 1514 | |
| 1515 | WARN_ON(!pud_present(pud)); |
| 1516 | if (pud_sect(pud)) { |
| 1517 | pud_clear(pudp); |
| 1518 | |
| 1519 | /* |
| 1520 | * One TLBI should be sufficient here as the PUD_SIZE |
| 1521 | * range is mapped with a single block entry. |
| 1522 | */ |
| 1523 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); |
| 1524 | if (free_mapped) |
| 1525 | free_hotplug_page_range(pud_page(pud), |
| 1526 | PUD_SIZE, altmap); |
| 1527 | continue; |
| 1528 | } |
| 1529 | WARN_ON(!pud_table(pud)); |
| 1530 | unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap); |
| 1531 | } while (addr = next, addr < end); |
| 1532 | } |
| 1533 | |
| 1534 | static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, |
| 1535 | unsigned long end, bool free_mapped, |
| 1536 | struct vmem_altmap *altmap) |
| 1537 | { |
| 1538 | unsigned long next; |
| 1539 | p4d_t *p4dp, p4d; |
| 1540 | |
| 1541 | do { |
| 1542 | next = p4d_addr_end(addr, end); |
| 1543 | p4dp = p4d_offset(pgdp, addr); |
| 1544 | p4d = READ_ONCE(*p4dp); |
| 1545 | if (p4d_none(p4d)) |
| 1546 | continue; |
| 1547 | |
| 1548 | WARN_ON(!p4d_present(p4d)); |
| 1549 | unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap); |
| 1550 | } while (addr = next, addr < end); |
| 1551 | } |
| 1552 | |
| 1553 | static void unmap_hotplug_range(unsigned long addr, unsigned long end, |
| 1554 | bool free_mapped, struct vmem_altmap *altmap) |
| 1555 | { |
| 1556 | unsigned long next; |
| 1557 | pgd_t *pgdp, pgd; |
| 1558 | |
| 1559 | /* |
| 1560 | * altmap can only be used as vmemmap mapping backing memory. |
| 1561 | * In case the backing memory itself is not being freed, then |
| 1562 | * altmap is irrelevant. Warn about this inconsistency when |
| 1563 | * encountered. |
| 1564 | */ |
| 1565 | WARN_ON(!free_mapped && altmap); |
| 1566 | |
| 1567 | do { |
| 1568 | next = pgd_addr_end(addr, end); |
| 1569 | pgdp = pgd_offset_k(addr); |
| 1570 | pgd = READ_ONCE(*pgdp); |
| 1571 | if (pgd_none(pgd)) |
| 1572 | continue; |
| 1573 | |
| 1574 | WARN_ON(!pgd_present(pgd)); |
| 1575 | unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap); |
| 1576 | } while (addr = next, addr < end); |
| 1577 | } |
| 1578 | |
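| | /* |
| | * The free_empty_{pte,pmd,pud,p4d}_table() helpers below walk a range whose |
| | * leaf entries have already been cleared by unmap_hotplug_range() and free |
| | * any page table pages that are now completely empty. The floor/ceiling |
| | * limits protect tables shared with neighbouring regions from being freed. |
| | */ |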
| 1579 | static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, |
| 1580 | unsigned long end, unsigned long floor, |
| 1581 | unsigned long ceiling) |
| 1582 | { |
| 1583 | pte_t *ptep, pte; |
| 1584 | unsigned long i, start = addr; |
| 1585 | |
| 1586 | do { |
| 1587 | ptep = pte_offset_kernel(pmdp, addr); |
| 1588 | pte = __ptep_get(ptep); |
| 1589 | |
| 1590 | /* |
| 1591 | * This is just a sanity check, verifying that the pte entries have |
| 1592 | * already been cleared by the earlier unmap loops. |
| 1593 | */ |
| 1594 | WARN_ON(!pte_none(pte)); |
| 1595 | } while (addr += PAGE_SIZE, addr < end); |
| 1596 | |
| 1597 | if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) |
| 1598 | return; |
| 1599 | |
| 1600 | /* |
| 1601 | * Check whether we can free the pte page if the rest of the |
| 1602 | * entries are empty. Overlap with other regions has been |
| 1603 | * handled by the floor/ceiling check. |
| 1604 | */ |
| 1605 | ptep = pte_offset_kernel(pmdp, 0UL); |
| 1606 | for (i = 0; i < PTRS_PER_PTE; i++) { |
| 1607 | if (!pte_none(__ptep_get(&ptep[i]))) |
| 1608 | return; |
| 1609 | } |
| 1610 | |
| 1611 | pmd_clear(pmdp); |
| 1612 | __flush_tlb_kernel_pgtable(start); |
| 1613 | free_hotplug_pgtable_page(virt_to_page(ptep)); |
| 1614 | } |
| 1615 | |
| 1616 | static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, |
| 1617 | unsigned long end, unsigned long floor, |
| 1618 | unsigned long ceiling) |
| 1619 | { |
| 1620 | pmd_t *pmdp, pmd; |
| 1621 | unsigned long i, next, start = addr; |
| 1622 | |
| 1623 | do { |
| 1624 | next = pmd_addr_end(addr, end); |
| 1625 | pmdp = pmd_offset(pudp, addr); |
| 1626 | pmd = READ_ONCE(*pmdp); |
| 1627 | if (pmd_none(pmd)) |
| 1628 | continue; |
| 1629 | |
| 1630 | WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd)); |
| 1631 | free_empty_pte_table(pmdp, addr, next, floor, ceiling); |
| 1632 | } while (addr = next, addr < end); |
| 1633 | |
| 1634 | if (CONFIG_PGTABLE_LEVELS <= 2) |
| 1635 | return; |
| 1636 | |
| 1637 | if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) |
| 1638 | return; |
| 1639 | |
| 1640 | /* |
| 1641 | * Check whether we can free the pmd page if the rest of the |
| 1642 | * entries are empty. Overlap with other regions has been |
| 1643 | * handled by the floor/ceiling check. |
| 1644 | */ |
| 1645 | pmdp = pmd_offset(pudp, 0UL); |
| 1646 | for (i = 0; i < PTRS_PER_PMD; i++) { |
| 1647 | if (!pmd_none(READ_ONCE(pmdp[i]))) |
| 1648 | return; |
| 1649 | } |
| 1650 | |
| 1651 | pud_clear(pudp); |
| 1652 | __flush_tlb_kernel_pgtable(start); |
| 1653 | free_hotplug_pgtable_page(virt_to_page(pmdp)); |
| 1654 | } |
| 1655 | |
| 1656 | static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, |
| 1657 | unsigned long end, unsigned long floor, |
| 1658 | unsigned long ceiling) |
| 1659 | { |
| 1660 | pud_t *pudp, pud; |
| 1661 | unsigned long i, next, start = addr; |
| 1662 | |
| 1663 | do { |
| 1664 | next = pud_addr_end(addr, end); |
| 1665 | pudp = pud_offset(p4dp, addr); |
| 1666 | pud = READ_ONCE(*pudp); |
| 1667 | if (pud_none(pud)) |
| 1668 | continue; |
| 1669 | |
| 1670 | WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud)); |
| 1671 | free_empty_pmd_table(pudp, addr, next, floor, ceiling); |
| 1672 | } while (addr = next, addr < end); |
| 1673 | |
| 1674 | if (!pgtable_l4_enabled()) |
| 1675 | return; |
| 1676 | |
| 1677 | if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK)) |
| 1678 | return; |
| 1679 | |
| 1680 | /* |
| 1681 | * Check whether we can free the pud page if the rest of the |
| 1682 | * entries are empty. Overlap with other regions has been |
| 1683 | * handled by the floor/ceiling check. |
| 1684 | */ |
| 1685 | pudp = pud_offset(p4dp, 0UL); |
| 1686 | for (i = 0; i < PTRS_PER_PUD; i++) { |
| 1687 | if (!pud_none(READ_ONCE(pudp[i]))) |
| 1688 | return; |
| 1689 | } |
| 1690 | |
| 1691 | p4d_clear(p4dp); |
| 1692 | __flush_tlb_kernel_pgtable(start); |
| 1693 | free_hotplug_pgtable_page(virt_to_page(pudp)); |
| 1694 | } |
| 1695 | |
| 1696 | static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, |
| 1697 | unsigned long end, unsigned long floor, |
| 1698 | unsigned long ceiling) |
| 1699 | { |
| 1700 | p4d_t *p4dp, p4d; |
| 1701 | unsigned long i, next, start = addr; |
| 1702 | |
| 1703 | do { |
| 1704 | next = p4d_addr_end(addr, end); |
| 1705 | p4dp = p4d_offset(pgdp, addr); |
| 1706 | p4d = READ_ONCE(*p4dp); |
| 1707 | if (p4d_none(p4d)) |
| 1708 | continue; |
| 1709 | |
| 1710 | WARN_ON(!p4d_present(p4d)); |
| 1711 | free_empty_pud_table(p4dp, addr, next, floor, ceiling); |
| 1712 | } while (addr = next, addr < end); |
| 1713 | |
| 1714 | if (!pgtable_l5_enabled()) |
| 1715 | return; |
| 1716 | |
| 1717 | if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) |
| 1718 | return; |
| 1719 | |
| 1720 | /* |
| 1721 | * Check whether we can free the p4d page if the rest of the |
| 1722 | * entries are empty. Overlap with other regions has been |
| 1723 | * handled by the floor/ceiling check. |
| 1724 | */ |
| 1725 | p4dp = p4d_offset(pgdp, 0UL); |
| 1726 | for (i = 0; i < PTRS_PER_P4D; i++) { |
| 1727 | if (!p4d_none(READ_ONCE(p4dp[i]))) |
| 1728 | return; |
| 1729 | } |
| 1730 | |
| 1731 | pgd_clear(pgdp); |
| 1732 | __flush_tlb_kernel_pgtable(start); |
| 1733 | free_hotplug_pgtable_page(virt_to_page(p4dp)); |
| 1734 | } |
| 1735 | |
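| | /* |
| | * Top-level entry point for the helpers above: walk [addr, end) from the PGD |
| | * downwards and release every page table page left empty after unmapping. |
| | */ |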
| 1736 | static void free_empty_tables(unsigned long addr, unsigned long end, |
| 1737 | unsigned long floor, unsigned long ceiling) |
| 1738 | { |
| 1739 | unsigned long next; |
| 1740 | pgd_t *pgdp, pgd; |
| 1741 | |
| 1742 | do { |
| 1743 | next = pgd_addr_end(addr, end); |
| 1744 | pgdp = pgd_offset_k(addr); |
| 1745 | pgd = READ_ONCE(*pgdp); |
| 1746 | if (pgd_none(pgd)) |
| 1747 | continue; |
| 1748 | |
| 1749 | WARN_ON(!pgd_present(pgd)); |
| 1750 | free_empty_p4d_table(pgdp, addr, next, floor, ceiling); |
| 1751 | } while (addr = next, addr < end); |
| 1752 | } |
| 1753 | #endif |
| 1754 | |
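| | /* |
| | * Callbacks for the generic vmemmap_populate_hugepages() path: install a |
| | * section (block) mapping for the vmemmap, and report whether an existing |
| | * PMD already provides one. |
| | */ |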
| 1755 | void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node, |
| 1756 | unsigned long addr, unsigned long next) |
| 1757 | { |
| 1758 | pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); |
| 1759 | } |
| 1760 | |
| 1761 | int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, |
| 1762 | unsigned long addr, unsigned long next) |
| 1763 | { |
| 1764 | vmemmap_verify((pte_t *)pmdp, node, addr, next); |
| 1765 | |
| 1766 | return pmd_sect(READ_ONCE(*pmdp)); |
| 1767 | } |
| 1768 | |
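| | /* |
| | * Use PMD block mappings for the vmemmap where possible, i.e. with 4K pages |
| | * and a full section worth of struct pages; otherwise fall back to base |
| | * pages. |
| | */ |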
| 1769 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
| 1770 | struct vmem_altmap *altmap) |
| 1771 | { |
| 1772 | WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); |
| 1773 | /* [start, end] should be within one section */ |
| 1774 | WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); |
| 1775 | |
| 1776 | if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) || |
| 1777 | (end - start < PAGES_PER_SECTION * sizeof(struct page))) |
| 1778 | return vmemmap_populate_basepages(start, end, node, altmap); |
| 1779 | else |
| 1780 | return vmemmap_populate_hugepages(start, end, node, altmap); |
| 1781 | } |
| 1782 | |
| 1783 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 1784 | void vmemmap_free(unsigned long start, unsigned long end, |
| 1785 | struct vmem_altmap *altmap) |
| 1786 | { |
| 1787 | WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); |
| 1788 | |
| 1789 | unmap_hotplug_range(start, end, true, altmap); |
| 1790 | free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); |
| 1791 | } |
| 1792 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
| 1793 | |
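| | /* |
| | * Block-mapping helpers used by the huge vmap/ioremap code. A live entry may |
| | * only have its permissions changed (see pgattr_change_is_safe()), since any |
| | * other change would require a break-before-make sequence. |
| | */ |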
| 1794 | int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) |
| 1795 | { |
| 1796 | pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); |
| 1797 | |
| 1798 | /* Only allow permission changes for now */ |
| 1799 | if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)), |
| 1800 | pud_val(new_pud))) |
| 1801 | return 0; |
| 1802 | |
| 1803 | VM_BUG_ON(phys & ~PUD_MASK); |
| 1804 | set_pud(pudp, new_pud); |
| 1805 | return 1; |
| 1806 | } |
| 1807 | |
| 1808 | int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) |
| 1809 | { |
| 1810 | pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot)); |
| 1811 | |
| 1812 | /* Only allow permission changes for now */ |
| 1813 | if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)), |
| 1814 | pmd_val(new_pmd))) |
| 1815 | return 0; |
| 1816 | |
| 1817 | VM_BUG_ON(phys & ~PMD_MASK); |
| 1818 | set_pmd(pmdp, new_pmd); |
| 1819 | return 1; |
| 1820 | } |
| 1821 | |
| 1822 | #ifndef __PAGETABLE_P4D_FOLDED |
| 1823 | void p4d_clear_huge(p4d_t *p4dp) |
| 1824 | { |
| 1825 | } |
| 1826 | #endif |
| 1827 | |
| 1828 | int pud_clear_huge(pud_t *pudp) |
| 1829 | { |
| 1830 | if (!pud_sect(READ_ONCE(*pudp))) |
| 1831 | return 0; |
| 1832 | pud_clear(pudp); |
| 1833 | return 1; |
| 1834 | } |
| 1835 | |
| 1836 | int pmd_clear_huge(pmd_t *pmdp) |
| 1837 | { |
| 1838 | if (!pmd_sect(READ_ONCE(*pmdp))) |
| 1839 | return 0; |
| 1840 | pmd_clear(pmdp); |
| 1841 | return 1; |
| 1842 | } |
| 1843 | |
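| | /* |
| | * Clear a table PMD, flush the page table walk caches and free the PTE page |
| | * it pointed to. The read lock/unlock of init_mm.mmap_lock synchronizes |
| | * against a concurrent ptdump walk when arm64_ptdump_lock_key is on. |
| | */ |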
| 1844 | static int __pmd_free_pte_page(pmd_t *pmdp, unsigned long addr, |
| 1845 | bool acquire_mmap_lock) |
| 1846 | { |
| 1847 | pte_t *table; |
| 1848 | pmd_t pmd; |
| 1849 | |
| 1850 | pmd = READ_ONCE(*pmdp); |
| 1851 | |
| 1852 | if (!pmd_table(pmd)) { |
| 1853 | VM_WARN_ON(1); |
| 1854 | return 1; |
| 1855 | } |
| 1856 | |
| 1857 | /* See comment in pud_free_pmd_page for static key logic */ |
| 1858 | table = pte_offset_kernel(pmdp, addr); |
| 1859 | pmd_clear(pmdp); |
| 1860 | __flush_tlb_kernel_pgtable(addr); |
| 1861 | if (static_branch_unlikely(&arm64_ptdump_lock_key) && acquire_mmap_lock) { |
| 1862 | mmap_read_lock(&init_mm); |
| 1863 | mmap_read_unlock(&init_mm); |
| 1864 | } |
| 1865 | |
| 1866 | pte_free_kernel(NULL, table); |
| 1867 | return 1; |
| 1868 | } |
| 1869 | |
| 1870 | int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) |
| 1871 | { |
| 1872 | /* If ptdump is walking the pagetables, acquire init_mm.mmap_lock */ |
| 1873 | return __pmd_free_pte_page(pmdp, addr, /* acquire_mmap_lock = */ true); |
| 1874 | } |
| 1875 | |
| 1876 | int pud_free_pmd_page(pud_t *pudp, unsigned long addr) |
| 1877 | { |
| 1878 | pmd_t *table; |
| 1879 | pmd_t *pmdp; |
| 1880 | pud_t pud; |
| 1881 | unsigned long next, end; |
| 1882 | |
| 1883 | pud = READ_ONCE(*pudp); |
| 1884 | |
| 1885 | if (!pud_table(pud)) { |
| 1886 | VM_WARN_ON(1); |
| 1887 | return 1; |
| 1888 | } |
| 1889 | |
| 1890 | table = pmd_offset(pudp, addr); |
| 1891 | |
| 1892 | /* |
| 1893 | * Our objective is to prevent ptdump from reading a PMD table which has |
| 1894 | * been freed. In this race, if pud_free_pmd_page() observes the static |
| 1895 | * key as on (it was flipped by ptdump), then the mmap lock sequence here, |
| 1896 | * as a result of the mmap write lock/unlock sequence in ptdump, gives us |
| 1897 | * the correct synchronization. If not, ptdump has not yet started walking |
| 1898 | * the pagetables - the sequence of barriers |
| 1899 | * issued by __flush_tlb_kernel_pgtable() guarantees that ptdump will |
| 1900 | * observe an empty PUD. |
| 1901 | */ |
| 1902 | pud_clear(pudp); |
| 1903 | __flush_tlb_kernel_pgtable(addr); |
| 1904 | if (static_branch_unlikely(&arm64_ptdump_lock_key)) { |
| 1905 | mmap_read_lock(&init_mm); |
| 1906 | mmap_read_unlock(&init_mm); |
| 1907 | } |
| 1908 | |
| 1909 | pmdp = table; |
| 1910 | next = addr; |
| 1911 | end = addr + PUD_SIZE; |
| 1912 | do { |
| 1913 | if (pmd_present(pmdp_get(pmdp))) |
| 1914 | /* |
| 1915 | * PMD has been isolated, so ptdump won't see it. No |
| 1916 | * need to acquire init_mm.mmap_lock. |
| 1917 | */ |
| 1918 | __pmd_free_pte_page(pmdp, next, /* acquire_mmap_lock = */ false); |
| 1919 | } while (pmdp++, next += PMD_SIZE, next != end); |
| 1920 | |
| 1921 | pmd_free(NULL, table); |
| 1922 | return 1; |
| 1923 | } |
| 1924 | |
| 1925 | #ifdef CONFIG_MEMORY_HOTPLUG |
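| | /* |
| | * Tear down the linear mapping of a hot-removed range: clear the leaf |
| | * entries without freeing the mapped pages, then release any page table |
| | * pages that have become empty. |
| | */ |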
| 1926 | static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) |
| 1927 | { |
| 1928 | unsigned long end = start + size; |
| 1929 | |
| 1930 | WARN_ON(pgdir != init_mm.pgd); |
| 1931 | WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); |
| 1932 | |
| 1933 | unmap_hotplug_range(start, end, false, NULL); |
| 1934 | free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); |
| 1935 | } |
| 1936 | |
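| | /* |
| | * Report the physical address range which the linear map can cover, so that |
| | * core memory hotplug can reject requests falling outside of it. |
| | */ |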
| 1937 | struct range arch_get_mappable_range(void) |
| 1938 | { |
| 1939 | struct range mhp_range; |
| 1940 | phys_addr_t start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); |
| 1941 | phys_addr_t end_linear_pa = __pa(PAGE_END - 1); |
| 1942 | |
| 1943 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { |
| 1944 | /* |
| 1945 | * Check for a wrap: because of the randomized linear mapping, the |
| 1946 | * start physical address can actually be bigger than the end physical |
| 1947 | * address. In this case set start to zero, because the range |
| 1948 | * [0, end_linear_pa] must still be able to cover all addressable |
| 1949 | * physical addresses. |
| 1950 | */ |
| 1951 | if (start_linear_pa > end_linear_pa) |
| 1952 | start_linear_pa = 0; |
| 1953 | } |
| 1954 | |
| 1955 | WARN_ON(start_linear_pa > end_linear_pa); |
| 1956 | |
| 1957 | /* |
| 1958 | * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)], |
| 1959 | * covering both of its ends but excluding PAGE_END. The maximum physical |
| 1960 | * range which can be mapped inside this linear mapping range must also |
| 1961 | * be derived from its end points. |
| 1962 | */ |
| 1963 | mhp_range.start = start_linear_pa; |
| 1964 | mhp_range.end = end_linear_pa; |
| 1965 | |
| 1966 | return mhp_range; |
| 1967 | } |
| 1968 | |
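| | /* |
| | * Hot-add: map the new range into the linear map (restricted to PTE mappings |
| | * when force_pte_mapping() requires it), then hand the range to the core |
| | * hotplug code via __add_pages(). |
| | */ |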
| 1969 | int arch_add_memory(int nid, u64 start, u64 size, |
| 1970 | struct mhp_params *params) |
| 1971 | { |
| 1972 | int ret, flags = NO_EXEC_MAPPINGS; |
| 1973 | |
| 1974 | VM_BUG_ON(!mhp_range_allowed(start, size, true)); |
| 1975 | |
| 1976 | if (force_pte_mapping()) |
| 1977 | flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; |
| 1978 | |
| 1979 | ret = __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), |
| 1980 | size, params->pgprot, pgd_pgtable_alloc_init_mm, |
| 1981 | flags); |
| 1982 | if (ret) |
| 1983 | goto err; |
| 1984 | |
| 1985 | memblock_clear_nomap(start, size); |
| 1986 | |
| 1987 | ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, |
| 1988 | params); |
| 1989 | if (ret) |
| 1990 | goto err; |
| 1991 | |
| 1992 | /* Address of hotplugged memory can be smaller */ |
| 1993 | max_pfn = max(max_pfn, PFN_UP(start + size)); |
| 1994 | max_low_pfn = max_pfn; |
| 1995 | |
| 1996 | return 0; |
| 1997 | |
| 1998 | err: |
| 1999 | __remove_pgd_mapping(swapper_pg_dir, |
| 2000 | __phys_to_virt(start), size); |
| 2001 | return ret; |
| 2002 | } |
| 2003 | |
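| | /* |
| | * Hot-remove: release the pages from the core hotplug code, then tear down |
| | * the corresponding part of the linear map. |
| | */ |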
| 2004 | void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) |
| 2005 | { |
| 2006 | unsigned long start_pfn = start >> PAGE_SHIFT; |
| 2007 | unsigned long nr_pages = size >> PAGE_SHIFT; |
| 2008 | |
| 2009 | __remove_pages(start_pfn, nr_pages, altmap); |
| 2010 | __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); |
| 2011 | } |
| 2012 | |
| 2013 | /* |
| 2014 | * This memory hotplug notifier helps prevent boot memory from being |
| 2015 | * inadvertently removed, as it blocks the pfn range offlining process in |
| 2016 | * __offline_pages(). Hence this prevents both the offlining and the |
| 2017 | * removal process for boot memory, which is initially always online. |
| 2018 | * In the future, if and when boot memory can be removed, this notifier |
| 2019 | * should be dropped and free_hotplug_page_range() should handle any |
| 2020 | * reserved pages allocated during boot. |
| 2021 | */ |
| 2022 | static int prevent_bootmem_remove_notifier(struct notifier_block *nb, |
| 2023 | unsigned long action, void *data) |
| 2024 | { |
| 2025 | struct mem_section *ms; |
| 2026 | struct memory_notify *arg = data; |
| 2027 | unsigned long end_pfn = arg->start_pfn + arg->nr_pages; |
| 2028 | unsigned long pfn = arg->start_pfn; |
| 2029 | |
| 2030 | if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE)) |
| 2031 | return NOTIFY_OK; |
| 2032 | |
| 2033 | for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { |
| 2034 | unsigned long start = PFN_PHYS(pfn); |
| 2035 | unsigned long end = start + (1UL << PA_SECTION_SHIFT); |
| 2036 | |
| 2037 | ms = __pfn_to_section(pfn); |
| 2038 | if (!early_section(ms)) |
| 2039 | continue; |
| 2040 | |
| 2041 | if (action == MEM_GOING_OFFLINE) { |
| 2042 | /* |
| 2043 | * Boot memory removal is not supported. Prevent |
| 2044 | * it by blocking any attempted offline request |
| 2045 | * for the boot memory and just report it. |
| 2046 | */ |
| 2047 | pr_warn("Boot memory [%lx %lx] offlining attempted\n" , start, end); |
| 2048 | return NOTIFY_BAD; |
| 2049 | } else if (action == MEM_OFFLINE) { |
| 2050 | /* |
| 2051 | * This should never have happened. Boot memory |
| 2052 | * offlining should have been prevented by this |
| 2053 | * very notifier. Some memory removal procedure |
| 2054 | * has probably changed, which would then require |
| 2055 | * further debugging. |
| 2056 | */ |
| 2057 | pr_err("Boot memory [%lx %lx] offlined\n" , start, end); |
| 2058 | |
| 2059 | /* |
| 2060 | * Core memory hotplug does not process a return |
| 2061 | * code from the notifier for MEM_OFFLINE events. |
| 2062 | * The error condition has been reported. Return |
| 2063 | * from here as if ignored. |
| 2064 | */ |
| 2065 | return NOTIFY_DONE; |
| 2066 | } |
| 2067 | } |
| 2068 | return NOTIFY_OK; |
| 2069 | } |
| 2070 | |
| 2071 | static struct notifier_block prevent_bootmem_remove_nb = { |
| 2072 | .notifier_call = prevent_bootmem_remove_notifier, |
| 2073 | }; |
| 2074 | |
| 2075 | /* |
| 2076 | * This ensures that boot memory sections on the platform are online |
| 2077 | * from early boot. Memory sections cannot be prevented from being |
| 2078 | * offlined and removed if, for some reason, they are not online to begin with. |
| 2079 | * This helps validate the basic assumption on which the above memory |
| 2080 | * event notifier works to prevent boot memory section offlining and |
| 2081 | * its possible removal. |
| 2082 | */ |
| 2083 | static void validate_bootmem_online(void) |
| 2084 | { |
| 2085 | phys_addr_t start, end, addr; |
| 2086 | struct mem_section *ms; |
| 2087 | u64 i; |
| 2088 | |
| 2089 | /* |
| 2090 | * Scanning across all memblock regions might be expensive |
| 2091 | * on some big memory systems. Hence enable this |
| 2092 | * validation only with DEBUG_VM. |
| 2093 | */ |
| 2094 | if (!IS_ENABLED(CONFIG_DEBUG_VM)) |
| 2095 | return; |
| 2096 | |
| 2097 | for_each_mem_range(i, &start, &end) { |
| 2098 | for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) { |
| 2099 | ms = __pfn_to_section(PHYS_PFN(addr)); |
| 2100 | |
| 2101 | /* |
| 2102 | * All memory ranges in the system at this point |
| 2103 | * should have been marked as early sections. |
| 2104 | */ |
| 2105 | WARN_ON(!early_section(ms)); |
| 2106 | |
| 2107 | /* |
| 2108 | * The memory notifier mechanism here to prevent boot |
| 2109 | * memory offlining depends on the fact that each |
| 2110 | * early memory section on the system is initially |
| 2111 | * online. Otherwise a given memory section which |
| 2112 | * is already offline will be overlooked and can |
| 2113 | * be removed completely. Call out such sections. |
| 2114 | */ |
| 2115 | if (!online_section(ms)) |
| 2116 | pr_err("Boot memory [%llx %llx] is offline, can be removed\n" , |
| 2117 | addr, addr + (1UL << PA_SECTION_SHIFT)); |
| 2118 | } |
| 2119 | } |
| 2120 | } |
| 2121 | |
| 2122 | static int __init prevent_bootmem_remove_init(void) |
| 2123 | { |
| 2124 | int ret = 0; |
| 2125 | |
| 2126 | if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) |
| 2127 | return ret; |
| 2128 | |
| 2129 | validate_bootmem_online(); |
| 2130 | ret = register_memory_notifier(&prevent_bootmem_remove_nb); |
| 2131 | if (ret) |
| 2132 | pr_err("%s: Notifier registration failed %d\n" , __func__, ret); |
| 2133 | |
| 2134 | return ret; |
| 2135 | } |
| 2136 | early_initcall(prevent_bootmem_remove_init); |
| 2137 | #endif |
| 2138 | |
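| | /* |
| | * Start of a protection-change transaction on a batch of ptes: clear the |
| | * entries and return the collected original pte. On CPUs affected by erratum |
| | * #2645198 the TLB is also invalidated for accessible user-executable |
| | * mappings, so that the change away from executable follows break-before-make. |
| | */ |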
| 2139 | pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr, |
| 2140 | pte_t *ptep, unsigned int nr) |
| 2141 | { |
| 2142 | pte_t pte = get_and_clear_ptes(vma->vm_mm, addr, ptep, nr); |
| 2143 | |
| 2144 | if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { |
| 2145 | /* |
| 2146 | * Break-before-make (BBM) is required for all user space mappings |
| 2147 | * when the permission changes from executable to non-executable |
| 2148 | * in cases where the CPU is affected by erratum #2645198. |
| 2149 | */ |
| 2150 | if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte)) |
| 2151 | __flush_tlb_range(vma, addr, nr * PAGE_SIZE, |
| 2152 | PAGE_SIZE, true, 3); |
| 2153 | } |
| 2154 | |
| 2155 | return pte; |
| 2156 | } |
| 2157 | |
| 2158 | pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) |
| 2159 | { |
| 2160 | return modify_prot_start_ptes(vma, addr, ptep, 1); |
| 2161 | } |
| 2162 | |
| 2163 | void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, |
| 2164 | pte_t *ptep, pte_t old_pte, pte_t pte, |
| 2165 | unsigned int nr) |
| 2166 | { |
| 2167 | set_ptes(vma->vm_mm, addr, ptep, pte, nr); |
| 2168 | } |
| 2169 | |
| 2170 | void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, |
| 2171 | pte_t old_pte, pte_t pte) |
| 2172 | { |
| 2173 | modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1); |
| 2174 | } |
| 2175 | |
| 2176 | /* |
| 2177 | * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, |
| 2178 | * avoiding the possibility of conflicting TLB entries being allocated. |
| 2179 | */ |
| 2180 | void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp) |
| 2181 | { |
| 2182 | typedef void (ttbr_replace_func)(phys_addr_t); |
| 2183 | extern ttbr_replace_func idmap_cpu_replace_ttbr1; |
| 2184 | ttbr_replace_func *replace_phys; |
| 2185 | unsigned long daif; |
| 2186 | |
| 2187 | /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */ |
| 2188 | phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp)); |
| 2189 | |
| 2190 | if (cnp) |
| 2191 | ttbr1 |= TTBR_CNP_BIT; |
| 2192 | |
| 2193 | replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); |
| 2194 | |
| 2195 | cpu_install_idmap(); |
| 2196 | |
| 2197 | /* |
| 2198 | * We really don't want to take *any* exceptions while TTBR1 is |
| 2199 | * in the process of being replaced so mask everything. |
| 2200 | */ |
| 2201 | daif = local_daif_save(); |
| 2202 | replace_phys(ttbr1); |
| 2203 | local_daif_restore(daif); |
| 2204 | |
| 2205 | cpu_uninstall_idmap(); |
| 2206 | } |
| 2207 | |
| 2208 | #ifdef CONFIG_ARCH_HAS_PKEYS |
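| | /* |
| | * Translate the generic PKEY_DISABLE_* bits into a POE permission field and |
| | * install it into the slot for @pkey in POR_EL0. |
| | */ |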
| 2209 | int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) |
| 2210 | { |
| 2211 | u64 new_por; |
| 2212 | u64 old_por; |
| 2213 | |
| 2214 | if (!system_supports_poe()) |
| 2215 | return -ENOSPC; |
| 2216 | |
| 2217 | /* |
| 2218 | * This code should only be called with valid 'pkey' |
| 2219 | * values originating from in-kernel users. Complain |
| 2220 | * if a bad value is observed. |
| 2221 | */ |
| 2222 | if (WARN_ON_ONCE(pkey >= arch_max_pkey())) |
| 2223 | return -EINVAL; |
| 2224 | |
| 2225 | /* Set the bits we need in POR: */ |
| 2226 | new_por = POE_RWX; |
| 2227 | if (init_val & PKEY_DISABLE_WRITE) |
| 2228 | new_por &= ~POE_W; |
| 2229 | if (init_val & PKEY_DISABLE_ACCESS) |
| 2230 | new_por &= ~POE_RW; |
| 2231 | if (init_val & PKEY_DISABLE_READ) |
| 2232 | new_por &= ~POE_R; |
| 2233 | if (init_val & PKEY_DISABLE_EXECUTE) |
| 2234 | new_por &= ~POE_X; |
| 2235 | |
| 2236 | /* Shift the bits in to the correct place in POR for pkey: */ |
| 2237 | new_por = POR_ELx_PERM_PREP(pkey, new_por); |
| 2238 | |
| 2239 | /* Get old POR and mask off any old bits in place: */ |
| 2240 | old_por = read_sysreg_s(SYS_POR_EL0); |
| 2241 | old_por &= ~(POE_MASK << POR_ELx_PERM_SHIFT(pkey)); |
| 2242 | |
| 2243 | /* Write old part along with new part: */ |
| 2244 | write_sysreg_s(old_por | new_por, SYS_POR_EL0); |
| 2245 | |
| 2246 | return 0; |
| 2247 | } |
| 2248 | #endif |
| 2249 | |