/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	((TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT) |\
			 (TCR_EL1_TG1_64K << TCR_EL1_TG1_SHIFT))
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	((TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT) |\
			 (TCR_EL1_TG1_16K << TCR_EL1_TG1_SHIFT))
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	((TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT) |\
			 (TCR_EL1_TG1_4K << TCR_EL1_TG1_SHIFT))
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_EL1_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* Page table walks are cacheable, inner/outer write-back write-allocate (WBWA) */
#define TCR_CACHE_FLAGS	(TCR_IRGN_WBWA | TCR_ORGN_WBWA)

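/*
 * Software tag-based KASAN stores the tag in the top byte of kernel
 * pointers, so TTBR1 addresses need Top Byte Ignore (TBI1); TBID1 limits
 * this to data accesses, leaving instruction fetch addresses unaffected.
 */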
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	(TCR_EL1_TBI1 | TCR_EL1_TBID1)
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	(TCR_EL1_TCMA1 | TCR_EL1_TBI1 | TCR_EL1_TBID1)
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	(TCR_EL1_TBI1 | TCR_EL1_TBID1)
#else
#define TCR_MTE_FLAGS	0
#endif

#define TCR_IRGN_WBWA	((TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT) |\
			 (TCR_EL1_IRGN1_WBWA << TCR_EL1_IRGN1_SHIFT))
#define TCR_ORGN_WBWA	((TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT) |\
			 (TCR_EL1_ORGN1_WBWA << TCR_EL1_ORGN1_SHIFT))
#define TCR_SHARED	((TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT) |\
			 (TCR_EL1_SH1_INNER << TCR_EL1_SH1_SHIFT))

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
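/*
 * Each MAIR_ATTRIDX(attr, idx) places the 8-bit memory attribute 'attr'
 * into the byte of MAIR_EL1 selected by memory-type index 'idx', which is
 * what page table entries reference through their AttrIndx field.
 */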

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
alternative_if ARM64_HAS_TCR2
	mrs	x2, REG_TCR2_EL1
	str	x2, [x0, #104]
alternative_else_nop_endif
	ret
SYM_FUNC_END(cpu_do_suspend)
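
/*
 * For reference, the stores above fill fourteen consecutive 64-bit slots
 * (offsets 0-104; the slot at #104 is only written when ARM64_HAS_TCR2 is
 * detected). The C-side context in <asm/suspend.h> roughly matches the
 * sketch below; NR_CTX_REGS must therefore cover at least fourteen entries
 * (the header holds the authoritative definition):
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[NR_CTX_REGS];
 *		u64 sp;
 *	} __aligned(16);
 */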

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/*
	 * Don't change T0SZ here: copy the live T0SZ field from the current
	 * TCR_EL1 into the value being restored.
	 */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_EL1_T0SZ_SHIFT, TCR_EL1_T0SZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
alternative_if ARM64_HAS_TCR2
	ldr	x2, [x0, #104]
	msr	REG_TCR2_EL1, x2
alternative_else_nop_endif

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1: OSLSR_EL1 is read-only, so
	 * its OSLK bit (bit 1) can only be set or cleared through OSLAR_EL1.
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1		// extract the saved OSLK bit
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

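/*
 * Point TTBR1_EL1 at the empty reserved_pg_dir so that the live translation
 * can be replaced without violating break-before-make, then invalidate any
 * stale local TLB entries.
 */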
.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

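/*
 * KPTI_NG_PTE_FLAGS describes the ad-hoc fixmap entries installed by
 * kpti_map_pgtbl below: Normal cacheable, writable, access flag set,
 * shared, and never executable from EL0.
 */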
	.pushsection ".idmap.text", "a"

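	/*
	 * Extract the output address from a page table descriptor: the low
	 * address bits come from PTE_ADDR_LOW; with 52-bit physical
	 * addressing the remaining high bits live in PTE_ADDR_HIGH and must
	 * be shifted up into their architectural position.
	 */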
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

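	/*
	 * Walk the \num_entries descriptors starting at cur_\type\()p and
	 * set the nG bit on every valid, global entry. For levels above the
	 * PTE level, a table descriptor (bit 1 set) is followed via
	 * .Lderef_\type so the next level gets rewritten too; the loop then
	 * resumes at .Lnext_\type.
	 */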
	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, idmap_kpti_bbml2_flag

	cbnz	cpu, __idmap_kpti_secondary

#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif
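	/*
	 * When CONFIG_PGTABLE_LEVELS > 4, x30 is repurposed as the 'p4d'
	 * register below and x19/x20 hold the P4D cursors, hence the frame
	 * record and both callee-saved registers are preserved above.
	 */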

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b
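	/*
	 * Each secondary atomically increments idmap_kpti_bbml2_flag once it
	 * has uninstalled swapper (see secondary_cpu_wait below), so the
	 * loop above completes when the flag equals num_cpus.
	 */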

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir		// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp		// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd			// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp	cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

.Lderef_pgd:
	/* P4D */
	.if	CONFIG_PGTABLE_LEVELS > 4
	p4d	.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b	.Lnext_pgd
	.else	/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d	.req	pgd
	.set	.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if	CONFIG_PGTABLE_LEVELS > 3
	pud	.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b	.Lnext_p4d
	.else	/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud	.req	pgd
	.set	.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if	CONFIG_PGTABLE_LEVELS > 2
	pmd	.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b	.Lnext_pud
	.else	/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd	.req	pgd
	.set	.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b	.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17
	b	secondary_cpu_wait

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

	.pushsection ".idmap.text", "a"
SYM_TYPED_FUNC_START(wait_linear_map_split_to_ptes)
	/* Must be same registers as in idmap_kpti_install_ng_mappings */
	swapper_ttb	.req	x3
	flag_ptr	.req	x4

	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, idmap_kpti_bbml2_flag
	__idmap_cpu_set_reserved_ttbr1 x16, x17

secondary_cpu_wait:
	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b
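	/*
	 * stxr writes 0 to w17 on success and 1 if the exclusive monitor was
	 * lost, so the increment above retries until it lands atomically.
	 */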

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(wait_linear_map_split_to_ptes)
	.popsection

/*
 * __cpu_setup
 *
 * Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, #MDSCR_EL1_TDCC		// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	tcr2	.req	x15
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_EL1_AS | \
		     TCR_EL1_TBI0 | TCR_EL1_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
	mov	tcr2, xzr

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	mov	x9, #64 - VA_BITS
alternative_if ARM64_HAS_VA52
	tcr_set_t1sz	tcr, x9
#ifdef CONFIG_ARM64_LPA2
	orr	tcr, tcr, #TCR_EL1_DS
#endif
alternative_else_nop_endif
#endif

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_EL1_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	ubfx	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_EL1_HA		// hardware Access flag update
#ifdef CONFIG_ARM64_HAFT
	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
	b.lt	1f
	orr	tcr2, tcr2, TCR2_EL1_HAFT
#endif /* CONFIG_ARM64_HAFT */
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection
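	/*
	 * FEAT_S1PIE is present: program the stage-1 permission indirection
	 * registers for EL0 and EL1 and enable indirection via TCR2_EL1.PIE.
	 */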

	mov_q	x0, PIE_E0_ASM
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1_ASM
	msr	REG_PIR_EL1, x0

	orr	tcr2, tcr2, TCR2_EL1_PIE

.Lskip_indirection:

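	/*
	 * Write the accumulated TCR2_EL1 value only if FEAT_TCR2
	 * (ID_AA64MMFR3_EL1.TCRX) is implemented.
	 */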
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, 1f
	msr	REG_TCR2_EL1, tcr2
1:

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
	.unreq	tcr2
SYM_FUNC_END(__cpu_setup)