/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/objtool.h>
#include <asm/asm-offsets.h>
#include <asm/code-patching-asm.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/kup.h>
#include <asm/thread_info.h>

.section ".text","ax",@progbits

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Cancel all explicit user streams, as they will have no use after context
 * switch, and stop the HW from creating streams itself
 */
#define STOP_STREAMS \
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
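/*
 * DCBT_BOOK3S_STOP_ALL_STREAM_IDS (see asm/ppc_asm.h) expands to a dcbt
 * form with the "stop all streams" TH encoding, which needs a scratch
 * GPR for the EA pattern; r6 is used here, hence the clobber noted at
 * the use site below.
 */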

#define FLUSH_COUNT_CACHE \
1:	nop; \
	patch_site 1b, patch__call_flush_branch_caches1; \
1:	nop; \
	patch_site 1b, patch__call_flush_branch_caches2; \
1:	nop; \
	patch_site 1b, patch__call_flush_branch_caches3
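/*
 * Each nop above is a patch site: when the branch-cache flush mitigation
 * is enabled, boot-time code patching is expected to replace the nop with
 * a "bl flush_branch_caches" (cf. update_branch_cache_flush() in
 * security.c), so the flush costs only a nop when it is not needed.
 */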

.macro nops number
	.rept \number
	nop
	.endr
.endm
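/* e.g. "nops 7" pads out the rest of a 32-byte line after one 4-byte op */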

.balign 32
.global flush_branch_caches
flush_branch_caches:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
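	/*
	 * Each bl .+4 is a call to the next instruction, so it pushes a
	 * harmless entry onto the hardware link stack (return address
	 * predictor); 64 iterations are assumed to be enough to displace
	 * every real entry on current implementations.
	 */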
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b, patch__flush_link_stack_return
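	/*
	 * If only the link stack needs flushing, this nop is expected to
	 * be patched to a blr so the count-cache flush below is skipped.
	 */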

	li	r9,0x7fff
	mtctr	r9

	PPC_BCCTR_FLUSH

2:	nop
	patch_site 2b, patch__flush_count_cache_return
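	/*
	 * On CPUs with the count-cache flush assist, the single special
	 * bcctr above is enough, and this nop is expected to be patched
	 * to a blr, skipping the software displacement loop below.
	 */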

	nops	3

	.rept 278
	.balign 32
	PPC_BCCTR_FLUSH
	nops	7
	.endr
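	/*
	 * Software fallback: each 32-byte line holds one PPC_BCCTR_FLUSH
	 * (4 bytes) padded with seven nops, so the loop executes one
	 * flushing branch per line; the iteration count is presumably
	 * sized to the count cache geometry of the affected CPUs.
	 */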

	blr

#ifdef CONFIG_PPC_64S_HASH_MMU
.balign 32
/*
 * New stack pointer in r8, old stack pointer in r1, must not clobber r3
 */
pin_stack_slb:
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
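	/*
	 * cr0.eq from the clrldi. means the new ESID is c000000000000000,
	 * the first kernel segment, which is always bolted; the cror
	 * folds in cr1.eq (same ESID as the current stack), so a single
	 * beq covers both "no repinning needed" cases.
	 */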

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:	blr
	.size pin_stack_slb,.-pin_stack_slb
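/*
 * Rough C-level sketch of the shadow-buffer update above (illustrative
 * only; field names after struct slb_shadow in asm/lppaca.h):
 *
 *	struct slb_shadow *s = local_paca->slb_shadow_ptr;
 *	s->save_area[SLB_NUM_BOLTED - 1].esid = 0;		// invalidate
 *	s->save_area[SLB_NUM_BOLTED - 1].vsid = cpu_to_be64(vsid);
 *	s->save_area[SLB_NUM_BOLTED - 1].esid = cpu_to_be64(esid);
 *
 * Clearing the ESID first means the hypervisor can never observe a
 * valid entry paired with a stale VSID.
 */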
#endif /* CONFIG_PPC_64S_HASH_MMU */

#else
#define STOP_STREAMS
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * do_switch_32/64 have the same calling convention as _switch, i.e., r3,r4
 * are prev and next thread_struct *, and returns prev task_struct * in r3.
 *
 * This switches the stack, current, and does other task switch housekeeping.
 */
.macro do_switch_32
	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
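	/*
	 * SPRG_THREAD must hold a physical address: 32-bit exception
	 * prologs read it before address translation is enabled.
	 */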
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
.endm

.macro do_switch_64
	ld	r8,KSP(r4)	/* Load new stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */

	STOP_STREAMS		/* Clobbers r6 */

	addi	r3,r3,-THREAD	/* old thread -> task_struct for return value */
	addi	r6,r4,-THREAD	/* new thread -> task_struct */
	std	r6,PACACURRENT(r13)	/* Set new task_struct to 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif
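	/*
	 * With CONFIG_STACKPROTECTOR on 64-bit, the compiler reads the
	 * canary via r13 (the PACA), so the incoming task's canary is
	 * copied from its task_struct into the PACA here.
	 */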
	/* Set new PACAKSAVE */
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
	std	r7,PACAKSAVE(r13)
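	/*
	 * PACAKSAVE gives interrupt entry from userspace its initial
	 * kernel stack pointer, so it must point into the new stack
	 * before r1 is switched; the offset leaves room for one switch
	 * frame at the top of the stack.
	 */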

#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	pin_stack_slb
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif
	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However, the rq lock release on this CPU, paired
	 * with the rq lock acquire on the new CPU before the stack becomes
	 * active there, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
.endm

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 *
 * Note: this uses SWITCH_FRAME_SIZE rather than USER_INT_FRAME_SIZE
 * because we don't need to leave the redzone ABI gap at the top of
 * the kernel stack.
 */
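/*
 * C-level view of the contract (sketch only, cf. __switch_to() in
 * arch/powerpc/kernel/process.c):
 *
 *	struct task_struct *last;
 *	last = _switch(old_thread, new_thread);
 *	// Control returns here only when a later context switch picks
 *	// this task's stack back up, possibly on another CPU.
 */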
_GLOBAL(_switch)
	PPC_CREATE_STACK_FRAME(SWITCH_FRAME_SIZE)
	PPC_STL	r1,KSP(r3)	/* Set old stack pointer */
	SAVE_NVGPRS(r1)		/* volatiles are caller-saved -- Cort */
	PPC_STL	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r0
	stw	r0,_CCR(r1)
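	/*
	 * CR fields 2..4 are nonvolatile in the ELF ABI, so the whole
	 * CR is saved in the switch frame and restored on the way out.
	 */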

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and onto CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */

#ifdef CONFIG_PPC32
	do_switch_32
#else
	do_switch_64
#endif

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	REST_NVGPRS(r1)	/* volatiles are destroyed -- Cort */
	PPC_LL	r0,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r0
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr