/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
#include <asm/lowcore.h>
#include <asm/machine.h>

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm

	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
		ALT_FEATURE(MFEATURE_LOWCORE)
	.endm

	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)), \
		ALT_FACILITY(193)
	.endm
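
/*
 * Summary of the macros above: STBEAR/LBEAR store and load the
 * breaking-event-address register and MBEAR copies the saved last-break
 * value into pt_regs; all three turn into nops if facility 193 (BEAR
 * enhancement) is not installed. LPSWEY either branches to the given
 * lpswe fallback or executes the LPSWEY instruction directly, with a
 * second alternative for machines running with a relocated lowcore.
 */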

	.macro CHECK_VMAP_STACK savearea, lowcore, oklabel
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)
	j	stack_invalid
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction for the
 * specified mask value, which can be any constant. The macro shifts
 * the mask value to calculate the byte offset within the operand for
 * the test-under-mask instruction.
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
		.if (\mask & 0xff)
			.error "Mask exceeds byte boundary"
		.endif
		TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
		.exitm
	.endif
	.ifeq \mask
		.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
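
/*
 * Illustrative expansion: with the default 8 byte operand size,
 *
 *	TSTMSK	__LC_MCCK_CODE(%r13),0x0400
 *
 * recurses once (mask 0x0400 >> 8 = 0x04, bytepos 1), so off becomes
 * 8 - 1 - 1 = 6 and the macro emits
 *
 *	tm	6+__LC_MCCK_CODE(%r13),0x04
 *
 * i.e. it tests bit 0x04 in byte 6 of the big-endian 8 byte field.
 */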

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

#if IS_ENABLED(CONFIG_KVM)
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_KSTACK_ERASE
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to prevent the next symbol from
	 * starting at the very beginning of the kprobes text section.
	 * Otherwise several symbols would share the same address, and
	 * e.g. objdump would pick an arbitrary one of them when
	 * disassembling the code.
	 * The nop in between makes sure this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
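/*
 * Seen from C this corresponds to a prototype along the lines of
 * (a sketch; the actual declaration lives in the C code calling it):
 *
 *	struct task_struct *__switch_to_asm(struct task_struct *prev,
 *					    struct task_struct *next);
 */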
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	GET_LC	%r13
	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	lay	%r4,__TASK_pid(%r3)
	mvc	__LC_CURRENT_PID(4,%r13),0(%r4)	# store pid of next
	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
#ifdef CONFIG_STACKPROTECTOR
	lg	%r3,__TASK_stack_canary(%r3)
	stg	%r3,__LC_STACK_CANARY(%r13)
#endif
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 */
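/*
 * From the C side this corresponds to something like (sketch only):
 *
 *	int __sie64a(phys_addr_t sie_block_phys,
 *		     struct kvm_s390_sie_block *sie_block,
 *		     u64 *guest_gprs, unsigned long guest_asce);
 *
 * The value returned in %r2 is the exit reason code stored at
 * __SF_SIE_REASON.
 */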
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1
	stosm	__SF_SIE_IRQ(%r15),0x03		# enable interrupts
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_USER_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	stnsm	__SF_SIE_IRQ(%r15),0xfc		# disable interrupts
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */
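/*
 * The C continuation is __do_syscall(), called with %r2 = pointer to
 * pt_regs and %r3 = PER single-step flag (0 on the normal path, 1 when
 * entered via .Lpgm_svcper), i.e. roughly (sketch of the C signature):
 *
 *	void __do_syscall(struct pt_regs *regs, int per_trap);
 */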

SYM_CODE_START(system_call)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
	MBEAR	%r2,%r13
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	GET_LC	%r13
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */
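/*
 * The C part of the handler is __do_pgm_check(), which receives the
 * pt_regs pointer in %r2 (roughly: void __do_pgm_check(struct pt_regs *);
 * a sketch of the C-side signature).
 */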

SYM_CODE_START(pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
	xgr	%r10,%r10
	tmhh	%r8,0x0001			# coming from user space?
	jo	3f				# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	lg	%r11,__LC_CURRENT(%r13)
	tm	__TI_sie(%r11),0xff
	jz	1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000			# PER bit set in old PSW ?
	jnz	2f				# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
	jnz	.Lpgm_svcper			# -> single stepped svc
2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_invalid or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3:	lg	%r15,__LC_KERNEL_STACK(%r13)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r12,%r12
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001			# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8(%r13)
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK(%r13)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
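/*
 * The generated handlers pass the pt_regs pointer to the C handler in
 * %r2; the instantiations below wire up do_ext_irq() and do_io_irq(),
 * both roughly of the form void handler(struct pt_regs *regs) (a sketch
 * of the C-side signature).
 */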
	.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stckf	__LC_INT_CLOCK(%r13)
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	STBEAR	__LC_LAST_BREAK(%r13)
	BPOFF
	lmg	%r8,%r9,\lc_old_psw(%r13)
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lg	%r15,__LC_KERNEL_STACK(%r13)
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	MBEAR	%r11,%r13
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
	.endm

	.section .irqentry.text, "ax"

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

	.section .kprobes.text, "ax"

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	GET_LC	%r13
	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
	jl	1f
	la	%r14,__LC_EXIT_TIMER(%r13)
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	.Lmcck_user
	# Need to compare the address instead of the __TI_sie flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14,4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14,4f
	lg	%r10,__LC_PCPU(%r13)
	oi	__PCPU_FLAGS+7(%r10),_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK(%r13)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
	mvc	__PT_R0(128,%r11),0(%r14)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
0:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)), \
		ALT_FACILITY(193)
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	__INIT
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
__FINIT

	.section .kprobes.text, "ax"

/*
 * The synchronous or the asynchronous stack pointer is invalid. We are
 * dead. There is no need to properly save the registers; we are going
 * to panic anyway. Set up a pt_regs so that show_trace can provide a
 * good call trace.
 */
SYM_CODE_START(stack_invalid)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15)	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	GET_LC	%r2
	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	jg	kernel_stack_invalid
SYM_CODE_END(stack_invalid)

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)