| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * FP/SIMD context switching and fault handling |
| 4 | * |
| 5 | * Copyright (C) 2012 ARM Ltd. |
| 6 | * Author: Catalin Marinas <catalin.marinas@arm.com> |
| 7 | */ |
| 8 | |
| 9 | #include <linux/bitmap.h> |
| 10 | #include <linux/bitops.h> |
| 11 | #include <linux/bottom_half.h> |
| 12 | #include <linux/bug.h> |
| 13 | #include <linux/cache.h> |
| 14 | #include <linux/compat.h> |
| 15 | #include <linux/compiler.h> |
| 16 | #include <linux/cpu.h> |
| 17 | #include <linux/cpu_pm.h> |
| 18 | #include <linux/ctype.h> |
| 19 | #include <linux/kernel.h> |
| 20 | #include <linux/linkage.h> |
| 21 | #include <linux/irqflags.h> |
| 22 | #include <linux/init.h> |
| 23 | #include <linux/percpu.h> |
| 24 | #include <linux/prctl.h> |
| 25 | #include <linux/preempt.h> |
| 26 | #include <linux/ptrace.h> |
| 27 | #include <linux/sched/signal.h> |
| 28 | #include <linux/sched/task_stack.h> |
| 29 | #include <linux/signal.h> |
| 30 | #include <linux/slab.h> |
| 31 | #include <linux/stddef.h> |
| 32 | #include <linux/sysctl.h> |
| 33 | #include <linux/swab.h> |
| 34 | |
| 35 | #include <asm/esr.h> |
| 36 | #include <asm/exception.h> |
| 37 | #include <asm/fpsimd.h> |
| 38 | #include <asm/cpufeature.h> |
| 39 | #include <asm/cputype.h> |
| 40 | #include <asm/neon.h> |
| 41 | #include <asm/processor.h> |
| 42 | #include <asm/simd.h> |
| 43 | #include <asm/sigcontext.h> |
| 44 | #include <asm/sysreg.h> |
| 45 | #include <asm/traps.h> |
| 46 | #include <asm/virt.h> |
| 47 | |
| 48 | #define FPEXC_IOF (1 << 0) |
| 49 | #define FPEXC_DZF (1 << 1) |
| 50 | #define FPEXC_OFF (1 << 2) |
| 51 | #define FPEXC_UFF (1 << 3) |
| 52 | #define FPEXC_IXF (1 << 4) |
| 53 | #define FPEXC_IDF (1 << 7) |
| 54 | |
| 55 | /* |
| 56 | * (Note: in this discussion, statements about FPSIMD apply equally to SVE.) |
| 57 | * |
| 58 | * In order to reduce the number of times the FPSIMD state is needlessly saved |
| 59 | * and restored, we need to keep track of two things: |
| 60 | * (a) for each task, we need to remember which CPU was the last one to have |
| 61 | * the task's FPSIMD state loaded into its FPSIMD registers; |
| 62 | * (b) for each CPU, we need to remember which task's userland FPSIMD state has |
| 63 | * been loaded into its FPSIMD registers most recently, or whether it has |
| 64 | * been used to perform kernel mode NEON in the meantime. |
| 65 | * |
| 66 | * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to |
| 67 | * the id of the current CPU every time the state is loaded onto a CPU. For (b), |
| 68 | * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the |
| 69 | * address of the userland FPSIMD state of the task that was loaded onto the CPU |
| 70 | * the most recently, or NULL if kernel mode NEON has been performed after that. |
| 71 | * |
| 72 | * With this in place, we no longer have to restore the next FPSIMD state right |
| 73 | * when switching between tasks. Instead, we can defer this check to userland |
| 74 | * resume, at which time we verify whether the CPU's fpsimd_last_state and the |
| 75 | * task's fpsimd_cpu are still mutually in sync. If this is the case, we |
| 76 | * can omit the FPSIMD restore. |
| 77 | * |
| 78 | * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to |
| 79 | * indicate whether or not the userland FPSIMD state of the current task is |
| 80 | * present in the registers. The flag is set unless the FPSIMD registers of this |
| 81 | * CPU currently contain the most recent userland FPSIMD state of the current |
| 82 | * task. If the task is behaving as a VMM, then this will be managed by |
| 83 | * KVM which will clear it to indicate that the vcpu FPSIMD state is currently |
| 84 | * loaded on the CPU, allowing the state to be saved if an FPSIMD-aware |
| 85 | * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and |
| 86 | * flag the register state as invalid. |
| 87 | * |
| 88 | * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may be |
| 89 | * called from softirq context, which will save the task's FPSIMD context back |
| 90 | * to task_struct. To prevent this from racing with the manipulation of the |
| 91 | * task's FPSIMD state from task context and thereby corrupting the state, it |
| 92 | * is necessary to protect any manipulation of a task's fpsimd_state or |
| 93 | * TIF_FOREIGN_FPSTATE flag with get_cpu_fpsimd_context(), which will suspend |
| 94 | * softirq servicing entirely until put_cpu_fpsimd_context() is called. |
| 95 | * |
| 96 | * For a certain task, the sequence may look something like this: |
| 97 | * - the task gets scheduled in; if both the task's fpsimd_cpu field |
| 98 | * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu |
| 99 | * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is |
| 100 | * cleared, otherwise it is set; |
| 101 | * |
| 102 | * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's |
| 103 | * userland FPSIMD state is copied from memory to the registers, the task's |
| 104 | * fpsimd_cpu field is set to the id of the current CPU, the current |
| 105 | * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the |
| 106 | * TIF_FOREIGN_FPSTATE flag is cleared; |
| 107 | * |
| 108 | * - the task executes an ordinary syscall; upon return to userland, the |
| 109 | * TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is |
| 110 | * restored; |
| 111 | * |
| 112 | * - the task executes a syscall which executes some NEON instructions; this is |
| 113 | * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD |
| 114 | * register contents to memory, clears the fpsimd_last_state per-cpu variable |
| 115 | * and sets the TIF_FOREIGN_FPSTATE flag; |
| 116 | * |
| 117 | * - the task gets preempted after kernel_neon_end() is called; as we have not |
| 118 | * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so |
| 119 | * whatever is in the FPSIMD registers is not saved to memory, but discarded. |
| 120 | */ |
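| | |
| | /* |
| | * Illustrative sketch only: a typical kernel-mode NEON user (for example a |
| | * crypto driver) brackets its SIMD code as below, which is what drives the |
| | * kernel_neon_begin()/kernel_neon_end() behaviour described above. The |
| | * may_use_simd() check is needed because kernel-mode NEON is not usable in |
| | * every context: |
| | * |
| | *	if (may_use_simd()) { |
| | *		kernel_neon_begin(); |
| | *		/- ... NEON/FPSIMD instructions or asm helpers ... -/ |
| | *		kernel_neon_end(); |
| | *	} else { |
| | *		/- ... scalar fallback ... -/ |
| | *	} |
| | */ |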
| 121 | |
| 122 | DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); |
| 123 | |
| 124 | __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = { |
| 125 | #ifdef CONFIG_ARM64_SVE |
| 126 | [ARM64_VEC_SVE] = { |
| 127 | .type = ARM64_VEC_SVE, |
| 128 | .name = "SVE", |
| 129 | .min_vl = SVE_VL_MIN, |
| 130 | .max_vl = SVE_VL_MIN, |
| 131 | .max_virtualisable_vl = SVE_VL_MIN, |
| 132 | }, |
| 133 | #endif |
| 134 | #ifdef CONFIG_ARM64_SME |
| 135 | [ARM64_VEC_SME] = { |
| 136 | .type = ARM64_VEC_SME, |
| 137 | .name = "SME", |
| 138 | }, |
| 139 | #endif |
| 140 | }; |
| 141 | |
| 142 | static unsigned int vec_vl_inherit_flag(enum vec_type type) |
| 143 | { |
| 144 | switch (type) { |
| 145 | case ARM64_VEC_SVE: |
| 146 | return TIF_SVE_VL_INHERIT; |
| 147 | case ARM64_VEC_SME: |
| 148 | return TIF_SME_VL_INHERIT; |
| 149 | default: |
| 150 | WARN_ON_ONCE(1); |
| 151 | return 0; |
| 152 | } |
| 153 | } |
| 154 | |
| 155 | struct vl_config { |
| 156 | int __default_vl; /* Default VL for tasks */ |
| 157 | }; |
| 158 | |
| 159 | static struct vl_config vl_config[ARM64_VEC_MAX]; |
| 160 | |
| 161 | static inline int get_default_vl(enum vec_type type) |
| 162 | { |
| 163 | return READ_ONCE(vl_config[type].__default_vl); |
| 164 | } |
| 165 | |
| 166 | #ifdef CONFIG_ARM64_SVE |
| 167 | |
| 168 | static inline int get_sve_default_vl(void) |
| 169 | { |
| 170 | return get_default_vl(ARM64_VEC_SVE); |
| 171 | } |
| 172 | |
| 173 | static inline void set_default_vl(enum vec_type type, int val) |
| 174 | { |
| 175 | WRITE_ONCE(vl_config[type].__default_vl, val); |
| 176 | } |
| 177 | |
| 178 | static inline void set_sve_default_vl(int val) |
| 179 | { |
| 180 | set_default_vl(ARM64_VEC_SVE, val); |
| 181 | } |
| 182 | |
| 183 | #endif /* CONFIG_ARM64_SVE */ |
| 184 | |
| 185 | #ifdef CONFIG_ARM64_SME |
| 186 | |
| 187 | static int get_sme_default_vl(void) |
| 188 | { |
| 189 | return get_default_vl(ARM64_VEC_SME); |
| 190 | } |
| 191 | |
| 192 | static void set_sme_default_vl(int val) |
| 193 | { |
| 194 | set_default_vl(ARM64_VEC_SME, val); |
| 195 | } |
| 196 | |
| 197 | static void sme_free(struct task_struct *); |
| 198 | |
| 199 | #else |
| 200 | |
| 201 | static inline void sme_free(struct task_struct *t) { } |
| 202 | |
| 203 | #endif |
| 204 | |
| 205 | static void fpsimd_bind_task_to_cpu(void); |
| 206 | |
| 207 | /* |
| 208 | * Claim ownership of the CPU FPSIMD context for use by the calling context. |
| 209 | * |
| 210 | * The caller may freely manipulate the FPSIMD context metadata until |
| 211 | * put_cpu_fpsimd_context() is called. |
| 212 | * |
| 213 | * On RT kernels local_bh_disable() is not sufficient because it only |
| 214 | * serializes soft interrupt related sections via a local lock, but stays |
| 215 | * preemptible. Disabling preemption is the right choice here as bottom |
| 216 | * half processing is always in thread context on RT kernels so it |
| 217 | * implicitly prevents bottom half processing as well. |
| 218 | */ |
| 219 | static void get_cpu_fpsimd_context(void) |
| 220 | { |
| 221 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { |
| 222 | /* |
| 223 | * The softirq subsystem lacks a true unmask/mask API, and |
| 224 | * re-enabling softirq processing using local_bh_enable() will |
| 225 | * not only unmask softirqs, it will also result in immediate |
| 226 | * delivery of any pending softirqs. |
| 227 | * This is undesirable when running with IRQs disabled, but in |
| 228 | * that case, there is no need to mask softirqs in the first |
| 229 | * place, so only bother doing so when IRQs are enabled. |
| 230 | */ |
| 231 | if (!irqs_disabled()) |
| 232 | local_bh_disable(); |
| 233 | } else { |
| 234 | preempt_disable(); |
| 235 | } |
| 236 | } |
| 237 | |
| 238 | /* |
| 239 | * Release the CPU FPSIMD context. |
| 240 | * |
| 241 | * Must be called from a context in which get_cpu_fpsimd_context() was |
| 242 | * previously called, with no call to put_cpu_fpsimd_context() in the |
| 243 | * meantime. |
| 244 | */ |
| 245 | static void put_cpu_fpsimd_context(void) |
| 246 | { |
| 247 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { |
| 248 | if (!irqs_disabled()) |
| 249 | local_bh_enable(); |
| 250 | } else { |
| 251 | preempt_enable(); |
| 252 | } |
| 253 | } |
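| | |
| | /* |
| | * Illustrative sketch only: code that manipulates the current task's FPSIMD |
| | * state brackets the critical section with the pair of helpers above, for |
| | * example to save and then invalidate the current CPU's register contents: |
| | * |
| | *	get_cpu_fpsimd_context(); |
| | *	fpsimd_save_user_state(); |
| | *	fpsimd_flush_cpu_state(); |
| | *	put_cpu_fpsimd_context(); |
| | */ |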
| 254 | |
| 255 | unsigned int task_get_vl(const struct task_struct *task, enum vec_type type) |
| 256 | { |
| 257 | return task->thread.vl[type]; |
| 258 | } |
| 259 | |
| 260 | void task_set_vl(struct task_struct *task, enum vec_type type, |
| 261 | unsigned long vl) |
| 262 | { |
| 263 | task->thread.vl[type] = vl; |
| 264 | } |
| 265 | |
| 266 | unsigned int task_get_vl_onexec(const struct task_struct *task, |
| 267 | enum vec_type type) |
| 268 | { |
| 269 | return task->thread.vl_onexec[type]; |
| 270 | } |
| 271 | |
| 272 | void task_set_vl_onexec(struct task_struct *task, enum vec_type type, |
| 273 | unsigned long vl) |
| 274 | { |
| 275 | task->thread.vl_onexec[type] = vl; |
| 276 | } |
| 277 | |
| 278 | /* |
| 279 | * TIF_SME controls whether a task can use SME without trapping while |
| 280 | * in userspace, when TIF_SME is set then we must have storage |
| 281 | * allocated in sve_state and sme_state to store the contents of both ZA |
| 282 | * and the SVE registers for both streaming and non-streaming modes. |
| 283 | * |
| 284 | * If both SVCR.ZA and SVCR.SM are disabled then at any point we |
| 285 | * may disable TIF_SME and reenable traps. |
| 286 | */ |
| 287 | |
| 288 | |
| 289 | /* |
| 290 | * TIF_SVE controls whether a task can use SVE without trapping while |
| 291 | * in userspace, and also (together with TIF_SME) the way a task's |
| 292 | * FPSIMD/SVE state is stored in thread_struct. |
| 293 | * |
| 294 | * The kernel uses this flag to track whether a user task is actively |
| 295 | * using SVE, and therefore whether full SVE register state needs to |
| 296 | * be tracked. If not, the cheaper FPSIMD context handling code can |
| 297 | * be used instead of the more costly SVE equivalents. |
| 298 | * |
| 299 | * * TIF_SVE or SVCR.SM set: |
| 300 | * |
| 301 | * The task can execute SVE instructions while in userspace without |
| 302 | * trapping to the kernel. |
| 303 | * |
| 304 | * During any syscall, the kernel may optionally clear TIF_SVE and |
| 305 | * discard the vector state except for the FPSIMD subset. |
| 306 | * |
| 307 | * * TIF_SVE clear: |
| 308 | * |
| 309 | * An attempt by the user task to execute an SVE instruction causes |
| 310 | * do_sve_acc() to be called, which does some preparation and then |
| 311 | * sets TIF_SVE. |
| 312 | * |
| 313 | * During any syscall, the kernel may optionally clear TIF_SVE and |
| 314 | * discard the vector state except for the FPSIMD subset. |
| 315 | * |
| 316 | * The data will be stored in one of two formats: |
| 317 | * |
| 318 | * * FPSIMD only - FP_STATE_FPSIMD: |
| 319 | * |
| 320 | * When only the FPSIMD state is stored, task->thread.fp_type is set to |
| 321 | * FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in |
| 322 | * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are |
| 323 | * logically zero but not stored anywhere; P0-P15 and FFR are not |
| 324 | * stored and have unspecified values from userspace's point of |
| 325 | * view. For hygiene purposes, the kernel zeroes them on next use, |
| 326 | * but userspace is discouraged from relying on this. |
| 327 | * |
| 328 | * task->thread.sve_state does not need to be non-NULL, valid or any |
| 329 | * particular size: it must not be dereferenced and any data stored |
| 330 | * there should be considered stale and not referenced. |
| 331 | * |
| 332 | * * SVE state - FP_STATE_SVE: |
| 333 | * |
| 334 | * When the full SVE state is stored, task->thread.fp_type is set to |
| 335 | * FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the |
| 336 | * corresponding Zn), P0-P15 and FFR are encoded in |
| 337 | * task->thread.sve_state, formatted appropriately for vector |
| 338 | * length task->thread.sve_vl or, if SVCR.SM is set, |
| 339 | * task->thread.sme_vl. The storage for the vector registers in |
| 340 | * task->thread.uw.fpsimd_state should be ignored. |
| 341 | * |
| 342 | * task->thread.sve_state must point to a valid buffer at least |
| 343 | * sve_state_size(task) bytes in size. The data stored in |
| 344 | * task->thread.uw.fpsimd_state.vregs should be considered stale |
| 345 | * and not referenced. |
| 346 | * |
| 347 | * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state |
| 348 | * irrespective of whether TIF_SVE is clear or set, since these are |
| 349 | * not vector length dependent. |
| 350 | */ |
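| | |
| | /* |
| | * Illustrative sketch only (ignoring the big-endian byte swapping handled by |
| | * arm64_cpu_to_le128() below): the two formats share the low 128 bits of each |
| | * vector register, so converting between them amounts to copying Vn to or |
| | * from bits [127:0] of Zn, roughly: |
| | * |
| | *	for (i = 0; i < SVE_NUM_ZREGS; i++) |
| | *		memcpy(ZREG(sve_state, vq, i), &fpsimd_state->vregs[i], 16); |
| | * |
| | * which is what __fpsimd_to_sve() and sve_to_fpsimd() below implement. |
| | */ |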
| 351 | |
| 352 | /* |
| 353 | * Update current's FPSIMD/SVE registers from thread_struct. |
| 354 | * |
| 355 | * This function should be called only when the FPSIMD/SVE state in |
| 356 | * thread_struct is known to be up to date, when preparing to enter |
| 357 | * userspace. |
| 358 | */ |
| 359 | static void task_fpsimd_load(void) |
| 360 | { |
| 361 | bool restore_sve_regs = false; |
| 362 | bool restore_ffr; |
| 363 | |
| 364 | WARN_ON(!system_supports_fpsimd()); |
| 365 | WARN_ON(preemptible()); |
| 366 | WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE)); |
| 367 | |
| 368 | if (system_supports_sve() || system_supports_sme()) { |
| 369 | switch (current->thread.fp_type) { |
| 370 | case FP_STATE_FPSIMD: |
| 371 | /* Stop tracking SVE for this task until next use. */ |
| 372 | clear_thread_flag(TIF_SVE); |
| 373 | break; |
| 374 | case FP_STATE_SVE: |
| 375 | if (!thread_sm_enabled(&current->thread)) |
| 376 | WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE)); |
| 377 | |
| 378 | if (test_thread_flag(TIF_SVE)) |
| 379 | sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1); |
| 380 | |
| 381 | restore_sve_regs = true; |
| 382 | restore_ffr = true; |
| 383 | break; |
| 384 | default: |
| 385 | /* |
| 386 | * This indicates either a bug in |
| 387 | * fpsimd_save_user_state() or memory corruption, we |
| 388 | * should always record an explicit format |
| 389 | * when we save. We always at least have the |
| 390 | * memory allocated for FPSIMD registers so |
| 391 | * try that and hope for the best. |
| 392 | */ |
| 393 | WARN_ON_ONCE(1); |
| 394 | clear_thread_flag(TIF_SVE); |
| 395 | break; |
| 396 | } |
| 397 | } |
| 398 | |
| 399 | /* Restore SME, override SVE register configuration if needed */ |
| 400 | if (system_supports_sme()) { |
| 401 | unsigned long sme_vl = task_get_sme_vl(current); |
| 402 | |
| 403 | /* Ensure VL is set up for restoring data */ |
| 404 | if (test_thread_flag(TIF_SME)) |
| 405 | sme_set_vq(sve_vq_from_vl(sme_vl) - 1); |
| 406 | |
| 407 | write_sysreg_s(current->thread.svcr, SYS_SVCR); |
| 408 | |
| 409 | if (thread_za_enabled(&current->thread)) |
| 410 | sme_load_state(current->thread.sme_state, |
| 411 | system_supports_sme2()); |
| 412 | |
| 413 | if (thread_sm_enabled(&current->thread)) |
| 414 | restore_ffr = system_supports_fa64(); |
| 415 | } |
| 416 | |
| 417 | if (system_supports_fpmr()) |
| 418 | write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR); |
| 419 | |
| 420 | if (restore_sve_regs) { |
| 421 | WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE); |
| 422 | sve_load_state(sve_pffr(&current->thread), |
| 423 | &current->thread.uw.fpsimd_state.fpsr, |
| 424 | restore_ffr); |
| 425 | } else { |
| 426 | WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD); |
| 427 | fpsimd_load_state(¤t->thread.uw.fpsimd_state); |
| 428 | } |
| 429 | } |
| 430 | |
| 431 | /* |
| 432 | * Ensure FPSIMD/SVE storage in memory for the loaded context is up to |
| 433 | * date with respect to the CPU registers. Note carefully that the |
| 434 | * current context is the context last bound to the CPU stored in |
| 435 | * last, if KVM is involved this may be the guest VM context rather |
| 436 | * than the host thread for the VM pointed to by current. This means |
| 437 | * that we must always reference the state storage via last rather |
| 438 | * than via current, if we are saving KVM state then it will have |
| 439 | * ensured that the type of registers to save is set in last->to_save. |
| 440 | */ |
| 441 | static void fpsimd_save_user_state(void) |
| 442 | { |
| 443 | struct cpu_fp_state const *last = |
| 444 | this_cpu_ptr(&fpsimd_last_state); |
| 445 | /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ |
| 446 | bool save_sve_regs = false; |
| 447 | bool save_ffr; |
| 448 | unsigned int vl; |
| 449 | |
| 450 | WARN_ON(!system_supports_fpsimd()); |
| 451 | WARN_ON(preemptible()); |
| 452 | |
| 453 | if (test_thread_flag(TIF_FOREIGN_FPSTATE)) |
| 454 | return; |
| 455 | |
| 456 | if (system_supports_fpmr()) |
| 457 | *(last->fpmr) = read_sysreg_s(SYS_FPMR); |
| 458 | |
| 459 | /* |
| 460 | * Save SVE state if it is live. |
| 461 | * |
| 462 | * The syscall ABI discards live SVE state at syscall entry. When |
| 463 | * entering a syscall, fpsimd_syscall_enter() sets to_save to |
| 464 | * FP_STATE_FPSIMD to allow the SVE state to be lazily discarded until |
| 465 | * either new SVE state is loaded+bound or fpsimd_syscall_exit() is |
| 466 | * called prior to a return to userspace. |
| 467 | */ |
| 468 | if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) || |
| 469 | last->to_save == FP_STATE_SVE) { |
| 470 | save_sve_regs = true; |
| 471 | save_ffr = true; |
| 472 | vl = last->sve_vl; |
| 473 | } |
| 474 | |
| 475 | if (system_supports_sme()) { |
| 476 | u64 *svcr = last->svcr; |
| 477 | |
| 478 | *svcr = read_sysreg_s(SYS_SVCR); |
| 479 | |
| 480 | if (*svcr & SVCR_ZA_MASK) |
| 481 | sme_save_state(last->sme_state, |
| 482 | system_supports_sme2()); |
| 483 | |
| 484 | /* If we are in streaming mode override regular SVE. */ |
| 485 | if (*svcr & SVCR_SM_MASK) { |
| 486 | save_sve_regs = true; |
| 487 | save_ffr = system_supports_fa64(); |
| 488 | vl = last->sme_vl; |
| 489 | } |
| 490 | } |
| 491 | |
| 492 | if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) { |
| 493 | /* Get the configured VL from RDVL, will account for SM */ |
| 494 | if (WARN_ON(sve_get_vl() != vl)) { |
| 495 | /* |
| 496 | * Can't save the user regs, so current would |
| 497 | * re-enter user with corrupt state. |
| 498 | * There's no way to recover, so kill it: |
| 499 | */ |
| 500 | force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); |
| 501 | return; |
| 502 | } |
| 503 | |
| 504 | sve_save_state((char *)last->sve_state + |
| 505 | sve_ffr_offset(vl), |
| 506 | &last->st->fpsr, save_ffr); |
| 507 | *last->fp_type = FP_STATE_SVE; |
| 508 | } else { |
| 509 | fpsimd_save_state(last->st); |
| 510 | *last->fp_type = FP_STATE_FPSIMD; |
| 511 | } |
| 512 | } |
| 513 | |
| 514 | /* |
| 515 | * All vector length selection from userspace comes through here. |
| 516 | * We're on a slow path, so some sanity-checks are included. |
| 517 | * If things go wrong there's a bug somewhere, but try to fall back to a |
| 518 | * safe choice. |
| 519 | */ |
| 520 | static unsigned int find_supported_vector_length(enum vec_type type, |
| 521 | unsigned int vl) |
| 522 | { |
| 523 | struct vl_info *info = &vl_info[type]; |
| 524 | int bit; |
| 525 | int max_vl = info->max_vl; |
| 526 | |
| 527 | if (WARN_ON(!sve_vl_valid(vl))) |
| 528 | vl = info->min_vl; |
| 529 | |
| 530 | if (WARN_ON(!sve_vl_valid(max_vl))) |
| 531 | max_vl = info->min_vl; |
| 532 | |
| 533 | if (vl > max_vl) |
| 534 | vl = max_vl; |
| 535 | if (vl < info->min_vl) |
| 536 | vl = info->min_vl; |
| 537 | |
| 538 | bit = find_next_bit(info->vq_map, SVE_VQ_MAX, |
| 539 | __vq_to_bit(sve_vq_from_vl(vl))); |
| 540 | return sve_vl_from_vq(__bit_to_vq(bit)); |
| 541 | } |
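| | |
| | /* |
| | * Illustrative example only: vector lengths are in bytes, and the bitmap |
| | * search above rounds a request down to the largest supported length not |
| | * exceeding it. On a hypothetical system supporting 16, 32 and 64 byte SVE |
| | * vectors: |
| | * |
| | *	find_supported_vector_length(ARM64_VEC_SVE, 48)  == 32 |
| | *	find_supported_vector_length(ARM64_VEC_SVE, 128) == 64	(clamped to max_vl) |
| | */ |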
| 542 | |
| 543 | #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) |
| 544 | |
| 545 | static int vec_proc_do_default_vl(const struct ctl_table *table, int write, |
| 546 | void *buffer, size_t *lenp, loff_t *ppos) |
| 547 | { |
| 548 | struct vl_info *info = table->extra1; |
| 549 | enum vec_type type = info->type; |
| 550 | int ret; |
| 551 | int vl = get_default_vl(type); |
| 552 | struct ctl_table tmp_table = { |
| 553 | .data = &vl, |
| 554 | .maxlen = sizeof(vl), |
| 555 | }; |
| 556 | |
| 557 | ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); |
| 558 | if (ret || !write) |
| 559 | return ret; |
| 560 | |
| 561 | /* Writing -1 has the special meaning "set to max": */ |
| 562 | if (vl == -1) |
| 563 | vl = info->max_vl; |
| 564 | |
| 565 | if (!sve_vl_valid(vl)) |
| 566 | return -EINVAL; |
| 567 | |
| 568 | set_default_vl(type, find_supported_vector_length(type, vl)); |
| 569 | return 0; |
| 570 | } |
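| | |
| | /* |
| | * Illustrative example only: with CONFIG_SYSCTL enabled the handler above is |
| | * reachable from userspace via procfs, e.g.: |
| | * |
| | *	# cat /proc/sys/abi/sve_default_vector_length |
| | *	# echo 32 > /proc/sys/abi/sve_default_vector_length |
| | *	# echo -1 > /proc/sys/abi/sve_default_vector_length	(request maximum) |
| | */ |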
| 571 | |
| 572 | static const struct ctl_table sve_default_vl_table[] = { |
| 573 | { |
| 574 | .procname = "sve_default_vector_length" , |
| 575 | .mode = 0644, |
| 576 | .proc_handler = vec_proc_do_default_vl, |
| 577 | .extra1 = &vl_info[ARM64_VEC_SVE], |
| 578 | }, |
| 579 | }; |
| 580 | |
| 581 | static int __init sve_sysctl_init(void) |
| 582 | { |
| 583 | if (system_supports_sve()) |
| 584 | if (!register_sysctl("abi" , sve_default_vl_table)) |
| 585 | return -EINVAL; |
| 586 | |
| 587 | return 0; |
| 588 | } |
| 589 | |
| 590 | #else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ |
| 591 | static int __init sve_sysctl_init(void) { return 0; } |
| 592 | #endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ |
| 593 | |
| 594 | #if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL) |
| 595 | static const struct ctl_table sme_default_vl_table[] = { |
| 596 | { |
| 597 | .procname = "sme_default_vector_length" , |
| 598 | .mode = 0644, |
| 599 | .proc_handler = vec_proc_do_default_vl, |
| 600 | .extra1 = &vl_info[ARM64_VEC_SME], |
| 601 | }, |
| 602 | }; |
| 603 | |
| 604 | static int __init sme_sysctl_init(void) |
| 605 | { |
| 606 | if (system_supports_sme()) |
| 607 | if (!register_sysctl("abi" , sme_default_vl_table)) |
| 608 | return -EINVAL; |
| 609 | |
| 610 | return 0; |
| 611 | } |
| 612 | |
| 613 | #else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ |
| 614 | static int __init sme_sysctl_init(void) { return 0; } |
| 615 | #endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ |
| 616 | |
| 617 | #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ |
| 618 | (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) |
| 619 | |
| 620 | #ifdef CONFIG_CPU_BIG_ENDIAN |
| 621 | static __uint128_t arm64_cpu_to_le128(__uint128_t x) |
| 622 | { |
| 623 | u64 a = swab64(x); |
| 624 | u64 b = swab64(x >> 64); |
| 625 | |
| 626 | return ((__uint128_t)a << 64) | b; |
| 627 | } |
| 628 | #else |
| 629 | static __uint128_t arm64_cpu_to_le128(__uint128_t x) |
| 630 | { |
| 631 | return x; |
| 632 | } |
| 633 | #endif |
| 634 | |
| 635 | #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) |
| 636 | |
| 637 | static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst, |
| 638 | unsigned int vq) |
| 639 | { |
| 640 | unsigned int i; |
| 641 | __uint128_t *p; |
| 642 | |
| 643 | for (i = 0; i < SVE_NUM_ZREGS; ++i) { |
| 644 | p = (__uint128_t *)ZREG(sst, vq, i); |
| 645 | *p = arm64_cpu_to_le128(fst->vregs[i]); |
| 646 | } |
| 647 | } |
| 648 | |
| 649 | /* |
| 650 | * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to |
| 651 | * task->thread.sve_state. |
| 652 | * |
| 653 | * Task can be a non-runnable task, or current. In the latter case, |
| 654 | * the caller must have ownership of the cpu FPSIMD context before calling |
| 655 | * this function. |
| 656 | * task->thread.sve_state must point to at least sve_state_size(task) |
| 657 | * bytes of allocated kernel memory. |
| 658 | * task->thread.uw.fpsimd_state must be up to date before calling this |
| 659 | * function. |
| 660 | */ |
| 661 | static inline void fpsimd_to_sve(struct task_struct *task) |
| 662 | { |
| 663 | unsigned int vq; |
| 664 | void *sst = task->thread.sve_state; |
| 665 | struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; |
| 666 | |
| 667 | if (!system_supports_sve() && !system_supports_sme()) |
| 668 | return; |
| 669 | |
| 670 | vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); |
| 671 | __fpsimd_to_sve(sst, fst, vq); |
| 672 | } |
| 673 | |
| 674 | /* |
| 675 | * Transfer the SVE state in task->thread.sve_state to |
| 676 | * task->thread.uw.fpsimd_state. |
| 677 | * |
| 678 | * Task can be a non-runnable task, or current. In the latter case, |
| 679 | * the caller must have ownership of the cpu FPSIMD context before calling |
| 680 | * this function. |
| 681 | * task->thread.sve_state must point to at least sve_state_size(task) |
| 682 | * bytes of allocated kernel memory. |
| 683 | * task->thread.sve_state must be up to date before calling this function. |
| 684 | */ |
| 685 | static inline void sve_to_fpsimd(struct task_struct *task) |
| 686 | { |
| 687 | unsigned int vq, vl; |
| 688 | void const *sst = task->thread.sve_state; |
| 689 | struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; |
| 690 | unsigned int i; |
| 691 | __uint128_t const *p; |
| 692 | |
| 693 | if (!system_supports_sve() && !system_supports_sme()) |
| 694 | return; |
| 695 | |
| 696 | vl = thread_get_cur_vl(&task->thread); |
| 697 | vq = sve_vq_from_vl(vl); |
| 698 | for (i = 0; i < SVE_NUM_ZREGS; ++i) { |
| 699 | p = (__uint128_t const *)ZREG(sst, vq, i); |
| 700 | fst->vregs[i] = arm64_le128_to_cpu(*p); |
| 701 | } |
| 702 | } |
| 703 | |
| 704 | static inline void __fpsimd_zero_vregs(struct user_fpsimd_state *fpsimd) |
| 705 | { |
| 706 | memset(&fpsimd->vregs, 0, sizeof(fpsimd->vregs)); |
| 707 | } |
| 708 | |
| 709 | /* |
| 710 | * Simulate the effects of an SMSTOP SM instruction. |
| 711 | */ |
| 712 | void task_smstop_sm(struct task_struct *task) |
| 713 | { |
| 714 | if (!thread_sm_enabled(&task->thread)) |
| 715 | return; |
| 716 | |
| 717 | __fpsimd_zero_vregs(&task->thread.uw.fpsimd_state); |
| 718 | task->thread.uw.fpsimd_state.fpsr = 0x0800009f; |
| 719 | if (system_supports_fpmr()) |
| 720 | task->thread.uw.fpmr = 0; |
| 721 | |
| 722 | task->thread.svcr &= ~SVCR_SM_MASK; |
| 723 | task->thread.fp_type = FP_STATE_FPSIMD; |
| 724 | } |
| 725 | |
| 726 | void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p) |
| 727 | { |
| 728 | write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK, |
| 729 | SYS_SCTLR_EL1); |
| 730 | } |
| 731 | |
| 732 | #ifdef CONFIG_ARM64_SVE |
| 733 | static void sve_free(struct task_struct *task) |
| 734 | { |
| 735 | kfree(task->thread.sve_state); |
| 736 | task->thread.sve_state = NULL; |
| 737 | } |
| 738 | |
| 739 | /* |
| 740 | * Ensure that task->thread.sve_state is allocated and sufficiently large. |
| 741 | * |
| 742 | * This function should be used only in preparation for replacing |
| 743 | * task->thread.sve_state with new data. The memory is always zeroed |
| 744 | * here to prevent stale data from showing through: this is done in |
| 745 | * the interest of testability and predictability: except in the |
| 746 | * do_sve_acc() case, there is no ABI requirement to hide stale data |
| 747 | * written previously by the task. |
| 748 | */ |
| 749 | void sve_alloc(struct task_struct *task, bool flush) |
| 750 | { |
| 751 | if (task->thread.sve_state) { |
| 752 | if (flush) |
| 753 | memset(task->thread.sve_state, 0, |
| 754 | sve_state_size(task)); |
| 755 | return; |
| 756 | } |
| 757 | |
| 758 | /* This is a small allocation (maximum ~8KB) and Should Not Fail. */ |
| 759 | task->thread.sve_state = |
| 760 | kzalloc(sve_state_size(task), GFP_KERNEL); |
| 761 | } |
| 762 | |
| 763 | /* |
| 764 | * Ensure that task->thread.uw.fpsimd_state is up to date with respect to the |
| 765 | * task's currently effective FPSIMD/SVE state. |
| 766 | * |
| 767 | * The task's FPSIMD/SVE/SME state must not be subject to concurrent |
| 768 | * manipulation. |
| 769 | */ |
| 770 | void fpsimd_sync_from_effective_state(struct task_struct *task) |
| 771 | { |
| 772 | if (task->thread.fp_type == FP_STATE_SVE) |
| 773 | sve_to_fpsimd(task); |
| 774 | } |
| 775 | |
| 776 | /* |
| 777 | * Ensure that the task's currently effective FPSIMD/SVE state is up to date |
| 778 | * with respect to task->thread.uw.fpsimd_state, zeroing any effective |
| 779 | * non-FPSIMD (S)SVE state. |
| 780 | * |
| 781 | * The task's FPSIMD/SVE/SME state must not be subject to concurrent |
| 782 | * manipulation. |
| 783 | */ |
| 784 | void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task) |
| 785 | { |
| 786 | unsigned int vq; |
| 787 | void *sst = task->thread.sve_state; |
| 788 | struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; |
| 789 | |
| 790 | if (task->thread.fp_type != FP_STATE_SVE) |
| 791 | return; |
| 792 | |
| 793 | vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); |
| 794 | |
| 795 | memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); |
| 796 | __fpsimd_to_sve(sst, fst, vq); |
| 797 | } |
| 798 | |
| 799 | static int change_live_vector_length(struct task_struct *task, |
| 800 | enum vec_type type, |
| 801 | unsigned long vl) |
| 802 | { |
| 803 | unsigned int sve_vl = task_get_sve_vl(task); |
| 804 | unsigned int sme_vl = task_get_sme_vl(task); |
| 805 | void *sve_state = NULL, *sme_state = NULL; |
| 806 | |
| 807 | if (type == ARM64_VEC_SME) |
| 808 | sme_vl = vl; |
| 809 | else |
| 810 | sve_vl = vl; |
| 811 | |
| 812 | /* |
| 813 | * Allocate the new sve_state and sme_state before freeing the old |
| 814 | * copies so that allocation failure can be handled without needing to |
| 815 | * mutate the task's state in any way. |
| 816 | * |
| 817 | * Changes to the SVE vector length must not discard live ZA state or |
| 818 | * clear PSTATE.ZA, as userspace code which is unaware of the AAPCS64 |
| 819 | * ZA lazy saving scheme may attempt to change the SVE vector length |
| 820 | * while unsaved/dormant ZA state exists. |
| 821 | */ |
| 822 | sve_state = kzalloc(__sve_state_size(sve_vl, sme_vl), GFP_KERNEL); |
| 823 | if (!sve_state) |
| 824 | goto out_mem; |
| 825 | |
| 826 | if (type == ARM64_VEC_SME) { |
| 827 | sme_state = kzalloc(__sme_state_size(sme_vl), GFP_KERNEL); |
| 828 | if (!sme_state) |
| 829 | goto out_mem; |
| 830 | } |
| 831 | |
| 832 | if (task == current) |
| 833 | fpsimd_save_and_flush_current_state(); |
| 834 | else |
| 835 | fpsimd_flush_task_state(task); |
| 836 | |
| 837 | /* |
| 838 | * Always preserve PSTATE.SM and the effective FPSIMD state, zeroing |
| 839 | * other SVE state. |
| 840 | */ |
| 841 | fpsimd_sync_from_effective_state(task); |
| 842 | task_set_vl(task, type, vl); |
| 843 | kfree(task->thread.sve_state); |
| 844 | task->thread.sve_state = sve_state; |
| 845 | fpsimd_sync_to_effective_state_zeropad(task); |
| 846 | |
| 847 | if (type == ARM64_VEC_SME) { |
| 848 | task->thread.svcr &= ~SVCR_ZA_MASK; |
| 849 | kfree(task->thread.sme_state); |
| 850 | task->thread.sme_state = sme_state; |
| 851 | } |
| 852 | |
| 853 | return 0; |
| 854 | |
| 855 | out_mem: |
| 856 | kfree(sve_state); |
| 857 | kfree(sme_state); |
| 858 | return -ENOMEM; |
| 859 | } |
| 860 | |
| 861 | int vec_set_vector_length(struct task_struct *task, enum vec_type type, |
| 862 | unsigned long vl, unsigned long flags) |
| 863 | { |
| 864 | bool onexec = flags & PR_SVE_SET_VL_ONEXEC; |
| 865 | bool inherit = flags & PR_SVE_VL_INHERIT; |
| 866 | |
| 867 | if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | |
| 868 | PR_SVE_SET_VL_ONEXEC)) |
| 869 | return -EINVAL; |
| 870 | |
| 871 | if (!sve_vl_valid(vl)) |
| 872 | return -EINVAL; |
| 873 | |
| 874 | /* |
| 875 | * Clamp to the maximum vector length that VL-agnostic code |
| 876 | * can work with. A flag may be assigned in the future to |
| 877 | * allow setting of larger vector lengths without confusing |
| 878 | * older software. |
| 879 | */ |
| 880 | if (vl > VL_ARCH_MAX) |
| 881 | vl = VL_ARCH_MAX; |
| 882 | |
| 883 | vl = find_supported_vector_length(type, vl); |
| 884 | |
| 885 | if (!onexec && vl != task_get_vl(task, type)) { |
| 886 | if (change_live_vector_length(task, type, vl)) |
| 887 | return -ENOMEM; |
| 888 | } |
| 889 | |
| 890 | if (onexec || inherit) |
| 891 | task_set_vl_onexec(task, type, vl); |
| 892 | else |
| 893 | /* Reset VL to system default on next exec: */ |
| 894 | task_set_vl_onexec(task, type, 0); |
| 895 | |
| 896 | update_tsk_thread_flag(task, vec_vl_inherit_flag(type), |
| 897 | flags & PR_SVE_VL_INHERIT); |
| 898 | |
| 899 | return 0; |
| 900 | } |
| 901 | |
| 902 | /* |
| 903 | * Encode the current vector length and flags for return. |
| 904 | * This is only required for prctl(): ptrace has separate fields. |
| 905 | * SVE and SME use the same bits for _ONEXEC and _INHERIT. |
| 906 | * |
| 907 | * flags are as for vec_set_vector_length(). |
| 908 | */ |
| 909 | static int vec_prctl_status(enum vec_type type, unsigned long flags) |
| 910 | { |
| 911 | int ret; |
| 912 | |
| 913 | if (flags & PR_SVE_SET_VL_ONEXEC) |
| 914 | ret = task_get_vl_onexec(current, type); |
| 915 | else |
| 916 | ret = task_get_vl(current, type); |
| 917 | |
| 918 | if (test_thread_flag(vec_vl_inherit_flag(type))) |
| 919 | ret |= PR_SVE_VL_INHERIT; |
| 920 | |
| 921 | return ret; |
| 922 | } |
| 923 | |
| 924 | /* PR_SVE_SET_VL */ |
| 925 | int sve_set_current_vl(unsigned long arg) |
| 926 | { |
| 927 | unsigned long vl, flags; |
| 928 | int ret; |
| 929 | |
| 930 | vl = arg & PR_SVE_VL_LEN_MASK; |
| 931 | flags = arg & ~vl; |
| 932 | |
| 933 | if (!system_supports_sve() || is_compat_task()) |
| 934 | return -EINVAL; |
| 935 | |
| 936 | ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags); |
| 937 | if (ret) |
| 938 | return ret; |
| 939 | |
| 940 | return vec_prctl_status(ARM64_VEC_SVE, flags); |
| 941 | } |
| 942 | |
| 943 | /* PR_SVE_GET_VL */ |
| 944 | int sve_get_current_vl(void) |
| 945 | { |
| 946 | if (!system_supports_sve() || is_compat_task()) |
| 947 | return -EINVAL; |
| 948 | |
| 949 | return vec_prctl_status(ARM64_VEC_SVE, 0); |
| 950 | } |
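| | |
| | /* |
| | * Illustrative sketch only: from userspace these hooks are reached via |
| | * prctl(), e.g. a task that wants a 256-bit (32-byte) SVE vector length |
| | * inherited across execve() might do: |
| | * |
| | *	ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT); |
| | *	vl  = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK; |
| | */ |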
| 951 | |
| 952 | #ifdef CONFIG_ARM64_SME |
| 953 | /* PR_SME_SET_VL */ |
| 954 | int sme_set_current_vl(unsigned long arg) |
| 955 | { |
| 956 | unsigned long vl, flags; |
| 957 | int ret; |
| 958 | |
| 959 | vl = arg & PR_SME_VL_LEN_MASK; |
| 960 | flags = arg & ~vl; |
| 961 | |
| 962 | if (!system_supports_sme() || is_compat_task()) |
| 963 | return -EINVAL; |
| 964 | |
| 965 | ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags); |
| 966 | if (ret) |
| 967 | return ret; |
| 968 | |
| 969 | return vec_prctl_status(ARM64_VEC_SME, flags); |
| 970 | } |
| 971 | |
| 972 | /* PR_SME_GET_VL */ |
| 973 | int sme_get_current_vl(void) |
| 974 | { |
| 975 | if (!system_supports_sme() || is_compat_task()) |
| 976 | return -EINVAL; |
| 977 | |
| 978 | return vec_prctl_status(ARM64_VEC_SME, 0); |
| 979 | } |
| 980 | #endif /* CONFIG_ARM64_SME */ |
| 981 | |
| 982 | static void vec_probe_vqs(struct vl_info *info, |
| 983 | DECLARE_BITMAP(map, SVE_VQ_MAX)) |
| 984 | { |
| 985 | unsigned int vq, vl; |
| 986 | |
| 987 | bitmap_zero(map, SVE_VQ_MAX); |
| 988 | |
| 989 | for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) { |
| 990 | write_vl(info->type, vq - 1); /* self-syncing */ |
| 991 | |
| 992 | switch (info->type) { |
| 993 | case ARM64_VEC_SVE: |
| 994 | vl = sve_get_vl(); |
| 995 | break; |
| 996 | case ARM64_VEC_SME: |
| 997 | vl = sme_get_vl(); |
| 998 | break; |
| 999 | default: |
| 1000 | vl = 0; |
| 1001 | break; |
| 1002 | } |
| 1003 | |
| 1004 | /* Minimum VL identified? */ |
| 1005 | if (sve_vq_from_vl(vl) > vq) |
| 1006 | break; |
| 1007 | |
| 1008 | vq = sve_vq_from_vl(vl); /* skip intervening lengths */ |
| 1009 | set_bit(__vq_to_bit(vq), map); |
| 1010 | } |
| 1011 | } |
| 1012 | |
| 1013 | /* |
| 1014 | * Initialise the set of known supported VQs for the boot CPU. |
| 1015 | * This is called during kernel boot, before secondary CPUs are brought up. |
| 1016 | */ |
| 1017 | void __init vec_init_vq_map(enum vec_type type) |
| 1018 | { |
| 1019 | struct vl_info *info = &vl_info[type]; |
| 1020 | vec_probe_vqs(info, info->vq_map); |
| 1021 | bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX); |
| 1022 | } |
| 1023 | |
| 1024 | /* |
| 1025 | * If we haven't committed to the set of supported VQs yet, filter out |
| 1026 | * those not supported by the current CPU. |
| 1027 | * This function is called during the bring-up of early secondary CPUs only. |
| 1028 | */ |
| 1029 | void vec_update_vq_map(enum vec_type type) |
| 1030 | { |
| 1031 | struct vl_info *info = &vl_info[type]; |
| 1032 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
| 1033 | |
| 1034 | vec_probe_vqs(info, tmp_map); |
| 1035 | bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX); |
| 1036 | bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map, |
| 1037 | SVE_VQ_MAX); |
| 1038 | } |
| 1039 | |
| 1040 | /* |
| 1041 | * Check whether the current CPU supports all VQs in the committed set. |
| 1042 | * This function is called during the bring-up of late secondary CPUs only. |
| 1043 | */ |
| 1044 | int vec_verify_vq_map(enum vec_type type) |
| 1045 | { |
| 1046 | struct vl_info *info = &vl_info[type]; |
| 1047 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
| 1048 | unsigned long b; |
| 1049 | |
| 1050 | vec_probe_vqs(info, tmp_map); |
| 1051 | |
| 1052 | bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); |
| 1053 | if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) { |
| 1054 | pr_warn("%s: cpu%d: Required vector length(s) missing\n" , |
| 1055 | info->name, smp_processor_id()); |
| 1056 | return -EINVAL; |
| 1057 | } |
| 1058 | |
| 1059 | if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) |
| 1060 | return 0; |
| 1061 | |
| 1062 | /* |
| 1063 | * For KVM, it is necessary to ensure that this CPU doesn't |
| 1064 | * support any vector length that guests may have probed as |
| 1065 | * unsupported. |
| 1066 | */ |
| 1067 | |
| 1068 | /* Recover the set of supported VQs: */ |
| 1069 | bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); |
| 1070 | /* Find VQs supported that are not globally supported: */ |
| 1071 | bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX); |
| 1072 | |
| 1073 | /* Find the lowest such VQ, if any: */ |
| 1074 | b = find_last_bit(tmp_map, SVE_VQ_MAX); |
| 1075 | if (b >= SVE_VQ_MAX) |
| 1076 | return 0; /* no mismatches */ |
| 1077 | |
| 1078 | /* |
| 1079 | * Mismatches above sve_max_virtualisable_vl are fine, since |
| 1080 | * no guest is allowed to configure ZCR_EL2.LEN to exceed this: |
| 1081 | */ |
| 1082 | if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) { |
| 1083 | pr_warn("%s: cpu%d: Unsupported vector length(s) present\n" , |
| 1084 | info->name, smp_processor_id()); |
| 1085 | return -EINVAL; |
| 1086 | } |
| 1087 | |
| 1088 | return 0; |
| 1089 | } |
| 1090 | |
| 1091 | void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p) |
| 1092 | { |
| 1093 | write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); |
| 1094 | isb(); |
| 1095 | |
| 1096 | write_sysreg_s(0, SYS_ZCR_EL1); |
| 1097 | } |
| 1098 | |
| 1099 | void __init sve_setup(void) |
| 1100 | { |
| 1101 | struct vl_info *info = &vl_info[ARM64_VEC_SVE]; |
| 1102 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
| 1103 | unsigned long b; |
| 1104 | int max_bit; |
| 1105 | |
| 1106 | if (!system_supports_sve()) |
| 1107 | return; |
| 1108 | |
| 1109 | /* |
| 1110 | * The SVE architecture mandates support for 128-bit vectors, |
| 1111 | * so sve_vq_map must have at least SVE_VQ_MIN set. |
| 1112 | * If something went wrong, at least try to patch it up: |
| 1113 | */ |
| 1114 | if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map))) |
| 1115 | set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map); |
| 1116 | |
| 1117 | max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX); |
| 1118 | info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit)); |
| 1119 | |
| 1120 | /* |
| 1121 | * For the default VL, pick the maximum supported value <= 64. |
| 1122 | * VL == 64 is guaranteed not to grow the signal frame. |
| 1123 | */ |
| 1124 | set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64)); |
| 1125 | |
| 1126 | bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map, |
| 1127 | SVE_VQ_MAX); |
| 1128 | |
| 1129 | b = find_last_bit(tmp_map, SVE_VQ_MAX); |
| 1130 | if (b >= SVE_VQ_MAX) |
| 1131 | /* No non-virtualisable VLs found */ |
| 1132 | info->max_virtualisable_vl = SVE_VQ_MAX; |
| 1133 | else if (WARN_ON(b == SVE_VQ_MAX - 1)) |
| 1134 | /* No virtualisable VLs? This is architecturally forbidden. */ |
| 1135 | info->max_virtualisable_vl = SVE_VQ_MIN; |
| 1136 | else /* b + 1 < SVE_VQ_MAX */ |
| 1137 | info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); |
| 1138 | |
| 1139 | if (info->max_virtualisable_vl > info->max_vl) |
| 1140 | info->max_virtualisable_vl = info->max_vl; |
| 1141 | |
| 1142 | pr_info("%s: maximum available vector length %u bytes per vector\n" , |
| 1143 | info->name, info->max_vl); |
| 1144 | pr_info("%s: default vector length %u bytes per vector\n" , |
| 1145 | info->name, get_sve_default_vl()); |
| 1146 | |
| 1147 | /* KVM decides whether to support mismatched systems. Just warn here: */ |
| 1148 | if (sve_max_virtualisable_vl() < sve_max_vl()) |
| 1149 | pr_warn("%s: unvirtualisable vector lengths present\n" , |
| 1150 | info->name); |
| 1151 | } |
| 1152 | |
| 1153 | /* |
| 1154 | * Called from the put_task_struct() path, which cannot get here |
| 1155 | * unless dead_task is really dead and not schedulable. |
| 1156 | */ |
| 1157 | void fpsimd_release_task(struct task_struct *dead_task) |
| 1158 | { |
| 1159 | sve_free(dead_task); |
| 1160 | sme_free(dead_task); |
| 1161 | } |
| 1162 | |
| 1163 | #endif /* CONFIG_ARM64_SVE */ |
| 1164 | |
| 1165 | #ifdef CONFIG_ARM64_SME |
| 1166 | |
| 1167 | /* |
| 1168 | * Ensure that task->thread.sme_state is allocated and sufficiently large. |
| 1169 | * |
| 1170 | * This function should be used only in preparation for replacing |
| 1171 | * task->thread.sme_state with new data. The memory is always zeroed |
| 1172 | * here to prevent stale data from showing through: this is done in |
| 1173 | * the interest of testability and predictability: the architecture |
| 1174 | * guarantees that when ZA is enabled it will be zeroed. |
| 1175 | */ |
| 1176 | void sme_alloc(struct task_struct *task, bool flush) |
| 1177 | { |
| 1178 | if (task->thread.sme_state) { |
| 1179 | if (flush) |
| 1180 | memset(task->thread.sme_state, 0, |
| 1181 | sme_state_size(task)); |
| 1182 | return; |
| 1183 | } |
| 1184 | |
| 1185 | /* This could potentially be up to 64K. */ |
| 1186 | task->thread.sme_state = |
| 1187 | kzalloc(sme_state_size(task), GFP_KERNEL); |
| 1188 | } |
| 1189 | |
| 1190 | static void sme_free(struct task_struct *task) |
| 1191 | { |
| 1192 | kfree(task->thread.sme_state); |
| 1193 | task->thread.sme_state = NULL; |
| 1194 | } |
| 1195 | |
| 1196 | void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p) |
| 1197 | { |
| 1198 | /* Set priority for all PEs to architecturally defined minimum */ |
| 1199 | write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK, |
| 1200 | SYS_SMPRI_EL1); |
| 1201 | |
| 1202 | /* Allow SME in kernel */ |
| 1203 | write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1); |
| 1204 | isb(); |
| 1205 | |
| 1206 | /* Ensure all bits in SMCR are set to known values */ |
| 1207 | write_sysreg_s(0, SYS_SMCR_EL1); |
| 1208 | |
| 1209 | /* Allow EL0 to access TPIDR2 */ |
| 1210 | write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1); |
| 1211 | isb(); |
| 1212 | } |
| 1213 | |
| 1214 | void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p) |
| 1215 | { |
| 1216 | /* This must be enabled after SME */ |
| 1217 | BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME); |
| 1218 | |
| 1219 | /* Allow use of ZT0 */ |
| 1220 | write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, |
| 1221 | SYS_SMCR_EL1); |
| 1222 | } |
| 1223 | |
| 1224 | void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p) |
| 1225 | { |
| 1226 | /* This must be enabled after SME */ |
| 1227 | BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME); |
| 1228 | |
| 1229 | /* Allow use of FA64 */ |
| 1230 | write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK, |
| 1231 | SYS_SMCR_EL1); |
| 1232 | } |
| 1233 | |
| 1234 | void __init sme_setup(void) |
| 1235 | { |
| 1236 | struct vl_info *info = &vl_info[ARM64_VEC_SME]; |
| 1237 | int min_bit, max_bit; |
| 1238 | |
| 1239 | if (!system_supports_sme()) |
| 1240 | return; |
| 1241 | |
| 1242 | min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX); |
| 1243 | |
| 1244 | /* |
| 1245 | * SME doesn't require any particular vector length be |
| 1246 | * supported but it does require at least one. We should have |
| 1247 | * disabled the feature entirely while bringing up CPUs but |
| 1248 | * let's double check here. The bitmap is SVE_VQ_MAX sized for |
| 1249 | * sharing with SVE. |
| 1250 | */ |
| 1251 | WARN_ON(min_bit >= SVE_VQ_MAX); |
| 1252 | |
| 1253 | info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit)); |
| 1254 | |
| 1255 | max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX); |
| 1256 | info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit)); |
| 1257 | |
| 1258 | WARN_ON(info->min_vl > info->max_vl); |
| 1259 | |
| 1260 | /* |
| 1261 | * For the default VL, pick the maximum supported value <= 32 |
| 1262 | * (256 bits) if there is one since this is guaranteed not to |
| 1263 | * grow the signal frame when in streaming mode, otherwise the |
| 1264 | * minimum available VL will be used. |
| 1265 | */ |
| 1266 | set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32)); |
| 1267 | |
| 1268 | pr_info("SME: minimum available vector length %u bytes per vector\n" , |
| 1269 | info->min_vl); |
| 1270 | pr_info("SME: maximum available vector length %u bytes per vector\n" , |
| 1271 | info->max_vl); |
| 1272 | pr_info("SME: default vector length %u bytes per vector\n" , |
| 1273 | get_sme_default_vl()); |
| 1274 | } |
| 1275 | |
| 1276 | void sme_suspend_exit(void) |
| 1277 | { |
| 1278 | u64 smcr = 0; |
| 1279 | |
| 1280 | if (!system_supports_sme()) |
| 1281 | return; |
| 1282 | |
| 1283 | if (system_supports_fa64()) |
| 1284 | smcr |= SMCR_ELx_FA64; |
| 1285 | if (system_supports_sme2()) |
| 1286 | smcr |= SMCR_ELx_EZT0; |
| 1287 | |
| 1288 | write_sysreg_s(smcr, SYS_SMCR_EL1); |
| 1289 | write_sysreg_s(0, SYS_SMPRI_EL1); |
| 1290 | } |
| 1291 | |
| 1292 | #endif /* CONFIG_ARM64_SME */ |
| 1293 | |
| 1294 | static void sve_init_regs(void) |
| 1295 | { |
| 1296 | /* |
| 1297 | * Convert the FPSIMD state to SVE, zeroing all the state that |
| 1298 | * is not shared with FPSIMD. If (as is likely) the current |
| 1299 | * state is live in the registers then do this there and |
| 1300 | * update our metadata for the current task including |
| 1301 | * disabling the trap, otherwise update our in-memory copy. |
| 1302 | * We are guaranteed not to be in streaming mode: an SVE trap can |
| 1303 | * only be taken when not in streaming mode, and we cannot be in |
| 1304 | * streaming mode when taking an SME trap. |
| 1305 | */ |
| 1306 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { |
| 1307 | unsigned long vq_minus_one = |
| 1308 | sve_vq_from_vl(task_get_sve_vl(current)) - 1; |
| 1309 | sve_set_vq(vq_minus_one); |
| 1310 | sve_flush_live(true, vq_minus_one); |
| 1311 | fpsimd_bind_task_to_cpu(); |
| 1312 | } else { |
| 1313 | fpsimd_to_sve(current); |
| 1314 | current->thread.fp_type = FP_STATE_SVE; |
| 1315 | fpsimd_flush_task_state(current); |
| 1316 | } |
| 1317 | } |
| 1318 | |
| 1319 | /* |
| 1320 | * Trapped SVE access |
| 1321 | * |
| 1322 | * Storage is allocated for the full SVE state, the current FPSIMD |
| 1323 | * register contents are migrated across, and the access trap is |
| 1324 | * disabled. |
| 1325 | * |
| 1326 | * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state() |
| 1327 | * would have disabled the SVE access trap for userspace during |
| 1328 | * ret_to_user, making an SVE access trap impossible in that case. |
| 1329 | */ |
| 1330 | void do_sve_acc(unsigned long esr, struct pt_regs *regs) |
| 1331 | { |
| 1332 | /* Even if we chose not to use SVE, the hardware could still trap: */ |
| 1333 | if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { |
| 1334 | force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
| 1335 | return; |
| 1336 | } |
| 1337 | |
| 1338 | sve_alloc(current, true); |
| 1339 | if (!current->thread.sve_state) { |
| 1340 | force_sig(SIGKILL); |
| 1341 | return; |
| 1342 | } |
| 1343 | |
| 1344 | get_cpu_fpsimd_context(); |
| 1345 | |
| 1346 | if (test_and_set_thread_flag(TIF_SVE)) |
| 1347 | WARN_ON(1); /* SVE access shouldn't have trapped */ |
| 1348 | |
| 1349 | /* |
| 1350 | * Even if the task can have used streaming mode we can only |
| 1351 | * generate SVE access traps in normal SVE mode and |
| 1352 | * transitioning out of streaming mode may discard any |
| 1353 | * streaming mode state. Always clear the high bits to avoid |
| 1354 | * any potential errors tracking what is properly initialised. |
| 1355 | */ |
| 1356 | sve_init_regs(); |
| 1357 | |
| 1358 | put_cpu_fpsimd_context(); |
| 1359 | } |
| 1360 | |
| 1361 | /* |
| 1362 | * Trapped SME access |
| 1363 | * |
| 1364 | * Storage is allocated for the full SVE and SME state, the current |
| 1365 | * FPSIMD register contents are migrated to SVE if SVE is not already |
| 1366 | * active, and the access trap is disabled. |
| 1367 | * |
| 1368 | * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state() |
| 1369 | * would have disabled the SME access trap for userspace during |
| 1370 | * ret_to_user, making an SME access trap impossible in that case. |
| 1371 | */ |
| 1372 | void do_sme_acc(unsigned long esr, struct pt_regs *regs) |
| 1373 | { |
| 1374 | /* Even if we chose not to use SME, the hardware could still trap: */ |
| 1375 | if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) { |
| 1376 | force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
| 1377 | return; |
| 1378 | } |
| 1379 | |
| 1380 | /* |
| 1381 | * If this is not a trap due to SME being disabled then something |
| 1382 | * is being used in the wrong mode, report as SIGILL. |
| 1383 | */ |
| 1384 | if (ESR_ELx_SME_ISS_SMTC(esr) != ESR_ELx_SME_ISS_SMTC_SME_DISABLED) { |
| 1385 | force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
| 1386 | return; |
| 1387 | } |
| 1388 | |
| 1389 | sve_alloc(current, false); |
| 1390 | sme_alloc(current, true); |
| 1391 | if (!current->thread.sve_state || !current->thread.sme_state) { |
| 1392 | force_sig(SIGKILL); |
| 1393 | return; |
| 1394 | } |
| 1395 | |
| 1396 | get_cpu_fpsimd_context(); |
| 1397 | |
| 1398 | /* With TIF_SME userspace shouldn't generate any traps */ |
| 1399 | if (test_and_set_thread_flag(TIF_SME)) |
| 1400 | WARN_ON(1); |
| 1401 | |
| 1402 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { |
| 1403 | unsigned long vq_minus_one = |
| 1404 | sve_vq_from_vl(task_get_sme_vl(current)) - 1; |
| 1405 | sme_set_vq(vq_minus_one); |
| 1406 | |
| 1407 | fpsimd_bind_task_to_cpu(); |
| 1408 | } else { |
| 1409 | fpsimd_flush_task_state(current); |
| 1410 | } |
| 1411 | |
| 1412 | put_cpu_fpsimd_context(); |
| 1413 | } |
| 1414 | |
| 1415 | /* |
| 1416 | * Trapped FP/ASIMD access. |
| 1417 | */ |
| 1418 | void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) |
| 1419 | { |
| 1420 | /* Even if we chose not to use FPSIMD, the hardware could still trap: */ |
| 1421 | if (!system_supports_fpsimd()) { |
| 1422 | force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
| 1423 | return; |
| 1424 | } |
| 1425 | |
| 1426 | /* |
| 1427 | * When FPSIMD is enabled, we should never take a trap unless something |
| 1428 | * has gone very wrong. |
| 1429 | */ |
| 1430 | BUG(); |
| 1431 | } |
| 1432 | |
| 1433 | /* |
| 1434 | * Raise a SIGFPE for the current process. |
| 1435 | */ |
| 1436 | void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) |
| 1437 | { |
| 1438 | unsigned int si_code = FPE_FLTUNK; |
| 1439 | |
| 1440 | if (esr & ESR_ELx_FP_EXC_TFV) { |
| 1441 | if (esr & FPEXC_IOF) |
| 1442 | si_code = FPE_FLTINV; |
| 1443 | else if (esr & FPEXC_DZF) |
| 1444 | si_code = FPE_FLTDIV; |
| 1445 | else if (esr & FPEXC_OFF) |
| 1446 | si_code = FPE_FLTOVF; |
| 1447 | else if (esr & FPEXC_UFF) |
| 1448 | si_code = FPE_FLTUND; |
| 1449 | else if (esr & FPEXC_IXF) |
| 1450 | si_code = FPE_FLTRES; |
| 1451 | } |
| 1452 | |
| 1453 | send_sig_fault(SIGFPE, si_code, |
| 1454 | (void __user *)instruction_pointer(regs), |
| 1455 | current); |
| 1456 | } |
| 1457 | |
| 1458 | static void fpsimd_load_kernel_state(struct task_struct *task) |
| 1459 | { |
| 1460 | struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state); |
| 1461 | |
| 1462 | /* |
| 1463 | * Elide the load if this CPU holds the most recent kernel mode |
| 1464 | * FPSIMD context of the current task. |
| 1465 | */ |
| 1466 | if (last->st == task->thread.kernel_fpsimd_state && |
| 1467 | task->thread.kernel_fpsimd_cpu == smp_processor_id()) |
| 1468 | return; |
| 1469 | |
| 1470 | fpsimd_load_state(task->thread.kernel_fpsimd_state); |
| 1471 | } |
| 1472 | |
| 1473 | static void fpsimd_save_kernel_state(struct task_struct *task) |
| 1474 | { |
| 1475 | struct cpu_fp_state cpu_fp_state = { |
| 1476 | .st = task->thread.kernel_fpsimd_state, |
| 1477 | .to_save = FP_STATE_FPSIMD, |
| 1478 | }; |
| 1479 | |
| 1480 | BUG_ON(!cpu_fp_state.st); |
| 1481 | |
| 1482 | fpsimd_save_state(task->thread.kernel_fpsimd_state); |
| 1483 | fpsimd_bind_state_to_cpu(&cpu_fp_state); |
| 1484 | |
| 1485 | task->thread.kernel_fpsimd_cpu = smp_processor_id(); |
| 1486 | } |
| 1487 | |
| 1488 | /* |
| 1489 | * Invalidate any task's FPSIMD state that is present on this cpu. |
| 1490 | * The FPSIMD context should be acquired with get_cpu_fpsimd_context() |
| 1491 | * before calling this function. |
| 1492 | */ |
| 1493 | static void fpsimd_flush_cpu_state(void) |
| 1494 | { |
| 1495 | WARN_ON(!system_supports_fpsimd()); |
| 1496 | __this_cpu_write(fpsimd_last_state.st, NULL); |
| 1497 | |
| 1498 | /* |
| 1499 | * Leaving streaming mode enabled will cause issues for any kernel |
| 1500 | * NEON and leaving streaming mode or ZA enabled may increase power |
| 1501 | * consumption. |
| 1502 | */ |
| 1503 | if (system_supports_sme()) |
| 1504 | sme_smstop(); |
| 1505 | |
| 1506 | set_thread_flag(TIF_FOREIGN_FPSTATE); |
| 1507 | } |
| 1508 | |
| 1509 | void fpsimd_thread_switch(struct task_struct *next) |
| 1510 | { |
| 1511 | bool wrong_task, wrong_cpu; |
| 1512 | |
| 1513 | if (!system_supports_fpsimd()) |
| 1514 | return; |
| 1515 | |
| 1516 | WARN_ON_ONCE(!irqs_disabled()); |
| 1517 | |
| 1518 | /* Save unsaved fpsimd state, if any: */ |
| 1519 | if (test_thread_flag(TIF_KERNEL_FPSTATE)) |
| 1520 | fpsimd_save_kernel_state(current); |
| 1521 | else |
| 1522 | fpsimd_save_user_state(); |
| 1523 | |
| 1524 | if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) { |
| 1525 | fpsimd_flush_cpu_state(); |
| 1526 | fpsimd_load_kernel_state(next);
| 1527 | } else { |
| 1528 | /* |
| 1529 | * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's |
| 1530 | * state. For kernel threads, FPSIMD registers are never |
| 1531 | * loaded with user mode FPSIMD state and so wrong_task and |
| 1532 | * wrong_cpu will always be true. |
| 1533 | */ |
| 1534 | wrong_task = __this_cpu_read(fpsimd_last_state.st) != |
| 1535 | &next->thread.uw.fpsimd_state; |
| 1536 | wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id(); |
| 1537 | |
| 1538 | update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE, |
| 1539 | wrong_task || wrong_cpu); |
| 1540 | } |
| 1541 | } |
| 1542 | |
| 1543 | static void fpsimd_flush_thread_vl(enum vec_type type) |
| 1544 | { |
| 1545 | int vl, supported_vl; |
| 1546 | |
| 1547 | /* |
| 1548 | * Reset the task vector length as required. This is where we |
| 1549 | * ensure that all user tasks have a valid vector length |
| 1550 | * configured: no kernel task can become a user task without |
| 1551 | * an exec and hence a call to this function. By the time the |
| 1552 | * first call to this function is made, all early hardware |
| 1553 | * probing is complete, so __sve_default_vl should be valid. |
| 1554 | * If a bug causes this to go wrong, we make some noise and |
| 1555 | * try to fudge thread.sve_vl to a safe value here. |
| 1556 | */ |
| 1557 | vl = task_get_vl_onexec(current, type);
| 1558 | if (!vl)
| 1559 | vl = get_default_vl(type);
| 1560 | |
| 1561 | if (WARN_ON(!sve_vl_valid(vl))) |
| 1562 | vl = vl_info[type].min_vl; |
| 1563 | |
| 1564 | supported_vl = find_supported_vector_length(type, vl);
| 1565 | if (WARN_ON(supported_vl != vl)) |
| 1566 | vl = supported_vl; |
| 1567 | |
| 1568 | task_set_vl(current, type, vl);
| 1569 | |
| 1570 | /* |
| 1571 | * If the task is not set to inherit, ensure that the vector |
| 1572 | * length will be reset by a subsequent exec: |
| 1573 | */ |
| 1574 | if (!test_thread_flag(vec_vl_inherit_flag(type))) |
| 1575 | task_set_vl_onexec(current, type, 0);
| 1576 | } |
| 1577 | |
| 1578 | void fpsimd_flush_thread(void) |
| 1579 | { |
| 1580 | void *sve_state = NULL; |
| 1581 | void *sme_state = NULL; |
| 1582 | |
| 1583 | if (!system_supports_fpsimd()) |
| 1584 | return; |
| 1585 | |
| 1586 | get_cpu_fpsimd_context(); |
| 1587 | |
| 1588 | fpsimd_flush_task_state(current); |
| 1589 | memset(&current->thread.uw.fpsimd_state, 0,
| 1590 | sizeof(current->thread.uw.fpsimd_state)); |
| 1591 | |
| 1592 | if (system_supports_sve()) { |
| 1593 | clear_thread_flag(TIF_SVE); |
| 1594 | |
| 1595 | /* Defer kfree() while in atomic context */ |
| 1596 | sve_state = current->thread.sve_state; |
| 1597 | current->thread.sve_state = NULL; |
| 1598 | |
| 1599 | fpsimd_flush_thread_vl(ARM64_VEC_SVE); |
| 1600 | } |
| 1601 | |
| 1602 | if (system_supports_sme()) { |
| 1603 | clear_thread_flag(TIF_SME); |
| 1604 | |
| 1605 | /* Defer kfree() while in atomic context */ |
| 1606 | sme_state = current->thread.sme_state; |
| 1607 | current->thread.sme_state = NULL; |
| 1608 | |
| 1609 | fpsimd_flush_thread_vl(ARM64_VEC_SME); |
| 1610 | current->thread.svcr = 0; |
| 1611 | } |
| 1612 | |
| 1613 | if (system_supports_fpmr()) |
| 1614 | current->thread.uw.fpmr = 0; |
| 1615 | |
| 1616 | current->thread.fp_type = FP_STATE_FPSIMD; |
| 1617 | |
| 1618 | put_cpu_fpsimd_context(); |
| 1619 | kfree(sve_state);
| 1620 | kfree(sme_state);
| 1621 | } |
| 1622 | |
| 1623 | /* |
| 1624 | * Save the userland FPSIMD state of 'current' to memory, but only if the state |
| 1625 | * currently held in the registers does in fact belong to 'current' |
| 1626 | */ |
| 1627 | void fpsimd_preserve_current_state(void) |
| 1628 | { |
| 1629 | if (!system_supports_fpsimd()) |
| 1630 | return; |
| 1631 | |
| 1632 | get_cpu_fpsimd_context(); |
| 1633 | fpsimd_save_user_state(); |
| 1634 | put_cpu_fpsimd_context(); |
| 1635 | } |
| 1636 | |
| 1637 | /* |
| 1638 | * Associate current's FPSIMD context with this cpu |
| 1639 | * The caller must have ownership of the cpu FPSIMD context before calling |
| 1640 | * this function. |
| 1641 | */ |
| 1642 | static void fpsimd_bind_task_to_cpu(void) |
| 1643 | { |
| 1644 | struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state); |
| 1645 | |
| 1646 | WARN_ON(!system_supports_fpsimd()); |
| 1647 | last->st = &current->thread.uw.fpsimd_state;
| 1648 | last->sve_state = current->thread.sve_state; |
| 1649 | last->sme_state = current->thread.sme_state; |
| 1650 | last->sve_vl = task_get_sve_vl(current); |
| 1651 | last->sme_vl = task_get_sme_vl(current); |
| 1652 | last->svcr = &current->thread.svcr;
| 1653 | last->fpmr = &current->thread.uw.fpmr;
| 1654 | last->fp_type = ¤t->thread.fp_type; |
| 1655 | last->to_save = FP_STATE_CURRENT; |
| 1656 | current->thread.fpsimd_cpu = smp_processor_id(); |
| 1657 | |
| 1658 | /* |
| 1659 | * Toggle SVE and SME trapping for userspace if needed, these |
| 1660 | * are serialised by ret_to_user().
| 1661 | */ |
| 1662 | if (system_supports_sme()) { |
| 1663 | if (test_thread_flag(TIF_SME)) |
| 1664 | sme_user_enable(); |
| 1665 | else |
| 1666 | sme_user_disable(); |
| 1667 | } |
| 1668 | |
| 1669 | if (system_supports_sve()) { |
| 1670 | if (test_thread_flag(TIF_SVE)) |
| 1671 | sve_user_enable(); |
| 1672 | else |
| 1673 | sve_user_disable(); |
| 1674 | } |
| 1675 | } |
| 1676 | |
| 1677 | void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state) |
| 1678 | { |
| 1679 | struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state); |
| 1680 | |
| 1681 | WARN_ON(!system_supports_fpsimd()); |
| 1682 | WARN_ON(!in_softirq() && !irqs_disabled()); |
| 1683 | |
| 1684 | *last = *state; |
| 1685 | } |
| 1686 | |
| 1687 | /* |
| 1688 | * Load the userland FPSIMD state of 'current' from memory, but only if the |
| 1689 | * FPSIMD state already held in the registers is /not/ the most recent FPSIMD |
| 1690 | * state of 'current'. This is called when we are preparing to return to |
| 1691 | * userspace to ensure that userspace sees a good register state. |
| 1692 | */ |
| 1693 | void fpsimd_restore_current_state(void) |
| 1694 | { |
| 1695 | /* |
| 1696 | * TIF_FOREIGN_FPSTATE is set on the init task and copied by |
| 1697 | * arch_dup_task_struct() regardless of whether FP/SIMD is detected. |
| 1698 | * Thus user threads can have this set even when FP/SIMD hasn't been |
| 1699 | * detected. |
| 1700 | * |
| 1701 | * When FP/SIMD is detected, begin_new_exec() will set |
| 1702 | * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(), |
| 1703 | * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when |
| 1704 | * switching tasks. We detect FP/SIMD before we exec the first user |
| 1705 | * process, ensuring this has TIF_FOREIGN_FPSTATE set and |
| 1706 | * do_notify_resume() will call fpsimd_restore_current_state() to |
| 1707 | * install the user FP/SIMD context. |
| 1708 | * |
| 1709 | * When FP/SIMD is not detected, nothing else will clear or set |
| 1710 | * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and |
| 1711 | * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume() |
| 1712 | * looping forever calling fpsimd_restore_current_state(). |
| 1713 | */ |
| 1714 | if (!system_supports_fpsimd()) { |
| 1715 | clear_thread_flag(TIF_FOREIGN_FPSTATE); |
| 1716 | return; |
| 1717 | } |
| 1718 | |
| 1719 | get_cpu_fpsimd_context(); |
| 1720 | |
| 1721 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { |
| 1722 | task_fpsimd_load(); |
| 1723 | fpsimd_bind_task_to_cpu(); |
| 1724 | } |
| 1725 | |
| 1726 | put_cpu_fpsimd_context(); |
| 1727 | } |
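|  | 
|  | /*
|  | * For reference, a condensed and partly hypothetical sketch of the caller:
|  | * the ret-to-user work loop (do_notify_resume() and friends in the entry and
|  | * signal code) re-reads the thread flags and invokes this function while
|  | * TIF_FOREIGN_FPSTATE remains set, which is why the flag must be cleared
|  | * above even on systems without FP/SIMD. Details of the real loop are
|  | * elided here:
|  | *
|  | *     do {
|  | *             ...
|  | *             if (thread_flags & _TIF_FOREIGN_FPSTATE)
|  | *                     fpsimd_restore_current_state();
|  | *             ...
|  | *             thread_flags = read_thread_flags();
|  | *     } while (thread_flags & _TIF_WORK_MASK);
|  | */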
| 1728 | |
| 1729 | void fpsimd_update_current_state(struct user_fpsimd_state const *state) |
| 1730 | { |
| 1731 | if (WARN_ON(!system_supports_fpsimd())) |
| 1732 | return; |
| 1733 | |
| 1734 | current->thread.uw.fpsimd_state = *state; |
| 1735 | if (current->thread.fp_type == FP_STATE_SVE) |
| 1736 | fpsimd_to_sve(current); |
| 1737 | } |
| 1738 | |
| 1739 | /* |
| 1740 | * Invalidate live CPU copies of task t's FPSIMD state |
| 1741 | * |
| 1742 | * This function may be called with preemption enabled. The barrier() |
| 1743 | * ensures that the assignment to fpsimd_cpu is visible to any |
| 1744 | * preemption/softirq that could race with set_tsk_thread_flag(), so |
| 1745 | * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared. |
| 1746 | * |
| 1747 | * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any |
| 1748 | * subsequent code. |
| 1749 | */ |
| 1750 | void fpsimd_flush_task_state(struct task_struct *t) |
| 1751 | { |
| 1752 | t->thread.fpsimd_cpu = NR_CPUS; |
| 1753 | t->thread.kernel_fpsimd_state = NULL; |
| 1754 | /* |
| 1755 | * If we don't support fpsimd, bail out after we have |
| 1756 | * reset the fpsimd_cpu for this task and cleared the
| 1757 | * FPSTATE. |
| 1758 | */ |
| 1759 | if (!system_supports_fpsimd()) |
| 1760 | return; |
| 1761 | barrier(); |
| 1762 | set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); |
| 1763 | |
| 1764 | barrier(); |
| 1765 | } |
| 1766 | |
| 1767 | void fpsimd_save_and_flush_current_state(void) |
| 1768 | { |
| 1769 | if (!system_supports_fpsimd()) |
| 1770 | return; |
| 1771 | |
| 1772 | get_cpu_fpsimd_context(); |
| 1773 | fpsimd_save_user_state(); |
| 1774 | fpsimd_flush_task_state(current); |
| 1775 | put_cpu_fpsimd_context(); |
| 1776 | } |
| 1777 | |
| 1778 | /* |
| 1779 | * Save the FPSIMD state to memory and invalidate cpu view. |
| 1780 | * This function must be called with preemption disabled. |
| 1781 | */ |
| 1782 | void fpsimd_save_and_flush_cpu_state(void) |
| 1783 | { |
| 1784 | unsigned long flags; |
| 1785 | |
| 1786 | if (!system_supports_fpsimd()) |
| 1787 | return; |
| 1788 | WARN_ON(preemptible()); |
| 1789 | local_irq_save(flags); |
| 1790 | fpsimd_save_user_state(); |
| 1791 | fpsimd_flush_cpu_state(); |
| 1792 | local_irq_restore(flags); |
| 1793 | } |
| 1794 | |
| 1795 | #ifdef CONFIG_KERNEL_MODE_NEON |
| 1796 | |
| 1797 | /* |
| 1798 | * Kernel-side NEON support functions |
| 1799 | */ |
| 1800 | |
| 1801 | /* |
| 1802 | * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling |
| 1803 | * context |
| 1804 | * |
| 1805 | * Must not be called unless may_use_simd() returns true. |
| 1806 | * Task context in the FPSIMD registers is saved back to memory as necessary. |
| 1807 | * |
| 1808 | * A matching call to kernel_neon_end() must be made before returning from the |
| 1809 | * calling context. |
| 1810 | * |
| 1811 | * The caller may freely use the FPSIMD registers until kernel_neon_end() is |
| 1812 | * called. |
| 1813 | * |
| 1814 | * Unless called from non-preemptible task context, @state must point to a |
| 1815 | * caller provided buffer that will be used to preserve the task's kernel mode |
| 1816 | * FPSIMD context when it is scheduled out, or if it is interrupted by kernel |
| 1817 | * mode FPSIMD occurring in softirq context. May be %NULL otherwise. |
| 1818 | */ |
| 1819 | void kernel_neon_begin(struct user_fpsimd_state *state) |
| 1820 | { |
| 1821 | if (WARN_ON(!system_supports_fpsimd())) |
| 1822 | return; |
| 1823 | |
| 1824 | WARN_ON((preemptible() || in_serving_softirq()) && !state); |
| 1825 | |
| 1826 | BUG_ON(!may_use_simd()); |
| 1827 | |
| 1828 | get_cpu_fpsimd_context(); |
| 1829 | |
| 1830 | /* Save unsaved fpsimd state, if any: */ |
| 1831 | if (test_thread_flag(TIF_KERNEL_FPSTATE)) { |
| 1832 | BUG_ON(IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq()); |
| 1833 | fpsimd_save_state(state); |
| 1834 | } else { |
| 1835 | fpsimd_save_user_state(); |
| 1836 | |
| 1837 | /* |
| 1838 | * Set the thread flag so that the kernel mode FPSIMD state |
| 1839 | * will be context switched along with the rest of the task |
| 1840 | * state. |
| 1841 | * |
| 1842 | * On non-PREEMPT_RT, softirqs may interrupt task level kernel |
| 1843 | * mode FPSIMD, but the task will not be preemptible so setting |
| 1844 | * TIF_KERNEL_FPSTATE for those would be both wrong (as it |
| 1845 | * would mark the task context FPSIMD state as requiring a |
| 1846 | * context switch) and unnecessary. |
| 1847 | * |
| 1848 | * On PREEMPT_RT, softirqs are serviced from a separate thread, |
| 1849 | * which is scheduled as usual, and this guarantees that these |
| 1850 | * softirqs are not interrupting use of the FPSIMD in kernel |
| 1851 | * mode in task context. So in this case, setting the flag here |
| 1852 | * is always appropriate. |
| 1853 | */ |
| 1854 | if (IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq()) { |
| 1855 | /* |
| 1856 | * Record the caller provided buffer as the kernel mode |
| 1857 | * FP/SIMD buffer for this task, so that the state can |
| 1858 | * be preserved and restored on a context switch. |
| 1859 | */ |
| 1860 | WARN_ON(current->thread.kernel_fpsimd_state != NULL); |
| 1861 | current->thread.kernel_fpsimd_state = state; |
| 1862 | set_thread_flag(TIF_KERNEL_FPSTATE); |
| 1863 | } |
| 1864 | } |
| 1865 | |
| 1866 | /* Invalidate any task state remaining in the fpsimd regs: */ |
| 1867 | fpsimd_flush_cpu_state(); |
| 1868 | |
| 1869 | put_cpu_fpsimd_context(); |
| 1870 | } |
| 1871 | EXPORT_SYMBOL_GPL(kernel_neon_begin); |
| 1872 | |
| 1873 | /* |
| 1874 | * kernel_neon_end(): give the CPU FPSIMD registers back to the current task |
| 1875 | * |
| 1876 | * Must be called from a context in which kernel_neon_begin() was previously |
| 1877 | * called, with no call to kernel_neon_end() in the meantime. |
| 1878 | * |
| 1879 | * The caller must not use the FPSIMD registers after this function is called, |
| 1880 | * unless kernel_neon_begin() is called again in the meantime. |
| 1881 | * |
| 1882 | * The value of @state must match the value passed to the preceding call to |
| 1883 | * kernel_neon_begin(). |
| 1884 | */ |
| 1885 | void kernel_neon_end(struct user_fpsimd_state *state) |
| 1886 | { |
| 1887 | if (!system_supports_fpsimd()) |
| 1888 | return; |
| 1889 | |
| 1890 | if (!test_thread_flag(TIF_KERNEL_FPSTATE)) |
| 1891 | return; |
| 1892 | |
| 1893 | /* |
| 1894 | * If we are returning from a nested use of kernel mode FPSIMD, restore |
| 1895 | * the task context kernel mode FPSIMD state. This can only happen when |
| 1896 | * running in softirq context on non-PREEMPT_RT. |
| 1897 | */ |
| 1898 | if (!IS_ENABLED(CONFIG_PREEMPT_RT) && in_serving_softirq()) { |
| 1899 | fpsimd_load_state(state); |
| 1900 | } else { |
| 1901 | clear_thread_flag(TIF_KERNEL_FPSTATE); |
| 1902 | WARN_ON(current->thread.kernel_fpsimd_state != state); |
| 1903 | current->thread.kernel_fpsimd_state = NULL; |
| 1904 | } |
| 1905 | } |
| 1906 | EXPORT_SYMBOL_GPL(kernel_neon_end); |
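|  | 
|  | /*
|  | * Illustrative sketch only, not a caller that exists in this file: a typical
|  | * user of this API brackets its FP/SIMD work with kernel_neon_begin() and
|  | * kernel_neon_end(), checking may_use_simd() first and falling back to a
|  | * scalar path when the registers are unavailable. xor_blocks_neon() and
|  | * xor_blocks_scalar() are hypothetical helpers; the on-stack buffer
|  | * satisfies the "caller provided buffer" requirement described above, at
|  | * the cost of sizeof(struct user_fpsimd_state) bytes of stack.
|  | *
|  | *     static void xor_blocks(unsigned long *dst, const unsigned long *src,
|  | *                            size_t len)
|  | *     {
|  | *             struct user_fpsimd_state kfp;
|  | *
|  | *             if (!may_use_simd()) {
|  | *                     xor_blocks_scalar(dst, src, len);
|  | *                     return;
|  | *             }
|  | *
|  | *             kernel_neon_begin(&kfp);
|  | *             xor_blocks_neon(dst, src, len);
|  | *             kernel_neon_end(&kfp);
|  | *     }
|  | */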
| 1907 | |
| 1908 | #ifdef CONFIG_EFI |
| 1909 | |
| 1910 | static struct user_fpsimd_state efi_fpsimd_state; |
| 1911 | |
| 1912 | /* |
| 1913 | * EFI runtime services support functions |
| 1914 | * |
| 1915 | * The ABI for EFI runtime services allows EFI to use FPSIMD during the call. |
| 1916 | * This means that for EFI (and only for EFI), we have to assume that FPSIMD |
| 1917 | * is always used rather than being an optional accelerator. |
| 1918 | * |
| 1919 | * These functions provide the necessary support for ensuring FPSIMD |
| 1920 | * save/restore in the contexts from which EFI is used. |
| 1921 | * |
| 1922 | * Do not use them for any other purpose -- if tempted to do so, you are |
| 1923 | * either doing something wrong or you need to propose some refactoring. |
| 1924 | */ |
| 1925 | |
| 1926 | /* |
| 1927 | * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call |
| 1928 | */ |
| 1929 | void __efi_fpsimd_begin(void) |
| 1930 | { |
| 1931 | if (!system_supports_fpsimd()) |
| 1932 | return; |
| 1933 | |
| 1934 | if (may_use_simd()) { |
| 1935 | kernel_neon_begin(&efi_fpsimd_state); |
| 1936 | } else { |
| 1937 | /* |
| 1938 | * We are running in hardirq or NMI context, and the only |
| 1939 | * legitimate case where this might happen is when EFI pstore |
| 1940 | * is attempting to record the system's dying gasps into EFI |
| 1941 | * variables. This could be due to an oops, a panic or a call |
| 1942 | * to emergency_restart(), and in none of those cases can we
| 1943 | * expect the current task to ever return to user space again,
| 1944 | * or for the kernel to resume any normal execution, for that |
| 1945 | * matter (an oops in hardirq context triggers a panic too). |
| 1946 | * |
| 1947 | * Therefore, there is no point in attempting to preserve any |
| 1948 | * SVE/SME state here. On the off chance that we might have |
| 1949 | * ended up here for a different reason inadvertently, kill the |
| 1950 | * task and preserve/restore the base FP/SIMD state, which |
| 1951 | * might belong to kernel mode FP/SIMD. |
| 1952 | */ |
| 1953 | pr_warn_ratelimited("Calling EFI runtime from %s context\n",
| 1954 | in_nmi() ? "NMI" : "hardirq");
| 1955 | force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); |
| 1956 | fpsimd_save_state(&efi_fpsimd_state); |
| 1957 | } |
| 1958 | } |
| 1959 | |
| 1960 | /* |
| 1961 | * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call |
| 1962 | */ |
| 1963 | void __efi_fpsimd_end(void) |
| 1964 | { |
| 1965 | if (!system_supports_fpsimd()) |
| 1966 | return; |
| 1967 | |
| 1968 | if (may_use_simd()) { |
| 1969 | kernel_neon_end(&efi_fpsimd_state); |
| 1970 | } else { |
| 1971 | fpsimd_load_state(&efi_fpsimd_state); |
| 1972 | } |
| 1973 | } |
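|  | 
|  | /*
|  | * A minimal sketch of the intended usage: the EFI runtime call glue is
|  | * expected to bracket each runtime service invocation with these helpers,
|  | * roughly as below. 'rt' (the runtime services table), 'tm' and 'status'
|  | * are hypothetical locals, and the real glue performs additional
|  | * setup/teardown that is elided here.
|  | *
|  | *     __efi_fpsimd_begin();
|  | *     status = rt->get_time(&tm, NULL);       /* any runtime service */
|  | *     __efi_fpsimd_end();
|  | */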
| 1974 | |
| 1975 | #endif /* CONFIG_EFI */ |
| 1976 | |
| 1977 | #endif /* CONFIG_KERNEL_MODE_NEON */ |
| 1978 | |
| 1979 | #ifdef CONFIG_CPU_PM |
| 1980 | static int fpsimd_cpu_pm_notifier(struct notifier_block *self, |
| 1981 | unsigned long cmd, void *v) |
| 1982 | { |
| 1983 | switch (cmd) { |
| 1984 | case CPU_PM_ENTER: |
| 1985 | fpsimd_save_and_flush_cpu_state(); |
| 1986 | break; |
| 1987 | case CPU_PM_EXIT: |
| 1988 | break; |
| 1989 | case CPU_PM_ENTER_FAILED: |
| 1990 | default: |
| 1991 | return NOTIFY_DONE; |
| 1992 | } |
| 1993 | return NOTIFY_OK; |
| 1994 | } |
| 1995 | |
| 1996 | static struct notifier_block fpsimd_cpu_pm_notifier_block = { |
| 1997 | .notifier_call = fpsimd_cpu_pm_notifier, |
| 1998 | }; |
| 1999 | |
| 2000 | static void __init fpsimd_pm_init(void) |
| 2001 | { |
| 2002 | cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block); |
| 2003 | } |
| 2004 | |
| 2005 | #else |
| 2006 | static inline void fpsimd_pm_init(void) { } |
| 2007 | #endif /* CONFIG_CPU_PM */ |
| 2008 | |
| 2009 | #ifdef CONFIG_HOTPLUG_CPU |
| 2010 | static int fpsimd_cpu_dead(unsigned int cpu) |
| 2011 | { |
| 2012 | per_cpu(fpsimd_last_state.st, cpu) = NULL; |
| 2013 | return 0; |
| 2014 | } |
| 2015 | |
| 2016 | static inline void fpsimd_hotplug_init(void) |
| 2017 | { |
| 2018 | cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
| 2019 | NULL, fpsimd_cpu_dead);
| 2020 | } |
| 2021 | |
| 2022 | #else |
| 2023 | static inline void fpsimd_hotplug_init(void) { } |
| 2024 | #endif |
| 2025 | |
| 2026 | void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p) |
| 2027 | { |
| 2028 | unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN; |
| 2029 | write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1); |
| 2030 | isb(); |
| 2031 | } |
| 2032 | |
| 2033 | /* |
| 2034 | * FP/SIMD support code initialisation. |
| 2035 | */ |
| 2036 | static int __init fpsimd_init(void) |
| 2037 | { |
| 2038 | if (cpu_have_named_feature(FP)) { |
| 2039 | fpsimd_pm_init(); |
| 2040 | fpsimd_hotplug_init(); |
| 2041 | } else { |
| 2042 | pr_notice("Floating-point is not implemented\n" ); |
| 2043 | } |
| 2044 | |
| 2045 | if (!cpu_have_named_feature(ASIMD)) |
| 2046 | pr_notice("Advanced SIMD is not implemented\n" ); |
| 2047 | |
| 2048 | |
| 2049 | sve_sysctl_init(); |
| 2050 | sme_sysctl_init(); |
| 2051 | |
| 2052 | return 0; |
| 2053 | } |
| 2054 | core_initcall(fpsimd_init); |
| 2055 | |