// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

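/*
 * Note: the loop above assumes enum mitigation_state is ordered from best
 * to worst (SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE, per
 * asm/spectre.h), so the "new <= state" check only ever lets the recorded
 * state get worse, never better.
 */
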
/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

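/*
 * This backs the usual sysfs vulnerabilities interface, so on an arm64
 * machine userspace will typically see something like:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *   Mitigation: __user pointer sanitization
 */
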
/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

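/*
 * Usage: booting with "nospectre_v2" (or "mitigations=off", which is what
 * cpu_mitigations_off() checks below) leaves the branch predictor
 * hardening disabled on every CPU.
 */
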
static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

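/*
 * The probe above is the standard SMCCC discovery idiom: query
 * ARM_SMCCC_ARCH_FEATURES_FUNC_ID for the workaround ID and interpret
 * res.a0. The same shape is reused for ARM_SMCCC_ARCH_WORKAROUND_2
 * (Spectre-v4) and ARM_SMCCC_ARCH_WORKAROUND_3 (Spectre-BHB) below.
 */
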
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

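/*
 * Assumption worth noting: the callback installed here is invoked via
 * arm64_apply_bp_hardening() when switching to a new user context (see
 * the callers under arch/arm64/mm/), so predictor state is invalidated
 * before a different task can be exposed to it.
 */
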
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	/* Stuff the return-address predictor with benign entries */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};

static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

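/*
 * Usage sketch, per the table above and the prctl() helpers further down:
 *
 *   ssbd=force-on	mitigation always on; prctl() reports "not affected"
 *   ssbd=force-off	mitigation always off; attempts to enable it via
 *			prctl() fail
 *   ssbd=kernel	dynamic: toggled around kernel entry/exit for user
 *			tasks, with userspace opting in via prctl()
 */
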
/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

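/*
 * Illustrative encoding (derived from the MSR-immediate layout; worth
 * double-checking against the ARM ARM): with PSTATE_SSBS supplying the
 * op1/op2 fields and the immediate living in CRm (bits [11:8]),
 * "msr ssbs, #1" assembles to 0xd503413f and "msr ssbs, #0" to
 * 0xd503403f. Clearing bit PSTATE_Imm_shift in .instr_mask lets the hook
 * above match both forms.
 */
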
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

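/*
 * Userspace usage sketch (not part of this file; assumes <sys/prctl.h>
 * and a recent <linux/prctl.h>):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *	ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * after which ret should read back as PR_SPEC_PRCTL | PR_SPEC_DISABLE.
 */
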
/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

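/*
 * For context: the branchy loop itself lives in the entry vectors. A
 * sketch of the shape (not the exact entry.S sequence):
 *
 *	mov	xN, #k		// 8, 24 or 32; patched in below
 * 1:	b	. + 4		// each taken branch pushes a benign
 *	subs	xN, xN, #1	// entry into the branch history,
 *	b.ne	1b		// displacing attacker-trained state
 */
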
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

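/*
 * Example: on a system where spectre_bhb_loop_affected(SCOPE_SYSTEM)
 * returns 24, the placeholder MOV-immediate in the vectors is rewritten
 * here to "mov xN, #24", reusing whatever destination register the
 * original instruction named.
 */
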
/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}