arm64: Mitigate spectre style branch history side channels
arch/arm64/kernel/proton-pack.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26
27 #include <asm/debug-monitors.h>
28 #include <asm/insn.h>
29 #include <asm/spectre.h>
30 #include <asm/traps.h>
31 #include <asm/vectors.h>
32 #include <asm/virt.h>
33
34 /*
35  * We try to ensure that the mitigation state can never change as the result of
36  * onlining a late CPU.
37  */
38 static void update_mitigation_state(enum mitigation_state *oldp,
39                                     enum mitigation_state new)
40 {
41         enum mitigation_state state;
42
43         do {
44                 state = READ_ONCE(*oldp);
45                 if (new <= state)
46                         break;
47
48                 /* Userspace almost certainly can't deal with this. */
49                 if (WARN_ON(system_capabilities_finalized()))
50                         break;
51         } while (cmpxchg_relaxed(oldp, state, new) != state);
52 }
53
54 /*
55  * Spectre v1.
56  *
57  * The kernel can't protect userspace for this one: it's each person for
58  * themselves. Advertise what we're doing and be done with it.
59  */
60 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
61                             char *buf)
62 {
63         return sprintf(buf, "Mitigation: __user pointer sanitization\n");
64 }
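/*
 * This feeds the generic sysfs vulnerabilities interface; for example,
 * assuming the usual sysfs layout:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *   Mitigation: __user pointer sanitization
 */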
65
66 /*
67  * Spectre v2.
68  *
69  * This one sucks. A CPU is either:
70  *
71  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
72  * - Mitigated in hardware and listed in our "safe list".
73  * - Mitigated in software by firmware.
74  * - Mitigated in software by a CPU-specific dance in the kernel and a
75  *   firmware call at EL2.
76  * - Vulnerable.
77  *
78  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
79  * different camps.
80  */
81 static enum mitigation_state spectre_v2_state;
82
83 static bool __read_mostly __nospectre_v2;
84 static int __init parse_spectre_v2_param(char *str)
85 {
86         __nospectre_v2 = true;
87         return 0;
88 }
89 early_param("nospectre_v2", parse_spectre_v2_param);
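/*
 * Either "nospectre_v2" or the generic "mitigations=off" on the kernel
 * command line (the latter via cpu_mitigations_off()) switches the
 * Spectre-v2 mitigation off; see spectre_v2_mitigations_off() below.
 */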
90
91 static bool spectre_v2_mitigations_off(void)
92 {
93         bool ret = __nospectre_v2 || cpu_mitigations_off();
94
95         if (ret)
96                 pr_info_once("spectre-v2 mitigation disabled by command line option\n");
97
98         return ret;
99 }
100
101 static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
102 {
103         switch (bhb_state) {
104         case SPECTRE_UNAFFECTED:
105                 return "";
106         default:
107         case SPECTRE_VULNERABLE:
108                 return ", but not BHB";
109         case SPECTRE_MITIGATED:
110                 return ", BHB";
111         }
112 }
113
114 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
115                             char *buf)
116 {
117         enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
118         const char *bhb_str = get_bhb_affected_string(bhb_state);
119         const char *v2_str = "Branch predictor hardening";
120
121         switch (spectre_v2_state) {
122         case SPECTRE_UNAFFECTED:
123                 if (bhb_state == SPECTRE_UNAFFECTED)
124                         return sprintf(buf, "Not affected\n");
125
126                 /*
127                  * Platforms affected by Spectre-BHB can't report
128                  * "Not affected" for Spectre-v2.
129                  */
130                 v2_str = "CSV2";
131                 fallthrough;
132         case SPECTRE_MITIGATED:
133                 return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
134         case SPECTRE_VULNERABLE:
135                 fallthrough;
136         default:
137                 return sprintf(buf, "Vulnerable\n");
138         }
139 }
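/*
 * Illustrative (not exhaustive) strings this can produce when read from
 * /sys/devices/system/cpu/vulnerabilities/spectre_v2:
 *
 *   Not affected
 *   Mitigation: Branch predictor hardening, BHB
 *   Mitigation: CSV2, but not BHB
 *   Vulnerable
 */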
140
141 static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
142 {
143         u64 pfr0;
144         static const struct midr_range spectre_v2_safe_list[] = {
145                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
146                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
147                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
148                 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
149                 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
150                 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
151                 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
152                 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
153                 { /* sentinel */ }
154         };
155
156         /* If the CPU has CSV2 set, we're safe */
157         pfr0 = read_cpuid(ID_AA64PFR0_EL1);
158         if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
159                 return SPECTRE_UNAFFECTED;
160
161         /* Alternatively, we have a list of unaffected CPUs */
162         if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
163                 return SPECTRE_UNAFFECTED;
164
165         return SPECTRE_VULNERABLE;
166 }
167
168 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
169 {
170         int ret;
171         struct arm_smccc_res res;
172
173         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
174                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
175
176         ret = res.a0;
177         switch (ret) {
178         case SMCCC_RET_SUCCESS:
179                 return SPECTRE_MITIGATED;
180         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
181                 return SPECTRE_UNAFFECTED;
182         default:
183                 fallthrough;
184         case SMCCC_RET_NOT_SUPPORTED:
185                 return SPECTRE_VULNERABLE;
186         }
187 }
188
189 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
190 {
191         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
192
193         if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
194                 return false;
195
196         if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
197                 return false;
198
199         return true;
200 }
201
202 enum mitigation_state arm64_get_spectre_v2_state(void)
203 {
204         return spectre_v2_state;
205 }
206
207 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
208
209 static void install_bp_hardening_cb(bp_hardening_cb_t fn)
210 {
211         __this_cpu_write(bp_hardening_data.fn, fn);
212
213         /*
214          * Vinz Clortho takes the hyp_vecs start/end "keys" at
215          * the door when we're a guest. Skip the hyp-vectors work.
216          */
217         if (!is_hyp_mode_available())
218                 return;
219
220         __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
221 }
222
223 static void call_smc_arch_workaround_1(void)
224 {
225         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
226 }
227
228 static void call_hvc_arch_workaround_1(void)
229 {
230         arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
231 }
232
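/*
 * Qualcomm Falkor's CPU-specific Spectre-v2 dance: clobber the link-stack
 * (return address) predictor with 16 harmless "bl . + 4" calls, each of
 * which simply branches to the next instruction, then restore the real
 * link register from the temporary.
 */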
233 static void qcom_link_stack_sanitisation(void)
234 {
235         u64 tmp;
236
237         asm volatile("mov       %0, x30         \n"
238                      ".rept     16              \n"
239                      "bl        . + 4           \n"
240                      ".endr                     \n"
241                      "mov       x30, %0         \n"
242                      : "=&r" (tmp));
243 }
244
245 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
246 {
247         u32 midr = read_cpuid_id();
248         if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
249             ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
250                 return NULL;
251
252         return qcom_link_stack_sanitisation;
253 }
254
255 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
256 {
257         bp_hardening_cb_t cb;
258         enum mitigation_state state;
259
260         state = spectre_v2_get_cpu_fw_mitigation_state();
261         if (state != SPECTRE_MITIGATED)
262                 return state;
263
264         if (spectre_v2_mitigations_off())
265                 return SPECTRE_VULNERABLE;
266
267         switch (arm_smccc_1_1_get_conduit()) {
268         case SMCCC_CONDUIT_HVC:
269                 cb = call_hvc_arch_workaround_1;
270                 break;
271
272         case SMCCC_CONDUIT_SMC:
273                 cb = call_smc_arch_workaround_1;
274                 break;
275
276         default:
277                 return SPECTRE_VULNERABLE;
278         }
279
280         /*
281          * Prefer a CPU-specific workaround if it exists. Note that we
282          * still rely on firmware for the mitigation at EL2.
283          */
284         cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
285         install_bp_hardening_cb(cb);
286         return SPECTRE_MITIGATED;
287 }
288
289 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
290 {
291         enum mitigation_state state;
292
293         WARN_ON(preemptible());
294
295         state = spectre_v2_get_cpu_hw_mitigation_state();
296         if (state == SPECTRE_VULNERABLE)
297                 state = spectre_v2_enable_fw_mitigation();
298
299         update_mitigation_state(&spectre_v2_state, state);
300 }
301
302 /*
303  * Spectre-v3a.
304  *
305  * Phew, there's not an awful lot to do here! We just instruct EL2 to use
306  * an indirect trampoline for the hyp vectors so that guests can't read
307  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
308  */
309 bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
310 {
311         static const struct midr_range spectre_v3a_unsafe_list[] = {
312                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
313                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
314                 {},
315         };
316
317         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
318         return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
319 }
320
321 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
322 {
323         struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
324
325         if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
326                 data->slot += HYP_VECTOR_INDIRECT;
327 }
328
329 /*
330  * Spectre v4.
331  *
332  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
333  * either:
334  *
335  * - Mitigated in hardware and listed in our "safe list".
336  * - Mitigated in hardware via PSTATE.SSBS.
337  * - Mitigated in software by firmware (sometimes referred to as SSBD).
338  *
339  * Wait, that doesn't sound so bad, does it? Keep reading...
340  *
341  * A major source of headaches is that the software mitigation is enabled on a
342  * per-task basis, but can also be forced on for the kernel, necessitating
343  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
344  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
345  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
346  * so you can have systems that have both firmware and SSBS mitigations. This
347  * means we actually have to reject late onlining of CPUs with mitigations if
348  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
349  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
350  *
351  * The only good part is that if the firmware mitigation is present, then it is
352  * present for all CPUs, meaning we don't have to worry about late onlining of a
353  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
354  *
355  * Give me a VAX-11/780 any day of the week...
356  */
357 static enum mitigation_state spectre_v4_state;
358
359 /* This is the per-cpu state tracking whether we need to talk to firmware */
360 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
361
362 enum spectre_v4_policy {
363         SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
364         SPECTRE_V4_POLICY_MITIGATION_ENABLED,
365         SPECTRE_V4_POLICY_MITIGATION_DISABLED,
366 };
367
368 static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
369
370 static const struct spectre_v4_param {
371         const char              *str;
372         enum spectre_v4_policy  policy;
373 } spectre_v4_params[] = {
374         { "force-on",   SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
375         { "force-off",  SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
376         { "kernel",     SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
377 };
378 static int __init parse_spectre_v4_param(char *str)
379 {
380         int i;
381
382         if (!str || !str[0])
383                 return -EINVAL;
384
385         for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
386                 const struct spectre_v4_param *param = &spectre_v4_params[i];
387
388                 if (strncmp(str, param->str, strlen(param->str)))
389                         continue;
390
391                 __spectre_v4_policy = param->policy;
392                 return 0;
393         }
394
395         return -EINVAL;
396 }
397 early_param("ssbd", parse_spectre_v4_param);
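/*
 * In other words, the "ssbd=" kernel command-line option accepts:
 *
 *   ssbd=force-on  - unconditionally enable the mitigation for kernel
 *                    and userspace
 *   ssbd=force-off - unconditionally disable the mitigation
 *   ssbd=kernel    - mitigate the kernel, toggling the state on kernel
 *                    entry/exit; userspace opts in via prctl()
 */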
398
399 /*
400  * Because this was all written in a rush by people working in different silos,
401  * we've ended up with multiple command line options to control the same thing.
402  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
403  * with contradictory parameters. The mitigation is always either "off",
404  * "dynamic" or "on".
405  */
406 static bool spectre_v4_mitigations_off(void)
407 {
408         bool ret = cpu_mitigations_off() ||
409                    __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
410
411         if (ret)
412                 pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
413
414         return ret;
415 }
416
417 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
418 static bool spectre_v4_mitigations_dynamic(void)
419 {
420         return !spectre_v4_mitigations_off() &&
421                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
422 }
423
424 static bool spectre_v4_mitigations_on(void)
425 {
426         return !spectre_v4_mitigations_off() &&
427                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
428 }
429
430 ssize_t cpu_show_spec_store_bypass(struct device *dev,
431                                    struct device_attribute *attr, char *buf)
432 {
433         switch (spectre_v4_state) {
434         case SPECTRE_UNAFFECTED:
435                 return sprintf(buf, "Not affected\n");
436         case SPECTRE_MITIGATED:
437                 return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
438         case SPECTRE_VULNERABLE:
439                 fallthrough;
440         default:
441                 return sprintf(buf, "Vulnerable\n");
442         }
443 }
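/*
 * As with the other vulnerabilities files, this shows up in sysfs, e.g.:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 *   Mitigation: Speculative Store Bypass disabled via prctl
 */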
444
445 enum mitigation_state arm64_get_spectre_v4_state(void)
446 {
447         return spectre_v4_state;
448 }
449
450 static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
451 {
452         static const struct midr_range spectre_v4_safe_list[] = {
453                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
454                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
455                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
456                 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
457                 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
458                 MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
459                 { /* sentinel */ },
460         };
461
462         if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
463                 return SPECTRE_UNAFFECTED;
464
465         /* CPU features are detected first */
466         if (this_cpu_has_cap(ARM64_SSBS))
467                 return SPECTRE_MITIGATED;
468
469         return SPECTRE_VULNERABLE;
470 }
471
472 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
473 {
474         int ret;
475         struct arm_smccc_res res;
476
477         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
478                              ARM_SMCCC_ARCH_WORKAROUND_2, &res);
479
480         ret = res.a0;
481         switch (ret) {
482         case SMCCC_RET_SUCCESS:
483                 return SPECTRE_MITIGATED;
484         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
485                 fallthrough;
486         case SMCCC_RET_NOT_REQUIRED:
487                 return SPECTRE_UNAFFECTED;
488         default:
489                 fallthrough;
490         case SMCCC_RET_NOT_SUPPORTED:
491                 return SPECTRE_VULNERABLE;
492         }
493 }
494
495 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
496 {
497         enum mitigation_state state;
498
499         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
500
501         state = spectre_v4_get_cpu_hw_mitigation_state();
502         if (state == SPECTRE_VULNERABLE)
503                 state = spectre_v4_get_cpu_fw_mitigation_state();
504
505         return state != SPECTRE_UNAFFECTED;
506 }
507
508 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
509 {
510         if (user_mode(regs))
511                 return 1;
512
513         if (instr & BIT(PSTATE_Imm_shift))
514                 regs->pstate |= PSR_SSBS_BIT;
515         else
516                 regs->pstate &= ~PSR_SSBS_BIT;
517
518         arm64_skip_faulting_instruction(regs, 4);
519         return 0;
520 }
521
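/*
 * Match "MSR SSBS, #imm" with the immediate bit masked out of the
 * encoding, so the handler above sees both the #0 and #1 forms and can
 * update the saved PSTATE accordingly.
 */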
522 static struct undef_hook ssbs_emulation_hook = {
523         .instr_mask     = ~(1U << PSTATE_Imm_shift),
524         .instr_val      = 0xd500401f | PSTATE_SSBS,
525         .fn             = ssbs_emulation_handler,
526 };
527
528 static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
529 {
530         static bool undef_hook_registered = false;
531         static DEFINE_RAW_SPINLOCK(hook_lock);
532         enum mitigation_state state;
533
534         /*
535          * If the system is mitigated but this CPU doesn't have SSBS, then
536          * we must be on the safelist and there's nothing more to do.
537          */
538         state = spectre_v4_get_cpu_hw_mitigation_state();
539         if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
540                 return state;
541
542         raw_spin_lock(&hook_lock);
543         if (!undef_hook_registered) {
544                 register_undef_hook(&ssbs_emulation_hook);
545                 undef_hook_registered = true;
546         }
547         raw_spin_unlock(&hook_lock);
548
549         if (spectre_v4_mitigations_off()) {
550                 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
551                 set_pstate_ssbs(1);
552                 return SPECTRE_VULNERABLE;
553         }
554
555         /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
556         set_pstate_ssbs(0);
557         return SPECTRE_MITIGATED;
558 }
559
560 /*
561  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
562  * we fallthrough and check whether firmware needs to be called on this CPU.
563  */
564 void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
565                                                   __le32 *origptr,
566                                                   __le32 *updptr, int nr_inst)
567 {
568         BUG_ON(nr_inst != 1); /* Branch -> NOP */
569
570         if (spectre_v4_mitigations_off())
571                 return;
572
573         if (cpus_have_final_cap(ARM64_SSBS))
574                 return;
575
576         if (spectre_v4_mitigations_dynamic())
577                 *updptr = cpu_to_le32(aarch64_insn_gen_nop());
578 }
579
580 /*
581  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
582  * to call into firmware to adjust the mitigation state.
583  */
584 void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
585                                                __le32 *origptr,
586                                                __le32 *updptr, int nr_inst)
587 {
588         u32 insn;
589
590         BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
591
592         switch (arm_smccc_1_1_get_conduit()) {
593         case SMCCC_CONDUIT_HVC:
594                 insn = aarch64_insn_get_hvc_value();
595                 break;
596         case SMCCC_CONDUIT_SMC:
597                 insn = aarch64_insn_get_smc_value();
598                 break;
599         default:
600                 return;
601         }
602
603         *updptr = cpu_to_le32(insn);
604 }
605
606 static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
607 {
608         enum mitigation_state state;
609
610         state = spectre_v4_get_cpu_fw_mitigation_state();
611         if (state != SPECTRE_MITIGATED)
612                 return state;
613
614         if (spectre_v4_mitigations_off()) {
615                 arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
616                 return SPECTRE_VULNERABLE;
617         }
618
619         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
620
621         if (spectre_v4_mitigations_dynamic())
622                 __this_cpu_write(arm64_ssbd_callback_required, 1);
623
624         return SPECTRE_MITIGATED;
625 }
626
627 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
628 {
629         enum mitigation_state state;
630
631         WARN_ON(preemptible());
632
633         state = spectre_v4_enable_hw_mitigation();
634         if (state == SPECTRE_VULNERABLE)
635                 state = spectre_v4_enable_fw_mitigation();
636
637         update_mitigation_state(&spectre_v4_state, state);
638 }
639
640 static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
641 {
642         u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
643
644         if (state)
645                 regs->pstate |= bit;
646         else
647                 regs->pstate &= ~bit;
648 }
649
650 void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
651 {
652         struct pt_regs *regs = task_pt_regs(tsk);
653         bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
654
655         if (spectre_v4_mitigations_off())
656                 ssbs = true;
657         else if (spectre_v4_mitigations_dynamic() && !kthread)
658                 ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
659
660         __update_pstate_ssbs(regs, ssbs);
661 }
662
663 /*
664  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
665  * This is interesting because the "speculation disabled" behaviour can be
666  * configured so that it is preserved across exec(), which means that the
667  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
668  * from userspace.
669  */
670 static void ssbd_prctl_enable_mitigation(struct task_struct *task)
671 {
672         task_clear_spec_ssb_noexec(task);
673         task_set_spec_ssb_disable(task);
674         set_tsk_thread_flag(task, TIF_SSBD);
675 }
676
677 static void ssbd_prctl_disable_mitigation(struct task_struct *task)
678 {
679         task_clear_spec_ssb_noexec(task);
680         task_clear_spec_ssb_disable(task);
681         clear_tsk_thread_flag(task, TIF_SSBD);
682 }
683
684 static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
685 {
686         switch (ctrl) {
687         case PR_SPEC_ENABLE:
688                 /* Enable speculation: disable mitigation */
689                 /*
690                  * Force disabled speculation prevents it from being
691                  * re-enabled.
692                  */
693                 if (task_spec_ssb_force_disable(task))
694                         return -EPERM;
695
696                 /*
697                  * If the mitigation is forced on, then speculation is forced
698                  * off and we again prevent it from being re-enabled.
699                  */
700                 if (spectre_v4_mitigations_on())
701                         return -EPERM;
702
703                 ssbd_prctl_disable_mitigation(task);
704                 break;
705         case PR_SPEC_FORCE_DISABLE:
706                 /* Force disable speculation: force enable mitigation */
707                 /*
708                  * If the mitigation is forced off, then speculation is forced
709                  * on and we prevent it from being disabled.
710                  */
711                 if (spectre_v4_mitigations_off())
712                         return -EPERM;
713
714                 task_set_spec_ssb_force_disable(task);
715                 fallthrough;
716         case PR_SPEC_DISABLE:
717                 /* Disable speculation: enable mitigation */
718                 /* Same as PR_SPEC_FORCE_DISABLE */
719                 if (spectre_v4_mitigations_off())
720                         return -EPERM;
721
722                 ssbd_prctl_enable_mitigation(task);
723                 break;
724         case PR_SPEC_DISABLE_NOEXEC:
725                 /* Disable speculation until execve(): enable mitigation */
726                 /*
727                  * If the mitigation state is forced one way or the other, then
728                  * we must fail now before we try to toggle it on execve().
729                  */
730                 if (task_spec_ssb_force_disable(task) ||
731                     spectre_v4_mitigations_off() ||
732                     spectre_v4_mitigations_on()) {
733                         return -EPERM;
734                 }
735
736                 ssbd_prctl_enable_mitigation(task);
737                 task_set_spec_ssb_noexec(task);
738                 break;
739         default:
740                 return -ERANGE;
741         }
742
743         spectre_v4_enable_task_mitigation(task);
744         return 0;
745 }
746
747 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
748                              unsigned long ctrl)
749 {
750         switch (which) {
751         case PR_SPEC_STORE_BYPASS:
752                 return ssbd_prctl_set(task, ctrl);
753         default:
754                 return -ENODEV;
755         }
756 }
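/*
 * From userspace this is reached via prctl(2). A minimal sketch (error
 * handling omitted) of a task opting in to the Spectre-v4 mitigation for
 * itself:
 *
 *   #include <sys/prctl.h>
 *   #include <linux/prctl.h>
 *
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *         PR_SPEC_DISABLE, 0, 0);
 */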
757
758 static int ssbd_prctl_get(struct task_struct *task)
759 {
760         switch (spectre_v4_state) {
761         case SPECTRE_UNAFFECTED:
762                 return PR_SPEC_NOT_AFFECTED;
763         case SPECTRE_MITIGATED:
764                 if (spectre_v4_mitigations_on())
765                         return PR_SPEC_NOT_AFFECTED;
766
767                 if (spectre_v4_mitigations_dynamic())
768                         break;
769
770                 /* Mitigations are disabled, so we're vulnerable. */
771                 fallthrough;
772         case SPECTRE_VULNERABLE:
773                 fallthrough;
774         default:
775                 return PR_SPEC_ENABLE;
776         }
777
778         /* Check the mitigation state for this task */
779         if (task_spec_ssb_force_disable(task))
780                 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
781
782         if (task_spec_ssb_noexec(task))
783                 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
784
785         if (task_spec_ssb_disable(task))
786                 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
787
788         return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
789 }
790
791 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
792 {
793         switch (which) {
794         case PR_SPEC_STORE_BYPASS:
795                 return ssbd_prctl_get(task);
796         default:
797                 return -ENODEV;
798         }
799 }
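/*
 * The read side is reached the same way; a sketch of decoding the result:
 *
 *   int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *                   0, 0, 0);
 *
 *   ret & PR_SPEC_PRCTL   - the mitigation is controllable via prctl()
 *   ret & PR_SPEC_DISABLE - speculation is disabled (mitigation enabled)
 *                           for this task
 */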
800
801 /*
802  * Spectre BHB.
803  *
804  * A CPU is either:
805  * - Mitigated by running a branchy loop a CPU-specific number of times,
806  *   and listed in our "loop mitigated list".
807  * - Mitigated in software by the firmware Spectre v2 call.
808  * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
809  *   software mitigation in the vectors is needed.
810  * - Has CSV2.3, so is unaffected.
811  */
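/*
 * For example, reading off the tables further down: Cortex-A72 gets the
 * 8-iteration loop, Cortex-A73 relies on the firmware ARCH_WORKAROUND_3
 * call (where firmware implements it), and a CPU with ECBHB or CSV2_3
 * needs no change to the vectors at all.
 */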
812 static enum mitigation_state spectre_bhb_state;
813
814 enum mitigation_state arm64_get_spectre_bhb_state(void)
815 {
816         return spectre_bhb_state;
817 }
818
819 enum bhb_mitigation_bits {
820         BHB_LOOP,
821         BHB_FW,
822         BHB_HW,
823 };
824 static unsigned long system_bhb_mitigations;
825
826 /*
827  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
828  * SCOPE_SYSTEM call will give the right answer.
829  */
830 u8 spectre_bhb_loop_affected(int scope)
831 {
832         u8 k = 0;
833         static u8 max_bhb_k;
834
835         if (scope == SCOPE_LOCAL_CPU) {
836                 static const struct midr_range spectre_bhb_k32_list[] = {
837                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
838                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
839                         MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
840                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
841                         MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
842                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
843                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
844                         {},
845                 };
846                 static const struct midr_range spectre_bhb_k24_list[] = {
847                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
848                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
849                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
850                         {},
851                 };
852                 static const struct midr_range spectre_bhb_k8_list[] = {
853                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
854                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
855                         {},
856                 };
857
858                 if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
859                         k = 32;
860                 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
861                         k = 24;
862                 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
863                         k =  8;
864
865                 max_bhb_k = max(max_bhb_k, k);
866         } else {
867                 k = max_bhb_k;
868         }
869
870         return k;
871 }
872
873 static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
874 {
875         int ret;
876         struct arm_smccc_res res;
877
878         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
879                              ARM_SMCCC_ARCH_WORKAROUND_3, &res);
880
881         ret = res.a0;
882         switch (ret) {
883         case SMCCC_RET_SUCCESS:
884                 return SPECTRE_MITIGATED;
885         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
886                 return SPECTRE_UNAFFECTED;
887         default:
888                 fallthrough;
889         case SMCCC_RET_NOT_SUPPORTED:
890                 return SPECTRE_VULNERABLE;
891         }
892 }
893
894 static bool is_spectre_bhb_fw_affected(int scope)
895 {
896         static bool system_affected;
897         enum mitigation_state fw_state;
898         bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
899         static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
900                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
901                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
902                 {},
903         };
904         bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
905                                          spectre_bhb_firmware_mitigated_list);
906
907         if (scope != SCOPE_LOCAL_CPU)
908                 return system_affected;
909
910         fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
911         if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
912                 system_affected = true;
913                 return true;
914         }
915
916         return false;
917 }
918
919 static bool supports_ecbhb(int scope)
920 {
921         u64 mmfr1;
922
923         if (scope == SCOPE_LOCAL_CPU)
924                 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
925         else
926                 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
927
928         return cpuid_feature_extract_unsigned_field(mmfr1,
929                                                     ID_AA64MMFR1_ECBHB_SHIFT);
930 }
931
932 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
933                              int scope)
934 {
935         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
936
937         if (supports_csv2p3(scope))
938                 return false;
939
940         if (spectre_bhb_loop_affected(scope))
941                 return true;
942
943         if (is_spectre_bhb_fw_affected(scope))
944                 return true;
945
946         return false;
947 }
948
949 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
950 {
951         const char *v = arm64_get_bp_hardening_vector(slot);
952
953         if (slot < 0)
954                 return;
955
956         __this_cpu_write(this_cpu_vector, v);
957
958         /*
959          * When KPTI is in use, the vectors are switched when exiting to
960          * user-space.
961          */
962         if (arm64_kernel_unmapped_at_el0())
963                 return;
964
965         write_sysreg(v, vbar_el1);
966         isb();
967 }
968
969 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
970 {
971         bp_hardening_cb_t cpu_cb;
972         enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
973         struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
974
975         if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
976                 return;
977
978         if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
979                 /* No point mitigating Spectre-BHB alone. */
980         } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
981                 pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
982         } else if (cpu_mitigations_off()) {
983                 pr_info_once("spectre-bhb mitigation disabled by command line option\n");
984         } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
985                 state = SPECTRE_MITIGATED;
986                 set_bit(BHB_HW, &system_bhb_mitigations);
987         } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
988                 /*
989                  * Ensure KVM uses the indirect vector which will have the
990                  * branchy-loop added. A57/A72-r0 will already have selected
991                  * the spectre-indirect vector, which is sufficient for BHB
992                  * too.
993                  */
994                 if (!data->slot)
995                         data->slot = HYP_VECTOR_INDIRECT;
996
997                 this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
998                 state = SPECTRE_MITIGATED;
999                 set_bit(BHB_LOOP, &system_bhb_mitigations);
1000         } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
1001                 fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1002                 if (fw_state == SPECTRE_MITIGATED) {
1003                         /*
1004                          * Ensure KVM uses one of the spectre bp_hardening
1005                          * vectors. The indirect vector doesn't include the EL3
1006                          * call, so needs upgrading to
1007                          * HYP_VECTOR_SPECTRE_INDIRECT.
1008                          */
1009                         if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
1010                                 data->slot += 1;
1011
1012                         this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1013
1014                         /*
1015                          * The WA3 call in the vectors supersedes the WA1 call
1016                          * made during context-switch. Uninstall any firmware
1017                          * bp_hardening callback.
1018                          */
1019                         cpu_cb = spectre_v2_get_sw_mitigation_cb();
1020                         if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
1021                                 __this_cpu_write(bp_hardening_data.fn, NULL);
1022
1023                         state = SPECTRE_MITIGATED;
1024                         set_bit(BHB_FW, &system_bhb_mitigations);
1025                 }
1026         }
1027
1028         update_mitigation_state(&spectre_bhb_state, state);
1029 }
1030
1031 /* Patched to NOP when enabled */
1032 void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
1033                                                      __le32 *origptr,
1034                                                       __le32 *updptr, int nr_inst)
1035 {
1036         BUG_ON(nr_inst != 1);
1037
1038         if (test_bit(BHB_LOOP, &system_bhb_mitigations))
1039                 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1040 }
1041
1042 /* Patched to NOP when enabled */
1043 void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
1044                                                    __le32 *origptr,
1045                                                    __le32 *updptr, int nr_inst)
1046 {
1047         BUG_ON(nr_inst != 1);
1048
1049         if (test_bit(BHB_FW, &system_bhb_mitigations))
1050                 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1051 }
1052
1053 /* Patched to correct the immediate */
1054 void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1055                                    __le32 *origptr, __le32 *updptr, int nr_inst)
1056 {
1057         u8 rd;
1058         u32 insn;
1059         u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1060
1061         BUG_ON(nr_inst != 1); /* MOV -> MOV */
1062
1063         if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1064                 return;
1065
1066         insn = le32_to_cpu(*origptr);
1067         rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1068         insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1069                                          AARCH64_INSN_VARIANT_64BIT,
1070                                          AARCH64_INSN_MOVEWIDE_ZERO);
1071         *updptr++ = cpu_to_le32(insn);
1072 }
1073
1074 /* Patched to mov WA3 when supported */
1075 void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
1076                                    __le32 *origptr, __le32 *updptr, int nr_inst)
1077 {
1078         u8 rd;
1079         u32 insn;
1080
1081         BUG_ON(nr_inst != 1); /* MOV -> MOV */
1082
1083         if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
1084             !test_bit(BHB_FW, &system_bhb_mitigations))
1085                 return;
1086
1087         insn = le32_to_cpu(*origptr);
1088         rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1089
1090         insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
1091                                                   AARCH64_INSN_VARIANT_32BIT,
1092                                                   AARCH64_INSN_REG_ZR, rd,
1093                                                   ARM_SMCCC_ARCH_WORKAROUND_3);
1094         if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
1095                 return;
1096
1097         *updptr++ = cpu_to_le32(insn);
1098 }