treewide: kzalloc() -> kcalloc()
[linux.git] arch/arm64/kernel/armv8_deprecated.c
/*
 *  Copyright (C) 2014 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/kprobes.h>

#define CREATE_TRACE_POINTS
#include "trace-events-emulation.h"

/*
 * The runtime support for a deprecated instruction can be in one of the
 * following three states -
 *
 * 0 = undef
 * 1 = emulate (software emulation)
 * 2 = hw (supported in hardware)
 */
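/*
 * Each registered instruction is exposed as a sysctl under /proc/sys/abi/
 * (e.g. /proc/sys/abi/cp15_barrier) through which the mode above can be
 * changed at runtime; see register_insn_emulation_sysctl() below.
 */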
enum insn_emulation_mode {
        INSN_UNDEF,
        INSN_EMULATE,
        INSN_HW,
};

enum legacy_insn_status {
        INSN_DEPRECATED,
        INSN_OBSOLETE,
};

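/*
 * ->hooks points to an array of undef_hooks terminated by an entry with
 * instr_mask == 0; the registration helpers below iterate until they hit
 * that sentinel.
 */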
struct insn_emulation_ops {
        const char              *name;
        enum legacy_insn_status status;
        struct undef_hook       *hooks;
        int                     (*set_hw_mode)(bool enable);
};

struct insn_emulation {
        struct list_head node;
        struct insn_emulation_ops *ops;
        int current_mode;
        int min;
        int max;
};

static LIST_HEAD(insn_emulation);
static int nr_insn_emulated __initdata;
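/* Protects the insn_emulation list and nr_insn_emulated. */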
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);

static void register_emulation_hooks(struct insn_emulation_ops *ops)
{
        struct undef_hook *hook;

        BUG_ON(!ops->hooks);

        for (hook = ops->hooks; hook->instr_mask; hook++)
                register_undef_hook(hook);

        pr_notice("Registered %s emulation handler\n", ops->name);
}

static void remove_emulation_hooks(struct insn_emulation_ops *ops)
{
        struct undef_hook *hook;

        BUG_ON(!ops->hooks);

        for (hook = ops->hooks; hook->instr_mask; hook++)
                unregister_undef_hook(hook);

        pr_notice("Removed %s emulation handler\n", ops->name);
}

static void enable_insn_hw_mode(void *data)
{
        struct insn_emulation *insn = (struct insn_emulation *)data;
        if (insn->ops->set_hw_mode)
                insn->ops->set_hw_mode(true);
}

static void disable_insn_hw_mode(void *data)
{
        struct insn_emulation *insn = (struct insn_emulation *)data;
        if (insn->ops->set_hw_mode)
                insn->ops->set_hw_mode(false);
}

/* Run set_hw_mode(mode) on all active CPUs */
static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
{
        if (!insn->ops->set_hw_mode)
                return -EINVAL;
        if (enable)
                on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
        else
                on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
        return 0;
}

/*
 * Run set_hw_mode for all insns on a starting CPU.
 * Returns:
 *  0           - If all the set_hw_mode() callbacks ran successfully.
 * -EINVAL      - At least one callback is not supported by this CPU.
 */
static int run_all_insn_set_hw_mode(unsigned int cpu)
{
        int rc = 0;
        unsigned long flags;
        struct insn_emulation *insn;

        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_for_each_entry(insn, &insn_emulation, node) {
                bool enable = (insn->current_mode == INSN_HW);
                if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
                        pr_warn("CPU[%u] cannot support the emulation of %s\n",
                                cpu, insn->ops->name);
                        rc = -EINVAL;
                }
        }
        raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
        return rc;
}

static int update_insn_emulation_mode(struct insn_emulation *insn,
                                       enum insn_emulation_mode prev)
{
        int ret = 0;

        switch (prev) {
        case INSN_UNDEF: /* Nothing to be done */
                break;
        case INSN_EMULATE:
                remove_emulation_hooks(insn->ops);
                break;
        case INSN_HW:
                if (!run_all_cpu_set_hw_mode(insn, false))
                        pr_notice("Disabled %s support\n", insn->ops->name);
                break;
        }

        switch (insn->current_mode) {
        case INSN_UNDEF:
                break;
        case INSN_EMULATE:
                register_emulation_hooks(insn->ops);
                break;
        case INSN_HW:
                ret = run_all_cpu_set_hw_mode(insn, true);
                if (!ret)
                        pr_notice("Enabled %s support\n", insn->ops->name);
                break;
        }

        return ret;
}

static void __init register_insn_emulation(struct insn_emulation_ops *ops)
{
        unsigned long flags;
        struct insn_emulation *insn;

        insn = kzalloc(sizeof(*insn), GFP_KERNEL);
        if (!insn)
                return;

        insn->ops = ops;
        insn->min = INSN_UNDEF;

        switch (ops->status) {
        case INSN_DEPRECATED:
                insn->current_mode = INSN_EMULATE;
                /* Disable the HW mode if it was turned on at early boot time */
                run_all_cpu_set_hw_mode(insn, false);
                insn->max = INSN_HW;
                break;
        case INSN_OBSOLETE:
                insn->current_mode = INSN_UNDEF;
                insn->max = INSN_EMULATE;
                break;
        }

        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_add(&insn->node, &insn_emulation);
        nr_insn_emulated++;
        raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);

        /* Register any handlers if required */
        update_insn_emulation_mode(insn, INSN_UNDEF);
}

static int emulation_proc_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos)
{
        int ret = 0;
        struct insn_emulation *insn = (struct insn_emulation *) table->data;
        enum insn_emulation_mode prev_mode = insn->current_mode;

        table->data = &insn->current_mode;
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (ret || !write || prev_mode == insn->current_mode)
                goto ret;

        ret = update_insn_emulation_mode(insn, prev_mode);
        if (ret) {
                /* Mode change failed, revert to previous mode. */
                insn->current_mode = prev_mode;
                update_insn_emulation_mode(insn, INSN_UNDEF);
        }
ret:
        table->data = insn;
        return ret;
}

static void __init register_insn_emulation_sysctl(void)
{
        unsigned long flags;
        int i = 0;
        struct insn_emulation *insn;
        struct ctl_table *insns_sysctl, *sysctl;

        insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
                               GFP_KERNEL);
        if (!insns_sysctl)
                return;

        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_for_each_entry(insn, &insn_emulation, node) {
                sysctl = &insns_sysctl[i];

                sysctl->mode = 0644;
                sysctl->maxlen = sizeof(int);

                sysctl->procname = insn->ops->name;
                sysctl->data = insn;
                sysctl->extra1 = &insn->min;
                sysctl->extra2 = &insn->max;
                sysctl->proc_handler = emulation_proc_handler;
                i++;
        }
        raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);

        register_sysctl("abi", insns_sysctl);
}

/*
 *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
 *  store-exclusive.
 *
 *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
 *  Where: Rt  = destination
 *         Rt2 = source
 *         Rn  = address
 */

/*
 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
 */

/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
#define __SWP_LL_SC_LOOPS       4

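/*
 * The LL/SC sequence below atomically swaps 'data' with the word (or byte,
 * for the "b" variant) at 'addr':
 *  - 'res' is 0 on success, -EAGAIN if the exclusive store failed for
 *    __SWP_LL_SC_LOOPS consecutive attempts, or -EFAULT (via the exception
 *    fixup) if the user access faulted;
 *  - on success, 'data' is updated with the value previously held at 'addr';
 *  - 'temp' holds the loaded value and 'temp2' the remaining retry count.
 */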
#define __user_swpX_asm(data, addr, res, temp, temp2, B)        \
do {                                                            \
        uaccess_enable();                                       \
        __asm__ __volatile__(                                   \
        "       mov             %w3, %w7\n"                     \
        "0:     ldxr"B"         %w2, [%4]\n"                    \
        "1:     stxr"B"         %w0, %w1, [%4]\n"               \
        "       cbz             %w0, 2f\n"                      \
        "       sub             %w3, %w3, #1\n"                 \
        "       cbnz            %w3, 0b\n"                      \
        "       mov             %w0, %w5\n"                     \
        "       b               3f\n"                           \
        "2:\n"                                                  \
        "       mov             %w1, %w2\n"                     \
        "3:\n"                                                  \
        "       .pushsection     .fixup,\"ax\"\n"               \
        "       .align          2\n"                            \
        "4:     mov             %w0, %w6\n"                     \
        "       b               3b\n"                           \
        "       .popsection"                                    \
        _ASM_EXTABLE(0b, 4b)                                    \
        _ASM_EXTABLE(1b, 4b)                                    \
        : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
        : "r" ((unsigned long)addr), "i" (-EAGAIN),             \
          "i" (-EFAULT),                                        \
          "i" (__SWP_LL_SC_LOOPS)                               \
        : "memory");                                            \
        uaccess_disable();                                      \
} while (0)

#define __user_swp_asm(data, addr, res, temp, temp2) \
        __user_swpX_asm(data, addr, res, temp, temp2, "")
#define __user_swpb_asm(data, addr, res, temp, temp2) \
        __user_swpX_asm(data, addr, res, temp, temp2, "b")

/*
 * Bit 22 of the instruction encoding distinguishes between
 * the SWP and SWPB variants (bit set means SWPB).
 */
#define TYPE_SWPB (1 << 22)

static int emulate_swpX(unsigned int address, unsigned int *data,
                        unsigned int type)
{
        unsigned int res = 0;

        if ((type != TYPE_SWPB) && (address & 0x3)) {
                /* SWP to unaligned address not permitted */
                pr_debug("SWP instruction on unaligned pointer!\n");
                return -EFAULT;
        }

        while (1) {
                unsigned long temp, temp2;

                if (type == TYPE_SWPB)
                        __user_swpb_asm(*data, address, res, temp, temp2);
                else
                        __user_swp_asm(*data, address, res, temp, temp2);

                if (likely(res != -EAGAIN) || signal_pending(current))
                        break;

                cond_resched();
        }

        return res;
}

#define ARM_OPCODE_CONDTEST_FAIL   0
#define ARM_OPCODE_CONDTEST_PASS   1
#define ARM_OPCODE_CONDTEST_UNCOND 2

#define ARM_OPCODE_CONDITION_UNCOND     0xf

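/*
 * The AArch32 condition code lives in bits [31:28] of the instruction;
 * 0xf selects the "unconditional" encoding space rather than a real
 * condition.
 */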
static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
{
        u32 cc_bits  = opcode >> 28;

        if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
                if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
                        return ARM_OPCODE_CONDTEST_PASS;
                else
                        return ARM_OPCODE_CONDTEST_FAIL;
        }
        return ARM_OPCODE_CONDTEST_UNCOND;
}

/*
 * swp_handler logs the ID of the calling process, dissects the instruction,
 * sanity-checks the memory location, calls emulate_swpX for the actual
 * operation and deals with fixup/error handling before returning.
 */
static int swp_handler(struct pt_regs *regs, u32 instr)
{
        u32 destreg, data, type, address = 0;
        const void __user *user_ptr;
        int rn, rt2, res = 0;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

        type = instr & TYPE_SWPB;

        switch (aarch32_check_condition(instr, regs->pstate)) {
        case ARM_OPCODE_CONDTEST_PASS:
                break;
        case ARM_OPCODE_CONDTEST_FAIL:
                /* Condition failed - return to next instruction */
                goto ret;
        case ARM_OPCODE_CONDTEST_UNCOND:
                /* If unconditional encoding - not a SWP, undef */
                return -EFAULT;
        default:
                return -EINVAL;
        }

        rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
        rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);

        address = (u32)regs->user_regs.regs[rn];
        data    = (u32)regs->user_regs.regs[rt2];
        destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);

        pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x\n",
                rn, address, destreg, rt2, data);

        /* Check that the access is within a valid range for both SWP and SWPB */
        user_ptr = (const void __user *)(unsigned long)(address & ~3);
        if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
                pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
                        address);
                goto fault;
        }

        res = emulate_swpX(address, &data, type);
        if (res == -EFAULT)
                goto fault;
        else if (res == 0)
                regs->user_regs.regs[destreg] = data;

ret:
        if (type == TYPE_SWPB)
                trace_instruction_emulation("swpb", regs->pc);
        else
                trace_instruction_emulation("swp", regs->pc);

        pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
                        current->comm, (unsigned long)current->pid, regs->pc);

        arm64_skip_faulting_instruction(regs, 4);
        return 0;

fault:
        pr_debug("SWP{B} emulation: access caused memory abort!\n");
        arm64_notify_segfault(address);

        return 0;
}

/*
 * Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel itself must be SWP-free, and SWP{B} has no Thumb encoding.
 */
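/* A32 encoding: cccc 0001 0B00 Rn Rt 0000 1001 Rt2, where B selects SWPB */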
static struct undef_hook swp_hooks[] = {
        {
                .instr_mask     = 0x0fb00ff0,
                .instr_val      = 0x01000090,
                .pstate_mask    = COMPAT_PSR_MODE_MASK,
                .pstate_val     = COMPAT_PSR_MODE_USR,
                .fn             = swp_handler
        },
        { }
};

static struct insn_emulation_ops swp_ops = {
        .name = "swp",
        .status = INSN_OBSOLETE,
        .hooks = swp_hooks,
        .set_hw_mode = NULL,
};

static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

        switch (aarch32_check_condition(instr, regs->pstate)) {
        case ARM_OPCODE_CONDTEST_PASS:
                break;
        case ARM_OPCODE_CONDTEST_FAIL:
                /* Condition failed - return to next instruction */
                goto ret;
        case ARM_OPCODE_CONDTEST_UNCOND:
                /* If unconditional encoding - not a barrier instruction */
                return -EFAULT;
        default:
                return -EINVAL;
        }

        switch (aarch32_insn_mcr_extract_crm(instr)) {
        case 10:
                /*
                 * dmb - mcr p15, 0, Rt, c7, c10, 5
                 * dsb - mcr p15, 0, Rt, c7, c10, 4
                 */
                if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
                        dmb(sy);
                        trace_instruction_emulation(
                                "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
                } else {
                        dsb(sy);
                        trace_instruction_emulation(
                                "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
                }
                break;
        case 5:
                /*
                 * isb - mcr p15, 0, Rt, c7, c5, 4
                 *
                 * Taking an exception or returning from one acts as an
                 * instruction barrier. So no explicit barrier needed here.
                 */
                trace_instruction_emulation(
                        "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
                break;
        }

ret:
        pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
                        current->comm, (unsigned long)current->pid, regs->pc);

        arm64_skip_faulting_instruction(regs, 4);
        return 0;
}

static int cp15_barrier_set_hw_mode(bool enable)
{
        if (enable)
                config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
        else
                config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
        return 0;
}

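/*
 * The first entry matches "mcr p15, 0, Rt, c7, c10, {4,5}" (DSB/DMB), the
 * second "mcr p15, 0, Rt, c7, c5, 4" (ISB); see cp15barrier_handler() above.
 */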
static struct undef_hook cp15_barrier_hooks[] = {
        {
                .instr_mask     = 0x0fff0fdf,
                .instr_val      = 0x0e070f9a,
                .pstate_mask    = COMPAT_PSR_MODE_MASK,
                .pstate_val     = COMPAT_PSR_MODE_USR,
                .fn             = cp15barrier_handler,
        },
        {
                .instr_mask     = 0x0fff0fff,
                .instr_val      = 0x0e070f95,
                .pstate_mask    = COMPAT_PSR_MODE_MASK,
                .pstate_val     = COMPAT_PSR_MODE_USR,
                .fn             = cp15barrier_handler,
        },
        { }
};

static struct insn_emulation_ops cp15_barrier_ops = {
        .name = "cp15_barrier",
        .status = INSN_DEPRECATED,
        .hooks = cp15_barrier_hooks,
        .set_hw_mode = cp15_barrier_set_hw_mode,
};

static int setend_set_hw_mode(bool enable)
{
        if (!cpu_supports_mixed_endian_el0())
                return -EINVAL;

        if (enable)
                config_sctlr_el1(SCTLR_EL1_SED, 0);
        else
                config_sctlr_el1(0, SCTLR_EL1_SED);
        return 0;
}

static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
{
        char *insn;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

        if (big_endian) {
                insn = "setend be";
                regs->pstate |= COMPAT_PSR_E_BIT;
        } else {
                insn = "setend le";
                regs->pstate &= ~COMPAT_PSR_E_BIT;
        }

        trace_instruction_emulation(insn, regs->pc);
        pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
                        current->comm, (unsigned long)current->pid, regs->pc);

        return 0;
}

static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{
        int rc = compat_setend_handler(regs, (instr >> 9) & 1);
        arm64_skip_faulting_instruction(regs, 4);
        return rc;
}

static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{
        int rc = compat_setend_handler(regs, (instr >> 3) & 1);
        arm64_skip_faulting_instruction(regs, 2);
        return rc;
}

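/*
 * SETEND carries its endianness selection in bit 9 of the A32 encoding and
 * in bit 3 of the T16 (Thumb) encoding; see the two handlers above.
 */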
static struct undef_hook setend_hooks[] = {
        {
                .instr_mask     = 0xfffffdff,
                .instr_val      = 0xf1010000,
                .pstate_mask    = COMPAT_PSR_MODE_MASK,
                .pstate_val     = COMPAT_PSR_MODE_USR,
                .fn             = a32_setend_handler,
        },
        {
                /* Thumb mode */
                .instr_mask     = 0x0000fff7,
                .instr_val      = 0x0000b650,
                .pstate_mask    = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
                .pstate_val     = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
                .fn             = t16_setend_handler,
        },
        {}
};

static struct insn_emulation_ops setend_ops = {
        .name = "setend",
        .status = INSN_DEPRECATED,
        .hooks = setend_hooks,
        .set_hw_mode = setend_set_hw_mode,
};

/*
 * Invoked as core_initcall, so that the emulation handlers are registered
 * before userspace (init) is spawned.
 */
static int __init armv8_deprecated_init(void)
{
        if (IS_ENABLED(CONFIG_SWP_EMULATION))
                register_insn_emulation(&swp_ops);

        if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
                register_insn_emulation(&cp15_barrier_ops);

        if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
                if (system_supports_mixed_endian_el0())
                        register_insn_emulation(&setend_ops);
                else
                        pr_info("setend instruction emulation is not supported on this system\n");
        }

        cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
                                  "arm64/isndep:starting",
                                  run_all_insn_set_hw_mode, NULL);
        register_insn_emulation_sysctl();

        return 0;
}

core_initcall(armv8_deprecated_init);