/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if JG
#include "opt_atpic.h"
#endif
#include "opt_compat.h"

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include "assym.s"

	.text

	.globl	lwkt_switch_return

/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * us for the use of the swapgs instruction.  We cannot be interrupted
 * until the GS.base value is correct.  For most traps, we automatically
 * then enable interrupts if the interrupted context had them enabled.
 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process.  See x86_64/include/frame.h.
 * The current RFLAGS (the status register, which records the interrupt
 * disable state prior to the trap), the code segment register, and the
 * return instruction pointer are pushed by the cpu.  The cpu will also
 * push an 'error' code for certain traps.  We push a dummy error code for
 * those traps where the cpu doesn't, in order to maintain a consistent
 * frame.  We also push a contrived 'trap number'.
 *
 * The cpu does not push the general registers, we must do that, and we
 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 */
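
/*
 * For reference, the frame the cpu itself builds through a 64-bit
 * interrupt gate, growing down (illustrative only; the TF_* offsets
 * from assym.s are the authoritative layout):
 *
 *	%ss
 *	%rsp
 *	%rflags
 *	%cs
 *	%rip
 *	error code	<- pushed by the cpu only for some vectors; the
 *			   macros below push a dummy for the rest.
 *			   %rsp points here on entry to the vectors.
 */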

MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Interrupts are enabled for all traps, otherwise horrible livelocks
 * can occur with the smp_invltlb and cpusync code.
 */
#if 0
#define TRAP_NOEN(a)	\
	subq $TF_RIP,%rsp; \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
#endif

/* Regular traps; The cpu does not supply tf_err for these. */
#define TRAP(a)	 \
	subq $TF_RIP,%rsp; \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
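
/*
 * For illustration, TRAP(T_DIVIDE) as used by IDTVEC(div) below
 * expands to:
 *
 *	subq	$TF_RIP,%rsp		# extend frame down to tf_rip,
 *					# the last slot the cpu filled
 *	movq	$0,TF_XFLAGS(%rsp)
 *	movq	$T_DIVIDE,TF_TRAPNO(%rsp)
 *	movq	$0,TF_ADDR(%rsp)
 *	movq	$0,TF_ERR(%rsp)		# dummy error code
 *	jmp	alltraps
 */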
IDTVEC(dbg)
	TRAP(T_TRCTRAP)
IDTVEC(bpt)
	TRAP(T_BPTFLT)
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

/* This group of traps have tf_err already pushed by the cpu */
#define TRAP_ERR(a)	\
	subq $TF_ERR,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_XFLAGS(%rsp) ; \
	jmp alltraps
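
/*
 * Note the subq of $TF_ERR rather than $TF_RIP: for these vectors the
 * cpu has already pushed an error code, so the frame only has to be
 * extended down to tf_err to end up with the same layout TRAP() builds.
 */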
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

	/*
	 * alltraps entry point.  Use swapgs if this is the first time in the
	 * kernel from userland.  Reenable interrupts if they were enabled
	 * before the trap.  This approximates SDT_SYS386TGT on the i386 port.
	 */
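
	/*
	 * swapgs exchanges GS.base with MSR_KERNELGSBASE, giving the
	 * kernel access to its per-cpu (PCPU) data.  It must be executed
	 * exactly once on a user->kernel transition and undone on the way
	 * out, which is why every entry path first checks the RPL bits of
	 * the saved %cs to see whether we came from userland.
	 */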

	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	alltraps_testi
	swapgs
alltraps_testi:
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs
	sti
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */
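
	/*
	 * The call into C follows the SysV AMD64 ABI: trap() takes the
	 * trapframe pointer as its single argument in %rdi, so %rsp
	 * (which now points at the completed frame) is copied there.
	 * cld is needed because the ABI requires DF to be clear on
	 * function entry and a trap can interrupt code mid string-op.
	 */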

	/*
	 * alltraps_noen entry point.  Unlike alltraps above, we want to
	 * leave the interrupts disabled.  This corresponds to
	 * SDT_SYS386IGT on the i386 port.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	alltraps_pushregs
	swapgs
	jmp	alltraps_pushregs

IDTVEC(dblfault)
	subq	$TF_ERR,%rsp
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	1f
	swapgs
1:	movq	%rsp, %rdi
	cld
	call	dblfault_handler
2:	hlt
	jmp	2b
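
	/*
	 * A double fault is not considered recoverable: dblfault_handler()
	 * reports the machine state and we spin on hlt rather than attempt
	 * an iretq.  As noted in the NMI comments below, this handler runs
	 * on its own IST stack (shared with the NMI handler), since the
	 * original kernel stack may well be the reason we faulted.
	 */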

IDTVEC(page)
	subq	$TF_ERR,%rsp
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	/* Fixup %gs if coming from userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	1f
	swapgs
1:
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	movq	%cr2,%rdi		/* preserve %cr2 before .. */
	movq	%rdi,TF_ADDR(%rsp)	/* enabling interrupts. */
	movq	$0,TF_XFLAGS(%rsp)
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi
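
	/*
	 * %cr2 (the faulting linear address) must be read before the
	 * sti: any page fault taken once interrupts are reenabled would
	 * overwrite %cr2 and the wrong address would reach trap().
	 */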

	/*
	 * We have to special-case this one.  If we get a trap in doreti() at
	 * the iretq stage, we'll reenter with the wrong gs state.  We'll have
	 * to do the swapgs in this case even though we are coming from the
	 * kernel.  XXX linux has a trap handler for their equivalent of
	 * load_gs().
	 */
IDTVEC(prot)
	subq	$TF_ERR,%rsp
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */

	/*
	 * Fixup %gs if coming from userland.  Handle the special case where
	 * %fs faults in doreti at the iretq instruction itself.
	 */
	leaq	doreti_iret(%rip),%rdi
	cmpq	%rdi,TF_RIP(%rsp)		/* special iretq fault case */
	je	2f
	testb	$SEL_RPL_MASK,TF_CS(%rsp)	/* check if from userland */
	jz	1f
2:
	swapgs
1:
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi

/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 */
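/*
 * For reference, the syscall instruction itself (architecturally, not
 * DragonFly-specific) has already done the following by the time we get
 * here: loaded %rip from MSR_LSTAR, saved the user return %rip in %rcx
 * and the user %rflags in %r11, masked %rflags with the kernel's
 * MSR_SF_MASK setting (which must include PSL_I, since the code below
 * relies on interrupts staying disabled until the explicit sti), and
 * loaded new %cs/%ss selectors from MSR_STAR.  It did NOT switch
 * stacks, hence the PCPU(scratch_rsp) juggling below.
 */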
IDTVEC(fast_syscall)
	swapgs
	movq	%rsp,PCPU(scratch_rsp)
	movq	PCPU(common_tss) + TSS_RSP0, %rsp
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(scratch_rsp),%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	orl	$RQF_QUICKRET,PCPU(reqflags)
	sti
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)
	movq	$T_FAST_SYSCALL,TF_TRAPNO(%rsp)	/* for the vkernel */
	movq	$0,TF_XFLAGS(%rsp)	/* note: used in signal frame */
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	syscall2
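
	/*
	 * syscall2(), the C system call dispatcher for this platform,
	 * selects the handler from the sysent table and writes return
	 * values and error state back into the trapframe; the quick
	 * return path below depends on that.
	 */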

	/*
	 * Fast return from system call
	 */
	cli
	testl	$RQF_IPIQ|RQF_TIMER|RQF_INTPEND|RQF_AST_MASK,PCPU(reqflags)
	jnz	1f
	testl	$RQF_QUICKRET,PCPU(reqflags)
	jz	1f
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RAX(%rsp),%rax
	movq	TF_RFLAGS(%rsp),%r11
	movq	TF_RIP(%rsp),%rcx
	movq	TF_RSP(%rsp),%rsp
	swapgs
	sysretq
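
	/*
	 * sysretq reloads %rip from %rcx and %rflags from %r11, so only
	 * the registers userland can still rely on are reloaded above: a
	 * syscall stub behaves like a C function call, so caller-saved
	 * registers may be trashed, while %rbx/%rbp/%r12-%r15 were
	 * preserved by the kernel's own C code.  %rcx and %r11 are
	 * architecturally clobbered across syscall/sysret.
	 */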
	/*
	 * Normal slow / full iret
	 */
1:
	MEXITCOUNT
	jmp	doreti

/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
IDTVEC(fast_syscall32)
	sysret

/*
 * NMI handling is special.
 *
 * First, NMIs do not respect the state of the processor's RFLAGS.IF
 * bit and the NMI handler may be invoked at any time, including when
 * the processor is in a critical section with RFLAGS.IF == 0.  In
 * particular, this means that the processor's GS.base values could be
 * inconsistent on entry to the handler, and so we need to read
 * MSR_GSBASE to determine if a 'swapgs' is needed.  We use '%ebx', a
 * C-preserved register, to remember whether to swap GS back on the
 * exit path.
 *
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We therefore need to
 * execute the NMI handler with interrupts disabled to prevent a
 * nested interrupt from executing an 'iretq' instruction and
 * inadvertently taking the processor out of NMI mode.
 *
 * Third, the NMI handler runs on its own stack (tss_ist1), shared
 * with the double fault handler.
 */
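
/*
 * The GS.base test below exploits the canonical-address split: kernel
 * addresses have their upper 32 bits at or above those of
 * VM_MAX_USER_ADDRESS, so comparing just the high dword returned by
 * rdmsr in %edx is sufficient to tell a kernel GS.base from a user one.
 */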

IDTVEC(nmi)
	subq	$TF_RIP,%rsp
	movq	$(T_NMI),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	xorl	%ebx,%ebx
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	nmi_needswapgs		/* we came from userland */
	movl	$MSR_GSBASE,%ecx
	rdmsr
	cmpl	$VM_MAX_USER_ADDRESS >> 32,%edx
	jae	nmi_calltrap		/* GS.base holds a kernel VA */
nmi_needswapgs:
	incl	%ebx
	swapgs
/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	testl	%ebx,%ebx
	jz	nmi_restoreregs
	swapgs
nmi_restoreregs:
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	addq	$TF_RIP,%rsp
	iretq
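
	/*
	 * Unlike the other trap paths, the NMI exit restores registers
	 * and iretq's here rather than going through doreti: the iretq
	 * is also what re-arms NMI delivery, and running the normal
	 * AST/soft interrupt processing on the dedicated IST stack
	 * would not be safe.
	 */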

/*
 * This function is what cpu_heavy_restore jumps to after a new process
 * is created.  The LWKT subsystem switches while holding a critical
 * section and we maintain that abstraction here (e.g. because
 * cpu_heavy_restore needs it due to PCB_*() manipulation), then get out of
 * it before calling the initial function (typically fork_return()) and/or
 * returning to user mode.
 *
 * The MP lock is not held at any point but the critcount is bumped
 * on entry to prevent interruption of the trampoline at a bad point.
 *
 * This is effectively what td->td_switch() returns to.  It 'returns' the
 * old thread in %rax and since this is not returning to a td->td_switch()
 * call from lwkt_switch() we must handle the cleanup for the old thread
 * by calling lwkt_switch_return().
 *
 * fork_trampoline(%rax:otd, %rbx:func, %r12:arg)
 */
ENTRY(fork_trampoline)
	movq	%rax,%rdi
	call	lwkt_switch_return
	movq	PCPU(curthread),%rax
	decl	TD_CRITCOUNT(%rax)

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 *
	 * initproc has its own fork handler, start_init(), which DOES
	 * return.
	 *
	 * %rbx - chaining function (typically fork_return)
	 * %r12 -> %rdi (argument)
	 * frame-> %rsi (trap frame)
	 *
	 * void (func:rbx)(arg:rdi, trapframe:rsi)
	 */
	movq	%rsp, %rsi		/* pass trapframe by reference */
	movq	%r12, %rdi		/* arg1 */
	call	*%rbx			/* function */

	/* cut from syscall */

	sti
	call	splz
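
	/*
	 * splz() flushes any interrupt or IPI processing that was held
	 * off while the critical section was in effect, mirroring what
	 * the normal syscall exit path does before returning to user
	 * mode.
	 */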

	/*
	 * Return via doreti to handle ASTs.
	 *
	 * trapframe is at the top of the stack.
	 */
	MEXITCOUNT
	jmp	doreti

494/*
495 * To efficiently implement classification of trap and interrupt handlers
496 * for profiling, there must be only trap handlers between the labels btrap
497 * and bintr, and only interrupt handlers between the labels bintr and
498 * eintr. This is implemented (partly) by including files that contain
499 * some of the handlers. Before including the files, set up a normal asm
500 * environment so that the included files doen't need to know that they are
501 * included.
502 */
503
504#ifdef COMPAT_IA32
505 .data
506 .p2align 4
507 .text
508 SUPERALIGN_TEXT
509
b2b3ffcd 510#include <x86_64/ia32/ia32_exception.S>
c8fe38ae
MD
511#endif
512
513 .data
514 .p2align 4
515 .text
516 SUPERALIGN_TEXT
517MCOUNT_LABEL(bintr)
518
519#if JG
b2b3ffcd 520#include <x86_64/x86_64/apic_vector.S>
c8fe38ae
MD
521#endif
522
523#ifdef DEV_ATPIC
524 .data
525 .p2align 4
526 .text
527 SUPERALIGN_TEXT
528
b2b3ffcd 529#include <x86_64/isa/atpic_vector.S>
c8fe38ae
MD
530#endif
531
532 .text
533MCOUNT_LABEL(eintr)
534