/* sys/platform/pc64/acpica5/acpi_wakecode.S */
/*-
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/acpica/acpi_wakecode.S,v 1.9 2004/01/01 22:57:22 njl Exp $
 * $DragonFly: src/sys/platform/pc32/acpica5/acpi_wakecode.S,v 1.1 2004/02/21 06:48:05 dillon Exp $
 */

#define LOCORE

#include <machine/asmacros.h>
#include <machine/param.h>
#include <machine/specialreg.h>

/*
 * Real-mode (16-bit) wakeup entry point.  On resume from an ACPI sleep
 * state the BIOS transfers control here via the wake vector; we rebuild
 * just enough state to get back into protected mode, then hand off to
 * the 32-bit code below.
 */
	.align 4
	.code16
wakeup_16:
	nop
	cli

	/*
	 * Set up segment registers for real mode and a small stack for
	 * any calls we make.  %cs holds whatever segment the wake
	 * vector dropped us in; reuse it for %ds and %ss.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
	movw	$PAGE_SIZE,%sp

	/* Re-initialize video BIOS if the reset_video tunable is set. */
	cmp	$0,reset_video
	je	wakeup_16_gdt
	lcall	$0xc000,$3		/* conventional video BIOS entry */

	/*
	 * Set up segment registers for real mode again in case the
	 * previous BIOS call clobbered them.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss

wakeup_16_gdt:
	/* Load GDT for real mode */
	lgdt	physical_gdt

	/* Restore CR2, CR3 and CR4 (fault address, page dir base, features) */
	mov	previous_cr2,%eax
	mov	%eax,%cr2
	mov	previous_cr3,%eax
	mov	%eax,%cr3
	mov	previous_cr4,%eax
	mov	%eax,%cr4

	/*
	 * Transfer the saved values to protected mode by patching the
	 * immediate operands of the "pushl $imm32" instructions at
	 * wakeup_32stack below (self-modifying code).  Each pushl
	 * encodes as 1 opcode byte + 4 immediate bytes, so the idx-th
	 * immediate lives at byte offset idx*5+1 = (idx+1)+(idx*4).
	 */
#define NVALUES	9
#define TRANSFER_STACK32(val, idx)	\
	mov	val,%eax;		\
	mov	%eax,wakeup_32stack+(idx+1)+(idx*4);

	TRANSFER_STACK32(previous_ss, (NVALUES - 9))
	TRANSFER_STACK32(previous_fs, (NVALUES - 8))
	TRANSFER_STACK32(previous_ds, (NVALUES - 7))
	TRANSFER_STACK32(physical_gdt+2, (NVALUES - 6))
	TRANSFER_STACK32(where_to_recover, (NVALUES - 5))
	TRANSFER_STACK32(previous_idt+2, (NVALUES - 4))
	TRANSFER_STACK32(previous_ldt, (NVALUES - 3))
	TRANSFER_STACK32(previous_gdt+2, (NVALUES - 2))
	TRANSFER_STACK32(previous_tr, (NVALUES - 1))
	TRANSFER_STACK32(previous_cr0, (NVALUES - 0))

	mov	physical_esp,%esi	/* to be used in 32bit code */

	/* Enable protected mode (no paging yet) */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

wakeup_sw32:
	/*
	 * Switch to protected mode by intersegmental jump.  The target
	 * offset 0x12345678 is a placeholder; it is replaced at runtime
	 * with the physical address of wakeup_32.
	 */
	ljmpl	$0x8,$0x12345678	/* Code location, to be replaced */

105 .code32
106wakeup_32:
107 /*
108 * Switched to protected mode w/o paging
109 * %esi: KERNEL stack pointer (physical address)
110 */
111
112 nop
113
114 /* Set up segment registers for protected mode */
115 movw $0x10,%ax /* KDSEL to segment registers */
116 movw %ax,%ds
117 movw %ax,%es
118 movw %ax,%gs
119 movw %ax,%ss
120 movw $0x18,%ax /* KPSEL to %fs */
121 movw %ax,%fs
122 movl %esi,%esp /* physical address stack pointer */
123
124wakeup_32stack:
125 /* Operands are overwritten in 16bit code */
126 pushl $0xabcdef09 /* ss + dummy */
127 pushl $0xabcdef08 /* fs + gs */
128 pushl $0xabcdef07 /* ds + es */
129 pushl $0xabcdef06 /* gdt:base (physical address) */
130 pushl $0xabcdef05 /* recover address */
131 pushl $0xabcdef04 /* idt:base */
132 pushl $0xabcdef03 /* ldt + idt:limit */
133 pushl $0xabcdef02 /* gdt:base */
134 pushl $0xabcdef01 /* TR + gdt:limit */
135 pushl $0xabcdef00 /* CR0 */
136
137 movl %esp,%ebp
138#define CR0_REGISTER 0(%ebp)
139#define TASK_REGISTER 4(%ebp)
140#define PREVIOUS_GDT 6(%ebp)
141#define PREVIOUS_LDT 12(%ebp)
142#define PREVIOUS_IDT 14(%ebp)
143#define RECOVER_ADDR 20(%ebp)
144#define PHYSICAL_GDT_BASE 24(%ebp)
145#define PREVIOUS_DS 28(%ebp)
146#define PREVIOUS_ES 30(%ebp)
147#define PREVIOUS_FS 32(%ebp)
148#define PREVIOUS_GS 34(%ebp)
149#define PREVIOUS_SS 36(%ebp)
150
151 /* Fixup TSS type field */
152#define TSS_TYPEFIX_MASK 0xf9
153 xorl %esi,%esi
154 movl PHYSICAL_GDT_BASE,%ebx
155 movw TASK_REGISTER,%si
156 leal (%ebx,%esi),%eax /* get TSS segment descriptor */
157 andb $TSS_TYPEFIX_MASK,5(%eax)
158
159 /* Prepare to return to sleep/wakeup code point */
160 lgdt PREVIOUS_GDT
161 lidt PREVIOUS_IDT
162
163 xorl %eax,%eax
164 movl %eax,%ebx
165 movl %eax,%ecx
166 movl %eax,%edx
167 movl %eax,%esi
168 movl %eax,%edi
169 movl PREVIOUS_DS,%ebx
170 movl PREVIOUS_FS,%ecx
171 movl PREVIOUS_SS,%edx
172 movw TASK_REGISTER,%si
173 shll $16,%esi
174 movw PREVIOUS_LDT,%si
175 movl RECOVER_ADDR,%edi
176
177 /* Enable paging and etc. */
178 movl CR0_REGISTER,%eax
179 movl %eax,%cr0
180
181 /* Flush the prefetch queue */
182 jmp 1f
1831: jmp 1f
1841:
185 /*
186 * Now that we are in kernel virtual memory addressing
187 * %ebx: ds + es
188 * %ecx: fs + gs
189 * %edx: ss + dummy
190 * %esi: LDTR + TR
191 * %edi: recover address
192 */
193
194 nop
195
196 movl %esi,%eax /* LDTR + TR */
197 lldt %ax /* load LDT register */
198 shrl $16,%eax
199 ltr %ax /* load task register */
200
201 /* Restore segment registers */
202 movl %ebx,%eax /* ds + es */
203 movw %ax,%ds
204 shrl $16,%eax
205 movw %ax,%es
206 movl %ecx,%eax /* fs + gs */
207 movw %ax,%fs
208 shrl $16,%eax
209 movw %ax,%gs
210 movl %edx,%eax /* ss */
211 movw %ax,%ss
212
213 /* Jump to acpi_restorecpu() */
214 jmp *%edi
215
/*
 * Saved-state storage, addressed from real mode.
 * NOTE(review): presumably filled in by the ACPI sleep C code before
 * entering the sleep state — confirm against the caller.
 */
physical_gdt:		.word 0
			.long 0
physical_esp:		.long 0
previous_cr2:		.long 0
previous_cr3:		.long 0
previous_cr4:		.long 0
reset_video:		.long 0

/* transfer from real mode to protected mode */
previous_cr0:		.long 0
previous_tr:		.word 0
previous_gdt:		.word 0
			.long 0
previous_ldt:		.word 0
previous_idt:		.word 0
			.long 0
where_to_recover:	.long 0
previous_ds:		.word 0
previous_es:		.word 0
previous_fs:		.word 0
previous_gs:		.word 0
previous_ss:		.word 0
dummy:			.word 0