pc32: Split out isa_intr.h and move isa/intr_machdep.h to include/
sys/platform/pc32/i386/machdep.c
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */

#include "use_npx.h"
#include "use_isa.h"
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_directio.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include "opt_swap.h"
#include "opt_userconfig.h"
#include "opt_apic.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/upcall.h>
#include <sys/usched.h>
#include <sys/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>

#ifdef OLD_BUS_ARCH
#include <bus/isa/isa_device.h>
#endif
#include <machine_base/isa/isa_intr.h>
#include <machine_base/isa/elcr_var.h>
#include <bus/isa/rtc.h>
#include <machine/vm86.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

#include <sys/machintr.h>

#define PHYSMAP_ENTRIES		10

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup(void *);
#ifndef CPU_DISABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_DISABLE_SSE */
#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */
static void init_locks(void);

SYSINIT(cpu, SI_BOOT2_SMP, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;
#ifdef SMP
int64_t tsc_offsets[MAXCPU];
#else
int64_t tsc_offsets[1];
#endif

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

int physmem = 0;

u_long ebda_addr = 0;

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - vmstats.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

vm_paddr_t Maxmem;
vm_paddr_t Realmem;

vm_paddr_t phys_avail[PHYSMAP_ENTRIES*2+2];
vm_paddr_t dump_avail[PHYSMAP_ENTRIES*2+2];

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
static struct trapframe proc0_tf;

static void
cpu_startup(void *dummy)
{
	caddr_t v;
	vm_size_t size = 0;
	vm_offset_t firstaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	kprintf("%s", version);
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	kprintf("real memory  = %ju (%ju MB)\n",
		(intmax_t)Realmem,
		(intmax_t)Realmem / 1024 / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		kprintf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 = phys_avail[indx + 1] - phys_avail[indx];

			kprintf("0x%08llx - 0x%08llx, %llu bytes (%llu pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

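	/*
	 * Illustrative note (added commentary, not from the original
	 * source): valloc() is just a pointer bump, e.g.
	 * valloc(buf, struct buf, nbuf) expands to
	 *	buf = (struct buf *)v; v = (caddr_t)(buf + nbuf);
	 * so the first pass (firstaddr == 0) merely measures the space
	 * required and the second pass hands out the real addresses.
	 */
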
	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;
		int kbytes = physmem * (PAGE_SIZE / 1024);

		nbuf = 50;
		if (kbytes > 4096)
			nbuf += min((kbytes - 4096) / factor, 65536 / factor);
		if (kbytes > 65536)
			nbuf += (kbytes - 65536) * 2 / (factor * 5);
		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
	}

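	/*
	 * Worked example (added illustration, assuming the usual BKVASIZE
	 * of 16384, i.e. factor == 64): with 128MB of ram kbytes == 131072,
	 * so
	 *	nbuf  = 50 + min((131072 - 4096) / 64, 65536 / 64)  -> 1074
	 *	nbuf += (131072 - 65536) * 2 / 320                  -> +409
	 * giving roughly 1483 buffers before the maxbcache cap applies.
	 */
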
	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (virtual_end - virtual_start) / (BKVASIZE * 2)) {
		nbuf = (virtual_end - virtual_start) / (BKVASIZE * 2);
		kprintf("Warning: nbufs capped at %d\n", nbuf);
	}

	/* limit to 128 on i386 */
	nswbuf = max(min(nbuf/4, 128), 16);
#ifdef NSWBUF_MIN
	if (nswbuf < NSWBUF_MIN)
		nswbuf = NSWBUF_MIN;
#endif
#ifdef DIRECTIO
	ffs_rawread_setup();
#endif

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = kmem_alloc(&kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva,
		      (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	kmem_suballoc(&clean_map, &buffer_map, &buffer_sva, &buffer_eva,
		      (nbuf*BKVASIZE));
	buffer_map.system_map = 1;
	kmem_suballoc(&clean_map, &pager_map, &pager_sva, &pager_eva,
		      (nswbuf*MAXPHYS) + pager_map_size);
	pager_map.system_map = 1;

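	/*
	 * Illustrative note (added commentary, not from the original
	 * source): clean_map is a submap carved out of kernel_map and is
	 * itself subdivided into buffer_map (nbuf * BKVASIZE bytes) and
	 * pager_map (nswbuf * MAXPHYS plus pager_map_size); both are
	 * flagged as system maps.
	 */
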
#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	kprintf("avail memory = %ju (%ju MB)\n",
		(intmax_t)ptoa(vmstats.v_free_count),
		(intmax_t)ptoa(vmstats.v_free_count) / 1024 / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	/* Log ELCR information */
	elcr_dump();

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
	 */
	mp_start();			/* fire up the APs and APICs */
	mp_announce();
#endif  /* SMP */
	cpu_setregs();
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_gs, sizeof(struct trapframe));

	/* make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* save mailbox pending state for syscall interlock semantics */
	if (p->p_flag & P_MAILBOX)
		sf.sf_uc.uc_mcontext.mc_xflags |= PGEX_MAILBOX;

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(lp->lwp_sigstk.ss_sp +
		    lp->lwp_sigstk.ss_size - sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		sfp = (struct sigframe *)regs->tf_esp - 1;
	}

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void*)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);

	/*
	 * i386 abi specifies that the direction flag must be cleared
	 * on function entry
	 */
	regs->tf_eflags &= ~(PSL_T|PSL_D);

	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;

	/*
	 * Allow the signal handler to inherit %fs in addition to %gs as
	 * the userland program might be using both.
	 *
	 * However, if a T_PROTFLT occurred the segment registers could be
	 * totally broken.  They must be reset in order to be able to
	 * return to userland.
	 */
	if (regs->tf_trapno == T_PROTFLT) {
		regs->tf_fs = _udatasel;
		regs->tf_gs = _udatasel;
	}
	regs->tf_ss = _udatasel;
}

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ds = _udatasel;
	frame->tf_es = _udatasel;	/* XXX allow userland this one too? */
#if 0
	frame->tf_fs = _udatasel;
	frame->tf_gs = _udatasel;
#endif
	frame->tf_ss = _udatasel;
	frame->tf_eflags &= (PSL_RF | PSL_USERCHANGE);
	frame->tf_eflags |= PSL_RESERVED_DEFAULT | PSL_I;
	return(0);
}

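/*
 * Illustrative note (added commentary, not from the original source):
 * the eflags handling above masks the frame down to the user-changeable
 * bits (plus PSL_RF), then forces the reserved-default bits and PSL_I
 * back on, so the emulated context always resumes with interrupts
 * enabled and no privileged flags set.
 */
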
int
cpu_sanitize_tls(struct savetls *tls)
{
	struct segment_descriptor *desc;
	int i;

	for (i = 0; i < NGTLS; ++i) {
		desc = &tls->tls[i];
		if (desc->sd_dpl == 0 && desc->sd_type == 0)
			continue;
		if (desc->sd_def32 == 0)
			return(ENXIO);
		if (desc->sd_type != SDT_MEMRWA)
			return(ENXIO);
		if (desc->sd_dpl != SEL_UPL)
			return(ENXIO);
		if (desc->sd_xx != 0 || desc->sd_p != 1)
			return(ENXIO);
	}
	return(0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)

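/*
 * Illustrative note (added commentary, not from the original source):
 * EFL_SECURE() accepts the new eflags only if it differs from the old
 * value solely in user-changeable bits (so IOPL and friends cannot be
 * altered), and CS_SECURE() accepts only a ring-3 (SEL_UPL) code
 * selector.
 */
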
int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	int cs;
	int eflags;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	eflags = ucp->uc_mcontext.mc_eflags;

	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
#if 0
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
#endif
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			kprintf("sigreturn: eflags = 0x%x\n", eflags);
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		bcopy(&ucp->uc_mcontext.mc_gs, regs, sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	crit_enter();
	npxpop(&ucp->uc_mcontext);

	/*
	 * Merge saved signal mailbox pending flag to maintain interlock
	 * semantics against system calls.
	 */
	if (ucp->uc_mcontext.mc_xflags & PGEX_MAILBOX)
		p->p_flag |= P_MAILBOX;

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	crit_exit();
	return(EJUSTRETURN);
}

/*
 * Stack frame on entry to function.  %eax will contain the function vector,
 * %ecx will contain the function data.  flags, ecx, and eax will have
 * already been pushed on the stack.
 */
struct upc_frame {
	register_t	eax;
	register_t	ecx;
	register_t	edx;
	register_t	flags;
	register_t	oldip;
};

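/*
 * Illustrative note (added commentary, not from the original source):
 * after sendupcall() below pushes this frame, user memory looks like
 *
 *	regs->tf_esp -> [eax][ecx][edx][flags][oldip]
 *
 * and fetchupcall() pops the same frame to resume the interrupted code
 * at oldip with the scratch registers restored.
 */
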
void
sendupcall(struct vmupcall *vu, int morepending)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	struct upcall upcall;
	struct upc_frame upc_frame;
	int crit_count = 0;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		lp->lwp_md.md_regs->tf_trapno = 0;
		vkernel_trap(lp, lp->lwp_md.md_regs);
	}

	/*
	 * Get the upcall data structure
	 */
	if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) ||
	    copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int))
	) {
		vu->vu_pending = 0;
		kprintf("bad upcall address\n");
		return;
	}

	/*
	 * If the data structure is already marked pending or has a critical
	 * section count, mark the data structure as pending and return
	 * without doing an upcall.  vu_pending is left set.
	 */
	if (upcall.upc_pending || crit_count >= vu->vu_pending) {
		if (upcall.upc_pending < vu->vu_pending) {
			upcall.upc_pending = vu->vu_pending;
			copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
				sizeof(upcall.upc_pending));
		}
		return;
	}

	/*
	 * We can run this upcall now, clear vu_pending.
	 *
	 * Bump our critical section count and set or clear the
	 * user pending flag depending on whether more upcalls are
	 * pending.  The user will be responsible for calling
	 * upc_dispatch(-1) to process remaining upcalls.
	 */
	vu->vu_pending = 0;
	upcall.upc_pending = morepending;
	++crit_count;
	copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
		sizeof(upcall.upc_pending));
	copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff,
		sizeof(int));

	/*
	 * Construct a stack frame and issue the upcall
	 */
	regs = lp->lwp_md.md_regs;
	upc_frame.eax = regs->tf_eax;
	upc_frame.ecx = regs->tf_ecx;
	upc_frame.edx = regs->tf_edx;
	upc_frame.flags = regs->tf_eflags;
	upc_frame.oldip = regs->tf_eip;
	if (copyout(&upc_frame, (void *)(regs->tf_esp - sizeof(upc_frame)),
		    sizeof(upc_frame)) != 0) {
		kprintf("bad stack on upcall\n");
	} else {
		regs->tf_eax = (register_t)vu->vu_func;
		regs->tf_ecx = (register_t)vu->vu_data;
		regs->tf_edx = (register_t)lp->lwp_upcall;
		regs->tf_eip = (register_t)vu->vu_ctx;
		regs->tf_esp -= sizeof(upc_frame);
	}
}

/*
 * fetchupcall occurs in the context of a system call, which means that
 * we have to return EJUSTRETURN in order to prevent eax and edx from
 * being overwritten by the syscall return value.
 *
 * if vu is not NULL we return the new context in %edx, the new data in %ecx,
 * and the function pointer in %eax.
 */
int
fetchupcall(struct vmupcall *vu, int morepending, void *rsp)
{
	struct upc_frame upc_frame;
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	int error;
	struct upcall upcall;
	int crit_count;

	regs = lp->lwp_md.md_regs;

	error = copyout(&morepending, &lp->lwp_upcall->upc_pending, sizeof(int));
	if (error == 0) {
		if (vu) {
			/*
			 * This jumps us to the next ready context.
			 */
			vu->vu_pending = 0;
			error = copyin(lp->lwp_upcall, &upcall, sizeof(upcall));
			crit_count = 0;
			if (error == 0)
				error = copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int));
			++crit_count;
			if (error == 0)
				error = copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff, sizeof(int));
			regs->tf_eax = (register_t)vu->vu_func;
			regs->tf_ecx = (register_t)vu->vu_data;
			regs->tf_edx = (register_t)lp->lwp_upcall;
			regs->tf_eip = (register_t)vu->vu_ctx;
			regs->tf_esp = (register_t)rsp;
		} else {
			/*
			 * This returns us to the originally interrupted code.
			 */
			error = copyin(rsp, &upc_frame, sizeof(upc_frame));
			regs->tf_eax = upc_frame.eax;
			regs->tf_ecx = upc_frame.ecx;
			regs->tf_edx = upc_frame.edx;
			regs->tf_eflags = (regs->tf_eflags & ~PSL_USERCHANGE) |
					(upc_frame.flags & PSL_USERCHANGE);
			regs->tf_eip = upc_frame.oldip;
			regs->tf_esp = (register_t)((char *)rsp + sizeof(upc_frame));
		}
	}
	if (error == 0)
		error = EJUSTRETURN;
	return(error);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ __volatile("hlt");
}

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * NOTE: On an SMP system we rely on a scheduler IPI to wake a HLTed cpu up.
 *	 However, there are cases where the idlethread will be entered with
 *	 the possibility that no IPI will occur and in such cases
 *	 lwkt_switch() sets RQF_WAKEUP.  We usually check
 *	 RQF_IDLECHECK_WK_MASK.
 *
 * NOTE: cpu_idle_hlt defaults to 2 (use ACPI sleep states).  Set to
 *	 1 to just use hlt and for debugging purposes.
 */
static int	cpu_idle_hlt = 2;
static int	cpu_idle_hltcnt;
static int	cpu_idle_spincnt;
static u_int	cpu_idle_repeat = 4;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
    &cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
    &cpu_idle_spincnt, 0, "Idle loop entry spins");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_repeat, CTLFLAG_RW,
    &cpu_idle_repeat, 0, "Idle entries before acpi hlt");

static void
cpu_idle_default_hook(void)
{
	/*
	 * We must guarantee that hlt is exactly the instruction following
	 * the sti: sti only enables interrupts after the next instruction
	 * completes, so no interrupt can slip in between the reqflags
	 * check and the halt.
	 */
	__asm __volatile("sti; hlt");
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default_hook;

void
cpu_idle(void)
{
	globaldata_t gd = mycpu;
	struct thread *td = gd->gd_curthread;
	int reqflags;
	int quick;

	crit_exit();
	KKASSERT(td->td_critcount == 0);
	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * When halting inside a cli we must check for reqflags
		 * races, particularly [re]schedule requests.  Running
		 * splz() does the job.
		 *
		 * cpu_idle_hlt:
		 *	0	Never halt, just spin
		 *
		 *	1	Always use HLT (or MONITOR/MWAIT if avail).
		 *		This typically eats more power than the
		 *		ACPI halt.
		 *
		 *	2	Use HLT/MONITOR/MWAIT up to a point and then
		 *		use the ACPI halt (default).  This is a hybrid
		 *		approach.  See machdep.cpu_idle_repeat.
		 *
		 *	3	Always use the ACPI halt.  This typically
		 *		eats the least amount of power but the cpu
		 *		will be slow waking up.  Slows down e.g.
		 *		compiles and other pipe/event oriented stuff.
		 *
		 * NOTE: Interrupts are enabled and we are not in a critical
		 *	 section.
		 *
		 * NOTE: Preemptions do not reset gd_idle_repeat.  Also we
		 *	 don't bother capping gd_idle_repeat, it is ok if
		 *	 it overflows.
		 */
		++gd->gd_idle_repeat;
		reqflags = gd->gd_reqflags;
		quick = (cpu_idle_hlt == 1) ||
			(cpu_idle_hlt < 3 &&
			 gd->gd_idle_repeat < cpu_idle_repeat);

		if (quick && (cpu_mi_feature & CPU_MI_MONITOR) &&
		    (reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			cpu_mmw_pause_int(&gd->gd_reqflags, reqflags);
			++cpu_idle_hltcnt;
		} else if (cpu_idle_hlt) {
			__asm __volatile("cli");
			splz();
			if ((gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
				if (quick)
					cpu_idle_default_hook();
				else
					cpu_idle_hook();
			}
			__asm __volatile("sti");
			++cpu_idle_hltcnt;
		} else {
			splz();
			__asm __volatile("sti");
			++cpu_idle_spincnt;
		}
	}
}

#ifdef SMP

/*
 * This routine is called if a spinlock has been held through the
 * exponential backoff period and is seriously contested.  On a real cpu
 * we let it spin.
 */
void
cpu_spinlock_contested(void)
{
	cpu_pause();
}

#endif

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	/* was i386_user_cleanup() in NetBSD */
	user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_gs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * note: do not set CR0_TS here.  npxinit() must do it after clearing
	 * gd_npxthread.  Otherwise a preemptive interrupt thread may panic
	 * in npxdna().
	 */
	crit_enter();
	load_cr0(rcr0() | CR0_MP);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif
	crit_exit();

	/*
	 * note: linux emulator needs edx to be 0x0 on entry, which is
	 * handled in execve simply by setting the 64 bit syscall
	 * return value to 0.
	 */
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

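/*
 * Illustrative note (added commentary, not from the original source) on
 * the CR0 bits set above: CR0_NE selects native (vector 16) FPU error
 * reporting, CR0_MP/CR0_TS arm the lazy-FPU device-not-available trap,
 * CR0_WP makes page write protection apply in supervisor mode, and
 * CR0_AM allows EFLAGS.AC alignment checking.
 */
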
static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

extern u_long bootdev;		/* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */

/* table descriptors - used to load tables by cpu */
struct region_descriptor r_gdt, r_idt;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GTLS_START 15 TLS */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTLS_START+1 16 TLS */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTLS_END 17 TLS */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

#define	IDTVEC(name)	__CONCAT(X,name)

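/*
 * Illustrative note (added commentary, not from the original source):
 * IDTVEC(div) simply expands to the assembly entry symbol Xdiv, so the
 * extern list below names the low-level trap entry points implemented
 * in assembly.  A typical (hypothetical) gate installation would look
 * like
 *
 *	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
 *	       GSEL(GCODE_SEL, SEL_KPL));
 *
 * i.e. vector 0 (divide fault) becomes a 386 trap gate running in the
 * kernel code segment.
 */
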
extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(syscall),
	IDTVEC(rsvd0);
extern inthand_t
	IDTVEC(int0x80_syscall);

#ifdef DEBUG_INTERRUPTS
extern inthand_t *Xrsvdary[256];
#endif

void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx, da_indx;
	int hasbrokenint12;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa;
	vm_offset_t physmap[PHYSMAP_ENTRIES*2];
	pt_entry_t *pte;
	quad_t maxmem;
	struct {
		u_int64_t base;
		u_int64_t length;
		u_int32_t type;
	} *smap;
	quad_t dcons_addr, dcons_size;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Some newer BIOSes have a broken INT 12H implementation which
	 * causes a kernel panic immediately.  In this case, we need to
	 * scan the SMAP with INT 15:E820 first, then determine base
	 * memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12) {
		goto int15e820;
	}

	/*
	 * Perform "base memory" related probes & setup.  If we get a crazy
	 * value give the bios some scribble space just in case.
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		kprintf("Preposterous BIOS basemem of %uK, "
			"truncating to < 640K\n", basemem);
		basemem = 636;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

int15e820:
	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pte = vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
#define SMAPSIZ		sizeof(*smap)
#define SMAP_SIG	0x534D4150	/* 'SMAP' */

	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = SMAPSIZ;
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			kprintf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
				smap->type,
				*(u_int32_t *)((char *)&smap->base + 4),
				(u_int32_t)smap->base,
				*(u_int32_t *)((char *)&smap->length + 4),
				(u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		Realmem += smap->length;

		if (smap->base >= 0xffffffffLLU) {
			kprintf("%ju MB of memory above 4GB ignored\n",
				(uintmax_t)(smap->length / 1024 / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE) {
					kprintf("Overlapping or non-monotonic "
						"memory region, ignoring "
						"second region\n");
				}
				Realmem -= smap->length;
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_ENTRIES*2) {
			kprintf("Too many segments in the physical "
				"address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
		;	/* fix GCC3.x warning */
	} while (vmf.vmf_ebx != 0);

	/*
	 * Perform "base memory" related probes & setup based on SMAP
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		if (basemem == 0) {
			basemem = 640;
		}

		if (basemem > 640) {
			kprintf("Preposterous BIOS basemem of %uK, "
				"truncating to 640K\n", basemem);
			basemem = 640;
		}

		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
			pte = vtopte(pa + KERNBASE);
			*pte = pa | PG_RW | PG_V;
		}

		pte = vm86paddr;
		for (i = basemem / 4; i < 160; i++)
			pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

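	/*
	 * Illustrative note (added commentary, not from the original
	 * source): INT 15:E801 reports extended memory as %cx 1K-blocks
	 * between 1MB and 16MB plus %dx 64K-blocks above 16MB, hence the
	 * "cx + dx * 64" conversion to kilobytes above.
	 */
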
	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code YYY */
	physmap[1] = mp_bootaddress(physmap[1]);

	/* Save EBDA address, if any */
	ebda_addr = (u_long)(*(u_short *)(KERNBASE + 0x40e));
	ebda_addr <<= 4;
#endif

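	/*
	 * Illustrative note (added commentary, not from the original
	 * source): BIOS data area word 0x40:0x0e holds the real-mode
	 * segment of the EBDA; shifting it left 4 bits converts that
	 * segment to a physical address.
	 */
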
1676 /*
1677 * Maxmem isn't the "maximum memory", it's one larger than the
1678 * highest page of the physical address space. It should be
1679 * called something like "Maxphyspage". We may adjust this
1680 * based on ``hw.physmem'' and the results of the memory test.
1681 */
1682 Maxmem = atop(physmap[physmap_idx + 1]);
1683
1684#ifdef MAXMEM
1685 Maxmem = MAXMEM / 4;
1686#endif
1687
1688 if (kgetenv_quad("hw.physmem", &maxmem))
1689 Maxmem = atop(maxmem);
1690
1691 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1692 (boothowto & RB_VERBOSE))
1693 kprintf("Physical memory use set to %lluK\n", Maxmem * 4);
1694
1695 /*
1696 * If Maxmem has been increased beyond what the system has detected,
1697 * extend the last memory segment to the new limit.
1698 */
1699 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1700 physmap[physmap_idx + 1] = ptoa(Maxmem);
1701
1702 /* call pmap initialization to make new kernel address space */
1703 pmap_bootstrap(first, 0);
1704
1705 /*
1706 * Size up each available chunk of physical memory.
1707 */
1708 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1709 pa_indx = 0;
1710 da_indx = 1;
1711 phys_avail[pa_indx++] = physmap[0];
1712 phys_avail[pa_indx] = physmap[0];
1713 dump_avail[da_indx] = physmap[0];
1714
1715 pte = CMAP1;
1716
1717 /*
1718 * Get dcons buffer address
1719 */
1720 if (kgetenv_quad("dcons.addr", &dcons_addr) == 0 ||
1721 kgetenv_quad("dcons.size", &dcons_size) == 0)
1722 dcons_addr = 0;
1723
1724 /*
1725 * physmap is in bytes, so when converting to page boundaries,
1726 * round up the start address and round down the end address.
1727 */
1728 for (i = 0; i <= physmap_idx; i += 2) {
1729 vm_offset_t end;
1730
1731 end = ptoa(Maxmem);
1732 if (physmap[i + 1] < end)
1733 end = trunc_page(physmap[i + 1]);
1734 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1735 int tmp, page_bad, full;
1736#if 0
1737 int *ptr = 0;
1738#else
1739 int *ptr = (int *)CADDR1;
1740#endif
1741 full = FALSE;
1742
1743 /*
1744 * block out kernel memory as not available.
1745 */
1746 if (pa >= 0x100000 && pa < first)
1747 goto do_dump_avail;
1748
1749 /*
1750 * block out dcons buffer
1751 */
1752 if (dcons_addr > 0
1753 && pa >= trunc_page(dcons_addr)
1754 && pa < dcons_addr + dcons_size)
1755 goto do_dump_avail;
1756
1757 page_bad = FALSE;
1758
1759 /*
1760 * map page into kernel: valid, read/write, non-cacheable
1761 */
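			/*
			 * PG_N disables caching for the test mapping so the
			 * patterns written below hit actual memory rather
			 * than being satisfied out of the CPU cache.
			 */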
1762 *pte = pa | PG_V | PG_RW | PG_N;
1763 cpu_invltlb();
1764
1765 tmp = *(int *)ptr;
1766 /*
1767 * Test for alternating 1's and 0's
1768 */
1769 *(volatile int *)ptr = 0xaaaaaaaa;
1770 if (*(volatile int *)ptr != 0xaaaaaaaa) {
1771 page_bad = TRUE;
1772 }
1773 /*
1774 * Test for alternating 0's and 1's
1775 */
1776 *(volatile int *)ptr = 0x55555555;
1777 if (*(volatile int *)ptr != 0x55555555) {
1778 page_bad = TRUE;
1779 }
1780 /*
1781 * Test for all 1's
1782 */
1783 *(volatile int *)ptr = 0xffffffff;
1784 if (*(volatile int *)ptr != 0xffffffff) {
1785 page_bad = TRUE;
1786 }
1787 /*
1788 * Test for all 0's
1789 */
1790 *(volatile int *)ptr = 0x0;
1791 if (*(volatile int *)ptr != 0x0) {
1792 page_bad = TRUE;
1793 }
1794 /*
1795 * Restore original value.
1796 */
1797 *(int *)ptr = tmp;
1798
1799 /*
1800 * Adjust array of valid/good pages.
1801 */
1802 if (page_bad == TRUE) {
1803 continue;
1804 }
1805 /*
1806 * If this good page is a continuation of the
1807 * previous set of good pages, then just increase
1808 * the end pointer. Otherwise start a new chunk.
1809 * Note that the recorded "end" points one past the
1810 * last page, making the range >= start and < end.
1811 * If we're also doing a speculative memory
1812 * test and we're at or past the end, bump up Maxmem
1813 * so that we keep going. The first bad page
1814 * will terminate the loop.
1815 */
1816 if (phys_avail[pa_indx] == pa) {
1817 phys_avail[pa_indx] += PAGE_SIZE;
1818 } else {
1819 pa_indx++;
1820 if (pa_indx >= PHYSMAP_ENTRIES*2) {
1821 kprintf("Too many holes in the physical address space, giving up\n");
1822 pa_indx--;
1823 full = TRUE;
1824 goto do_dump_avail;
1825 }
1826 phys_avail[pa_indx++] = pa; /* start */
1827 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1828 }
1829 physmem++;
1830do_dump_avail:
1831 if (dump_avail[da_indx] == pa) {
1832 dump_avail[da_indx] += PAGE_SIZE;
1833 } else {
1834 da_indx++;
1835 if (da_indx >= PHYSMAP_ENTRIES*2) {
1836 da_indx--;
1837 goto do_next;
1838 }
1839 dump_avail[da_indx++] = pa; /* start */
1840 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1841 }
1842do_next:
1843 if (full)
1844 break;
1845
1846 }
1847 }
1848 *pte = 0;
1849 cpu_invltlb();
1850
1851 /*
1852 * XXX
1853 * The last chunk must contain at least one page plus the message
1854 * buffer to avoid complicating other code (message buffer address
1855 * calculation, etc.).
1856 */
1857 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1858 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1859 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1860 phys_avail[pa_indx--] = 0;
1861 phys_avail[pa_indx--] = 0;
1862 }
1863
1864 Maxmem = atop(phys_avail[pa_indx]);
1865
1866 /* Trim off space for the message buffer. */
1867 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1868
1869 avail_end = phys_avail[pa_indx];
1870}
1871
1872#ifdef SMP
1873#ifdef APIC_IO
1874int apic_io_enable = 1; /* Enabled by default for kernels compiled w/APIC_IO */
1875#else
1876int apic_io_enable = 0; /* Disabled by default for kernels compiled without */
1877#endif
1878TUNABLE_INT("hw.apic_io_enable", &apic_io_enable);
1879extern struct machintr_abi MachIntrABI_APIC;
1880#endif
1881
1882extern struct machintr_abi MachIntrABI_ICU;
1883struct machintr_abi MachIntrABI;
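/*
 * MachIntrABI defaults to the ICU (8259) ABI below and is presumably
 * repointed at MachIntrABI_APIC later in boot when I/O APIC use is
 * enabled.
 */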
1884
1885/*
1886 * IDT VECTORS:
1887 * 0 Divide by zero
1888 * 1 Debug
1889 * 2 NMI
1890 * 3 BreakPoint
1891 * 4 OverFlow
1892 * 5 Bound-Range
1893 * 6 Invalid OpCode
1894 * 7 Device Not Available (x87)
1895 * 8 Double-Fault
1896 * 9 Coprocessor Segment overrun (unsupported, reserved)
1897 * 10 Invalid-TSS
1898 * 11 Segment not present
1899 * 12 Stack
1900 * 13 General Protection
1901 * 14 Page Fault
1902 * 15 Reserved
1903 * 16 x87 FP Exception pending
1904 * 17 Alignment Check
1905 * 18 Machine Check
1906 * 19 SIMD floating point
1907 * 20-31 reserved
1908 * 32-255 INTn/external sources
1909 */
1910void
1911init386(int first)
1912{
1913 struct gate_descriptor *gdp;
1914 int gsel_tss, metadata_missing, off, x;
1915 struct mdglobaldata *gd;
1916
1917 /*
1918 * Prevent lowering of the ipl if we call tsleep() early.
1919 */
1920 gd = &CPU_prvspace[0].mdglobaldata;
1921 bzero(gd, sizeof(*gd));
1922
1923 gd->mi.gd_curthread = &thread0;
1924 thread0.td_gd = &gd->mi;
1925
1926 atdevbase = ISA_HOLE_START + KERNBASE;
1927
1928 metadata_missing = 0;
1929 if (bootinfo.bi_modulep) {
1930 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1931 preload_bootstrap_relocate(KERNBASE);
1932 } else {
1933 metadata_missing = 1;
1934 }
1935 if (bootinfo.bi_envp)
1936 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1937
1938 /*
1939 * Default MachIntrABI to ICU
1940 */
1941 MachIntrABI = MachIntrABI_ICU;
1942#ifdef SMP
1943 TUNABLE_INT_FETCH("hw.apic_io_enable", &apic_io_enable);
1944#endif
1945
1946 /*
1947 * start with one cpu. Note: with one cpu, ncpus2_shift, ncpus2_mask,
1948 * and ncpus_fit_mask remain 0.
1949 */
1950 ncpus = 1;
1951 ncpus2 = 1;
1952 ncpus_fit = 1;
1953 /* Init basic tunables, hz etc */
1954 init_param1();
1955
1956 /*
1957 * make gdt memory segments, the code segment goes up to end of the
1958 * page with etext in it, the data segment goes to the end of
1959 * the address space
1960 */
1961 /*
1962 * XXX text protection is temporarily (?) disabled. The limit was
1963 * i386_btop(round_page(etext)) - 1.
1964 */
1965 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1966 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1967
1968 gdt_segs[GPRIV_SEL].ssd_limit =
1969 atop(sizeof(struct privatespace) - 1);
1970 gdt_segs[GPRIV_SEL].ssd_base = (int) &CPU_prvspace[0];
1971 gdt_segs[GPROC0_SEL].ssd_base =
1972 (int) &CPU_prvspace[0].mdglobaldata.gd_common_tss;
1973
1974 gd->mi.gd_prvspace = &CPU_prvspace[0];
1975
1976 /*
1977 * Note: on both UP and SMP curthread must be set non-NULL
1978 * early in the boot sequence because the system assumes
1979 * that 'curthread' is never NULL.
1980 */
1981
1982 for (x = 0; x < NGDT; x++) {
1983#ifdef BDE_DEBUGGER
1984 /* avoid overwriting db entries with APM ones */
1985 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
1986 continue;
1987#endif
1988 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1989 }
1990
1991 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1992 r_gdt.rd_base = (int) gdt;
1993 lgdt(&r_gdt);
1994
1995 mi_gdinit(&gd->mi, 0);
1996 cpu_gdinit(gd, 0);
1997 mi_proc0init(&gd->mi, proc0paddr);
1998 safepri = TDPRI_MAX;
1999
2000 /* make ldt memory segments */
2001 /*
2002 * XXX - VM_MAX_USER_ADDRESS is an end address, not a max. And it
2003 * should be spelled ...MAX_USER...
2004 */
2005 ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAX_USER_ADDRESS - 1);
2006 ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAX_USER_ADDRESS - 1);
2007 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2008 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2009
2010 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2011 lldt(_default_ldt);
2012 gd->gd_currentldt = _default_ldt;
2013 /* spinlocks and the BGL */
2014 init_locks();
2015
2016 /*
2017 * Setup the hardware exception table. Most exceptions use
2018 * SDT_SYS386TGT, known as a 'trap gate'. Trap gates leave
2019 * interrupts enabled. VM page faults use SDT_SYS386IGT, known as
2020 * an 'interrupt trap gate', which disables interrupts on entry,
2021 * in order to be able to poll the appropriate CRn register to
2022 * determine the fault address.
2023 */
2024 for (x = 0; x < NIDT; x++) {
2025#ifdef DEBUG_INTERRUPTS
2026 setidt(x, Xrsvdary[x], SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2027#else
2028 setidt(x, &IDTVEC(rsvd0), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2029#endif
2030 }
2031 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2032 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2033 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2034 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
2035 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
2036 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2037 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2038 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2039 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2040 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2041 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2042 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2043 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2044 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2045 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2046 setidt(15, &IDTVEC(rsvd0), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2047 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2048 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2049 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2050 setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2051 setidt(0x80, &IDTVEC(int0x80_syscall),
2052 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
2053
2054 r_idt.rd_limit = sizeof(idt0) - 1;
2055 r_idt.rd_base = (int) idt;
2056 lidt(&r_idt);
2057
2058 /*
2059 * Initialize the console before we print anything out.
2060 */
2061 cninit();
2062
2063 if (metadata_missing)
2064 kprintf("WARNING: loader(8) metadata is missing!\n");
2065
2066#if NISA > 0
2067 elcr_probe();
2068 isa_defaultirq();
2069#endif
2070 rand_initialize();
2071
2072#ifdef DDB
2073 kdb_init();
2074 if (boothowto & RB_KDB)
2075 Debugger("Boot flags requested debugger");
2076#endif
2077
2078 finishidentcpu(); /* Final stage of CPU initialization */
2079 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2080 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
2081 initializecpu(); /* Initialize CPU registers */
2082
2083 /*
2084 * make an initial tss so cpu can get interrupt stack on syscall!
2085 * The 16 bytes is to save room for a VM86 context.
2086 */
2087 gd->gd_common_tss.tss_esp0 = (int) thread0.td_pcb - 16;
2088 gd->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
2089 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2090 gd->gd_tss_gdt = &gdt[GPROC0_SEL].sd;
2091 gd->gd_common_tssd = *gd->gd_tss_gdt;
2092 gd->gd_common_tss.tss_ioopt = (sizeof gd->gd_common_tss) << 16;
2093 ltr(gsel_tss);
2094
2095 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2096 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
2097 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2098 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2099 dblfault_tss.tss_cr3 = (int)IdlePTD;
2100 dblfault_tss.tss_eip = (int) dblfault_handler;
2101 dblfault_tss.tss_eflags = PSL_KERNEL;
2102 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2103 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2104 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2105 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2106 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2107
2108 vm86_initialize();
2109 getmemsize(first);
2110 init_param2(physmem);
2111
2112 /* now running on new page tables, configured, and u/iom is accessible */
2113
2114 /* Map the message buffer. */
2115 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2116 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2117
2118 msgbufinit(msgbufp, MSGBUF_SIZE);
2119
2120 /* make a call gate to reenter kernel with */
2121 gdp = &ldt[LSYS5CALLS_SEL].gd;
2122
2123 x = (int) &IDTVEC(syscall);
2124 gdp->gd_looffset = x++;
2125 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2126 gdp->gd_stkcpy = 1;
2127 gdp->gd_type = SDT_SYS386CGT;
2128 gdp->gd_dpl = SEL_UPL;
2129 gdp->gd_p = 1;
2130 gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
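	/*
	 * A 386 call gate (SDT_SYS386CGT) at DPL 3: user code may call
	 * through it, and gd_stkcpy = 1 copies one longword of arguments
	 * from the user stack during the privilege transition.
	 */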
2131
2132 /* XXX does this work? */
2133 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2134 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2135
2136 /* transfer to user mode */
2137
2138 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
2139 _udatasel = LSEL(LUDATA_SEL, SEL_UPL);
2140
2141 /* setup proc 0's pcb */
2142 thread0.td_pcb->pcb_flags = 0;
2143 thread0.td_pcb->pcb_cr3 = (int)IdlePTD; /* should already be setup */
2144 thread0.td_pcb->pcb_ext = 0;
2145 lwp0.lwp_md.md_regs = &proc0_tf;
2146}
2147
2148/*
2149 * Initialize machine-dependent portions of the global data structure.
2150 * Note that the global data area and cpu0's idlestack in the private
2151 * data space were allocated in locore.
2152 *
2153 * Note: the idlethread's cpl is 0
2154 *
2155 * WARNING! Called from early boot, 'mycpu' may not work yet.
2156 */
2157void
2158cpu_gdinit(struct mdglobaldata *gd, int cpu)
2159{
2160 if (cpu)
2161 gd->mi.gd_curthread = &gd->mi.gd_idlethread;
2162
2163 lwkt_init_thread(&gd->mi.gd_idlethread,
2164 gd->mi.gd_prvspace->idlestack,
2165 sizeof(gd->mi.gd_prvspace->idlestack),
2166 0, &gd->mi);
2167 lwkt_set_comm(&gd->mi.gd_idlethread, "idle_%d", cpu);
2168 gd->mi.gd_idlethread.td_switch = cpu_lwkt_switch;
2169 gd->mi.gd_idlethread.td_sp -= sizeof(void *);
2170 *(void **)gd->mi.gd_idlethread.td_sp = cpu_idle_restore;
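	/*
	 * Hand-build the idle thread's initial frame: a single "return
	 * address" is pushed so the first switch into the thread resumes
	 * execution at cpu_idle_restore.
	 */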
2171}
2172
2173int
2174is_globaldata_space(vm_offset_t saddr, vm_offset_t eaddr)
2175{
2176 if (saddr >= (vm_offset_t)&CPU_prvspace[0] &&
2177 eaddr <= (vm_offset_t)&CPU_prvspace[MAXCPU]) {
2178 return (TRUE);
2179 }
2180 return (FALSE);
2181}
2182
2183struct globaldata *
2184globaldata_find(int cpu)
2185{
2186 KKASSERT(cpu >= 0 && cpu < ncpus);
2187 return(&CPU_prvspace[cpu].mdglobaldata.mi);
2188}
2189
2190#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2191static void f00f_hack(void *unused);
2192SYSINIT(f00f_hack, SI_BOOT2_BIOS, SI_ORDER_ANY, f00f_hack, NULL);
2193
2194static void
2195f00f_hack(void *unused)
2196{
2197 struct gate_descriptor *new_idt;
2198 vm_offset_t tmp;
2199
2200 if (!has_f00f_bug)
2201 return;
2202
2203 kprintf("Intel Pentium detected, installing workaround for F00F bug\n");
2204
2205 r_idt.rd_limit = sizeof(idt0) - 1;
2206
2207 tmp = kmem_alloc(&kernel_map, PAGE_SIZE * 2);
2208 if (tmp == 0)
2209 panic("kmem_alloc returned 0");
2210 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
2211 panic("kmem_alloc returned non-page-aligned memory");
2212 /* Put the first seven entries in the lower page */
2213 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
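	/*
	 * The first page is made read-only below; ending descriptors 0-6
	 * exactly at that page's boundary means the CPU's locked IDT
	 * access during an "f00f" lockup faults instead of wedging the
	 * machine (the usual description of this workaround).
	 */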
2214 bcopy(idt, new_idt, sizeof(idt0));
2215 r_idt.rd_base = (int)new_idt;
2216 lidt(&r_idt);
2217 idt = new_idt;
2218 if (vm_map_protect(&kernel_map, tmp, tmp + PAGE_SIZE,
2219 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2220 panic("vm_map_protect failed");
2221 return;
2222}
2223#endif /* defined(I586_CPU) && !NO_F00F_HACK */
2224
2225int
2226ptrace_set_pc(struct lwp *lp, unsigned long addr)
2227{
2228 lp->lwp_md.md_regs->tf_eip = addr;
2229 return (0);
2230}
2231
2232int
2233ptrace_single_step(struct lwp *lp)
2234{
2235 lp->lwp_md.md_regs->tf_eflags |= PSL_T;
2236 return (0);
2237}
2238
2239 int
2240fill_regs(struct lwp *lp, struct reg *regs)
2241{
2242 struct trapframe *tp;
2243
2244 tp = lp->lwp_md.md_regs;
2245 regs->r_gs = tp->tf_gs;
2246 regs->r_fs = tp->tf_fs;
2247 regs->r_es = tp->tf_es;
2248 regs->r_ds = tp->tf_ds;
2249 regs->r_edi = tp->tf_edi;
2250 regs->r_esi = tp->tf_esi;
2251 regs->r_ebp = tp->tf_ebp;
2252 regs->r_ebx = tp->tf_ebx;
2253 regs->r_edx = tp->tf_edx;
2254 regs->r_ecx = tp->tf_ecx;
2255 regs->r_eax = tp->tf_eax;
2256 regs->r_eip = tp->tf_eip;
2257 regs->r_cs = tp->tf_cs;
2258 regs->r_eflags = tp->tf_eflags;
2259 regs->r_esp = tp->tf_esp;
2260 regs->r_ss = tp->tf_ss;
2261 return (0);
2262}
2263
2264int
2265set_regs(struct lwp *lp, struct reg *regs)
2266{
2267 struct trapframe *tp;
2268
2269 tp = lp->lwp_md.md_regs;
2270 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2271 !CS_SECURE(regs->r_cs))
2272 return (EINVAL);
2273 tp->tf_gs = regs->r_gs;
2274 tp->tf_fs = regs->r_fs;
2275 tp->tf_es = regs->r_es;
2276 tp->tf_ds = regs->r_ds;
2277 tp->tf_edi = regs->r_edi;
2278 tp->tf_esi = regs->r_esi;
2279 tp->tf_ebp = regs->r_ebp;
2280 tp->tf_ebx = regs->r_ebx;
2281 tp->tf_edx = regs->r_edx;
2282 tp->tf_ecx = regs->r_ecx;
2283 tp->tf_eax = regs->r_eax;
2284 tp->tf_eip = regs->r_eip;
2285 tp->tf_cs = regs->r_cs;
2286 tp->tf_eflags = regs->r_eflags;
2287 tp->tf_esp = regs->r_esp;
2288 tp->tf_ss = regs->r_ss;
2289 return (0);
2290}
2291
2292#ifndef CPU_DISABLE_SSE
2293static void
2294fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
2295{
2296 struct env87 *penv_87 = &sv_87->sv_env;
2297 struct envxmm *penv_xmm = &sv_xmm->sv_env;
2298 int i;
2299
2300 /* FPU control/status */
2301 penv_87->en_cw = penv_xmm->en_cw;
2302 penv_87->en_sw = penv_xmm->en_sw;
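	/*
	 * Note: en_tw here is the FXSAVE abridged (8-bit) tag word; it is
	 * copied through as-is rather than being expanded to the full
	 * 16-bit 387 tag format.
	 */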
2303 penv_87->en_tw = penv_xmm->en_tw;
2304 penv_87->en_fip = penv_xmm->en_fip;
2305 penv_87->en_fcs = penv_xmm->en_fcs;
2306 penv_87->en_opcode = penv_xmm->en_opcode;
2307 penv_87->en_foo = penv_xmm->en_foo;
2308 penv_87->en_fos = penv_xmm->en_fos;
2309
2310 /* FPU registers */
2311 for (i = 0; i < 8; ++i)
2312 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2313
2314 sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
2315}
2316
2317static void
2318set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
2319{
2320 struct env87 *penv_87 = &sv_87->sv_env;
2321 struct envxmm *penv_xmm = &sv_xmm->sv_env;
2322 int i;
2323
2324 /* FPU control/status */
2325 penv_xmm->en_cw = penv_87->en_cw;
2326 penv_xmm->en_sw = penv_87->en_sw;
2327 penv_xmm->en_tw = penv_87->en_tw;
2328 penv_xmm->en_fip = penv_87->en_fip;
2329 penv_xmm->en_fcs = penv_87->en_fcs;
2330 penv_xmm->en_opcode = penv_87->en_opcode;
2331 penv_xmm->en_foo = penv_87->en_foo;
2332 penv_xmm->en_fos = penv_87->en_fos;
2333
2334 /* FPU registers */
2335 for (i = 0; i < 8; ++i)
2336 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2337
2338 sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
2339}
2340#endif /* CPU_DISABLE_SSE */
2341
2342int
2343fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
2344{
2345#ifndef CPU_DISABLE_SSE
2346 if (cpu_fxsr) {
2347 fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
2348 (struct save87 *)fpregs);
2349 return (0);
2350 }
2351#endif /* CPU_DISABLE_SSE */
2352 bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2353 return (0);
2354}
2355
2356int
2357set_fpregs(struct lwp *lp, struct fpreg *fpregs)
2358{
2359#ifndef CPU_DISABLE_SSE
2360 if (cpu_fxsr) {
2361 set_fpregs_xmm((struct save87 *)fpregs,
2362 &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
2363 return (0);
2364 }
2365#endif /* CPU_DISABLE_SSE */
2366 bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2367 return (0);
2368}
2369
2370int
2371fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
2372{
2373 if (lp == NULL) {
2374 dbregs->dr0 = rdr0();
2375 dbregs->dr1 = rdr1();
2376 dbregs->dr2 = rdr2();
2377 dbregs->dr3 = rdr3();
2378 dbregs->dr4 = rdr4();
2379 dbregs->dr5 = rdr5();
2380 dbregs->dr6 = rdr6();
2381 dbregs->dr7 = rdr7();
2382 } else {
2383 struct pcb *pcb;
2384
2385 pcb = lp->lwp_thread->td_pcb;
2386 dbregs->dr0 = pcb->pcb_dr0;
2387 dbregs->dr1 = pcb->pcb_dr1;
2388 dbregs->dr2 = pcb->pcb_dr2;
2389 dbregs->dr3 = pcb->pcb_dr3;
2390 dbregs->dr4 = 0;
2391 dbregs->dr5 = 0;
2392 dbregs->dr6 = pcb->pcb_dr6;
2393 dbregs->dr7 = pcb->pcb_dr7;
2394 }
2395 return (0);
2396}
2397
2398int
2399set_dbregs(struct lwp *lp, struct dbreg *dbregs)
2400{
2401 if (lp == NULL) {
2402 load_dr0(dbregs->dr0);
2403 load_dr1(dbregs->dr1);
2404 load_dr2(dbregs->dr2);
2405 load_dr3(dbregs->dr3);
2406 load_dr4(dbregs->dr4);
2407 load_dr5(dbregs->dr5);
2408 load_dr6(dbregs->dr6);
2409 load_dr7(dbregs->dr7);
2410 } else {
2411 struct pcb *pcb;
2412 struct ucred *ucred;
2413 int i;
2414 uint32_t mask1, mask2;
2415
2416 /*
2417 * Don't let an illegal value for dr7 get set. Specifically,
2418 * check for undefined settings. Setting these bit patterns
2419 * results in undefined behaviour and can lead to an unexpected
2420 * TRCTRAP.
2421 */
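		/*
		 * The loop below walks the eight 2-bit R/W and LEN fields
		 * in dr7[16..31]; the 10b encoding is the undefined one
		 * being rejected (I/O breakpoints without CR4.DE for R/W,
		 * 8-byte length for LEN).
		 */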
2422 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2423 i++, mask1 <<= 2, mask2 <<= 2)
2424 if ((dbregs->dr7 & mask1) == mask2)
2425 return (EINVAL);
2426
2427 pcb = lp->lwp_thread->td_pcb;
2428 ucred = lp->lwp_proc->p_ucred;
2429
2430 /*
2431 * Don't let a process set a breakpoint that is not within the
2432 * process's address space. If a process could do this, it
2433 * could halt the system by setting a breakpoint in the kernel
2434 * (if ddb was enabled). Thus, we need to check to make sure
2435 * that no breakpoints are being enabled for addresses outside
2436 * process's address space, unless, perhaps, we were called by
2437 * uid 0.
2438 *
2439 * XXX - what about when the watched area of the user's
2440 * address space is written into from within the kernel
2441 * ... wouldn't that still cause a breakpoint to be generated
2442 * from within kernel mode?
2443 */
2444
2445 if (priv_check_cred(ucred, PRIV_ROOT, 0) != 0) {
2446 if (dbregs->dr7 & 0x3) {
2447 /* dr0 is enabled */
2448 if (dbregs->dr0 >= VM_MAX_USER_ADDRESS)
2449 return (EINVAL);
2450 }
2451
2452 if (dbregs->dr7 & (0x3<<2)) {
2453 /* dr1 is enabled */
2454 if (dbregs->dr1 >= VM_MAX_USER_ADDRESS)
2455 return (EINVAL);
2456 }
2457
2458 if (dbregs->dr7 & (0x3<<4)) {
2459 /* dr2 is enabled */
2460 if (dbregs->dr2 >= VM_MAX_USER_ADDRESS)
2461 return (EINVAL);
2462 }
2463
2464 if (dbregs->dr7 & (0x3<<6)) {
2465 /* dr3 is enabled */
2466 if (dbregs->dr3 >= VM_MAX_USER_ADDRESS)
2467 return (EINVAL);
2468 }
2469 }
2470
2471 pcb->pcb_dr0 = dbregs->dr0;
2472 pcb->pcb_dr1 = dbregs->dr1;
2473 pcb->pcb_dr2 = dbregs->dr2;
2474 pcb->pcb_dr3 = dbregs->dr3;
2475 pcb->pcb_dr6 = dbregs->dr6;
2476 pcb->pcb_dr7 = dbregs->dr7;
2477
2478 pcb->pcb_flags |= PCB_DBREGS;
2479 }
2480
2481 return (0);
2482}
2483
2484/*
2485 * Return > 0 if a hardware breakpoint has been hit, and the
2486 * breakpoint was in user space. Return 0, otherwise.
2487 */
2488int
2489user_dbreg_trap(void)
2490{
2491 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2492 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2493 int nbp; /* number of breakpoints that triggered */
2494 caddr_t addr[4]; /* breakpoint addresses */
2495 int i;
2496
2497 dr7 = rdr7();
2498 if ((dr7 & 0x000000ff) == 0) {
2499 /*
2500 * all GE and LE bits in the dr7 register are zero,
2501 * thus the trap couldn't have been caused by the
2502 * hardware debug registers
2503 */
2504 return 0;
2505 }
2506
2507 nbp = 0;
2508 dr6 = rdr6();
2509 bp = dr6 & 0x0000000f;
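	/* bits 0-3 of dr6 (B0-B3) record which breakpoints fired */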
2510
2511 if (!bp) {
2512 /*
2513 * None of the breakpoint bits are set, meaning this
2514 * trap was not caused by any of the debug registers
2515 */
2516 return 0;
2517 }
2518
2519 /*
2520 * at least one of the breakpoints was hit; check to see
2521 * which ones and if any of them are user space addresses
2522 */
2523
2524 if (bp & 0x01) {
2525 addr[nbp++] = (caddr_t)rdr0();
2526 }
2527 if (bp & 0x02) {
2528 addr[nbp++] = (caddr_t)rdr1();
2529 }
2530 if (bp & 0x04) {
2531 addr[nbp++] = (caddr_t)rdr2();
2532 }
2533 if (bp & 0x08) {
2534 addr[nbp++] = (caddr_t)rdr3();
2535 }
2536
2537 for (i=0; i<nbp; i++) {
2538 if (addr[i] <
2539 (caddr_t)VM_MAX_USER_ADDRESS) {
2540 /*
2541 * addr[i] is in user space
2542 */
2543 return nbp;
2544 }
2545 }
2546
2547 /*
2548 * None of the breakpoints are in user space.
2549 */
2550 return 0;
2551}
2552
2553
2554#ifndef DDB
2555void
2556Debugger(const char *msg)
2557{
2558 kprintf("Debugger(\"%s\") called.\n", msg);
2559}
2560#endif /* no DDB */
2561
2562#ifdef DDB
2563
2564/*
2565 * Provide inb() and outb() as functions. They are normally only
2566 * available as macros calling inlined functions, thus cannot be
2567 * called inside DDB.
2568 *
2569 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
2570 */
2571
2572#undef inb
2573#undef outb
2574
2575/* silence compiler warnings */
2576u_char inb(u_int);
2577void outb(u_int, u_char);
2578
2579u_char
2580inb(u_int port)
2581{
2582 u_char data;
2583 /*
2584 * We use %%dx and not %1 here because i/o is done at %dx and not at
2585 * %edx, while gcc generates inferior code (movw instead of movl)
2586 * if we tell it to load (u_short) port.
2587 */
2588 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
2589 return (data);
2590}
2591
2592void
2593outb(u_int port, u_char data)
2594{
2595 u_char al;
2596 /*
2597 * Use an unnecessary assignment to help gcc's register allocator.
2598 * This makes a large difference for gcc-1.40 and a tiny difference
2599 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
2600 * best results. gcc-2.6.0 can't handle this.
2601 */
2602 al = data;
2603 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
2604}
2605
2606#endif /* DDB */
2607
2608
2609
2610#include "opt_cpu.h"
2611
2612
2613/*
2614 * initialize all the SMP locks
2615 */
2616
2617 /* critical region when masking or unmasking interrupts */
2618struct spinlock_deprecated imen_spinlock;
2619
2620/* critical region for old style disable_intr/enable_intr */
2621struct spinlock_deprecated mpintr_spinlock;
2622
2623/* critical region around INTR() routines */
2624struct spinlock_deprecated intr_spinlock;
2625
2626/* lock region used by kernel profiling */
2627struct spinlock_deprecated mcount_spinlock;
2628
2629/* locks com (tty) data/hardware accesses: a FASTINTR() */
2630struct spinlock_deprecated com_spinlock;
2631
2632/* lock regions around the clock hardware */
2633struct spinlock_deprecated clock_spinlock;
2634
2635/* lock around the MP rendezvous */
2636struct spinlock_deprecated smp_rv_spinlock;
2637
2638static void
2639init_locks(void)
2640{
2641#ifdef SMP
2642 /*
2643 * Get the initial mplock with a count of 1 for the BSP.
2644 * This uses a LOGICAL cpu ID, ie BSP == 0.
2645 */
2646 cpu_get_initial_mplock();
2647#endif
2648 /* DEPRECATED */
2649 spin_lock_init(&mcount_spinlock);
2650 spin_lock_init(&intr_spinlock);
2651 spin_lock_init(&mpintr_spinlock);
2652 spin_lock_init(&imen_spinlock);
2653 spin_lock_init(&smp_rv_spinlock);
2654 spin_lock_init(&com_spinlock);
2655 spin_lock_init(&clock_spinlock);
2656
2657 /* our token pool needs to work early */
2658 lwkt_token_pool_init();
2659}