/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/locore.s,v 1.132.2.10 2003/02/03 20:54:49 jhb Exp $
 * $DragonFly: src/sys/i386/i386/Attic/locore.s,v 1.7 2003/07/31 19:56:59 dillon Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "opt_bootp.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

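/*
 * How the recursive map works (a sketch, independent of the exact value
 * of PTDPTDI): with 4K pages and 4-byte ptes, each pde covers 4MB
 * (PDRSHIFT == 22), so pointing pde PTDPTDI back at the page directory
 * makes the directory double as a page table.  The pte and pde for any
 * va then fall out of simple arithmetic, in C terms:
 *
 *	pte_va(va) = PTmap + (va >> PAGE_SHIFT) * PDESIZE;
 *	pde_va(va) = PTD + (va >> PDRSHIFT) * PDESIZE;
 */
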
/*
 * APTmap and APTD make up the alternate recursive pagemap.
 * They are used when modifying another process's page tables.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase,KERNBASE

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000			/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	boothowto,bootdev

	.globl	cpu,cpu_vendor,cpu_id,bootinfo
	.globl	cpu_high, cpu_feature, cpu_procinfo

cpu:		.long	0		/* are we 386, 386sx, or 486 */
cpu_id:		.long	0		/* stepping ID */
cpu_high:	.long	0		/* highest arg to CPUID */
cpu_feature:	.long	0		/* features */
cpu_procinfo:	.long	0		/* brand index / HTT info */
cpu_vendor:	.space	20		/* CPU origin code */
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo buffer space */

KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */
cpu0idlestk:	.long	0		/* stack for the idle thread */

	.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

	.globl	KPTphys
KPTphys:	.long	0		/* PA of kernel page tables */

	.globl	proc0paddr
proc0paddr:	.long	0		/* VA of proc 0 address space */
p0upa:		.long	0		/* PA of proc0's UPAGES */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	bdb_exists		/* BDE debugger is present */
bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

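/*
 * Example (assuming the usual KERNBASE of 0xC0000000; the real value comes
 * from the kernel configuration): the kernel is linked to run at KERNBASE
 * but executes at its load (physical) address until paging is enabled, so
 * a symbol linked at 0xC0101234 must be referenced as R(sym) == 0x00101234
 * in all of the pre-paging code below.
 */
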
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

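/*
 * ALLOCPAGES in C terms (a sketch, not part of the build): bump-allocate
 * 'foo' pages from the physfree watermark and zero them; the physical
 * address of the allocation is left in %esi:
 *
 *	pa = physfree;
 *	physfree += (foo) * PAGE_SIZE;
 *	memset((void *)pa, 0, (foo) * PAGE_SIZE);    memory is not paged yet
 */
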
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot) \
	shll	$2,%ebx ; \
	addl	base,%ebx ; \
	orl	$PG_V,%eax ; \
	orl	prot,%eax ; \
1:	movl	%eax,(%ebx) ; \
	addl	$PAGE_SIZE,%eax ; /* increment physical address */ \
	addl	$4,%ebx ; /* next pte */ \
	loop	1b

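/*
 * fillkpt in C terms (a sketch): write %ecx consecutive ptes starting at
 * index %ebx of the table at 'base', mapping consecutive physical pages
 * starting at %eax, each marked valid plus 'prot':
 *
 *	pte = (pt_entry_t *)base + index;
 *	for (i = 0; i < count; i++)
 *		pte[i] = (pa + i * PAGE_SIZE) | PG_V | prot;
 */
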
/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot) \
	movl	%eax, %ebx ; \
	shrl	$PAGE_SHIFT, %ebx ; \
	fillkpt(R(KPTphys), prot)

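/*
 * Usage sketch: since the KPTphys page tables are later installed both at
 * pde 0 (the temporary identity map) and at KPTDI (the KERNBASE map),
 * fillkptphys() makes a physical page reachable at both va == pa and
 * va == KERNBASE + pa.  A typical call, mapping one read-write page:
 *
 *	movl	R(cpu0pp), %eax		physical address to map
 *	movl	$1, %ecx		number of pages
 *	fillkptphys($PG_RW)
 */
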
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(bdb_exists)
1:
#endif
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful not to overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(_pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(_pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(_pc98_system_parameter)+1
3:

	movw	R(_pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * initi386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

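/*
 * Note on the push/ret above (a sketch of the control flow): %eip still
 * holds a low (physical) address here, which keeps working only because
 * IdlePTD also identity-maps low memory.  Pushing the linked (KERNBASE-
 * relative) address of 'begin' and executing ret is just an indirect jump
 * that moves execution over to the high mapping.
 */
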
/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/*
	 * set up the bootstrap stack.  The pcb sits at the end of the
	 * bootstrap stack.
	 */
	movl	proc0paddr,%esp		/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE-PCB_SIZE,%esp
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	/*movl	proc0paddr,%eax*/
	movl	IdlePTD, %esi
	movl	%esi,PCB_CR3(%esp)

	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */

	call	init386			/* wire 386 chip for unix operation */
	popl	%esi

	call	mi_startup		/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_UC(%esp),%eax	/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode, szosigcode
szsigcode:
	.long	esigcode - sigcode
szosigcode:
	.long	esigcode - osigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

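	/*
	 * The detection below, distilled to C (a sketch of the cases
	 * documented above, not additional logic):
	 *
	 *	if (return_address == 0)
	 *		goto olddiskboot;	    old disk boot blocks
	 *	else if (arg5 != 0)
	 *		halt();			    old diskless netboot
	 *	else
	 *		goto newboot;		    new uniform boot code
	 */
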
	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  The two
	 * can be told apart by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret

/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

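	/*
	 * The toggle test above, in C terms (a sketch): a 386 hardwires
	 * EFLAGS.AC to zero, so flipping the bit and reading it back
	 * distinguishes a 386 from a 486 or later:
	 *
	 *	old = read_eflags();
	 *	write_eflags(old | PSL_AC);
	 *	is_486_or_later = (read_eflags() ^ old) & PSL_AC;
	 *	write_eflags(old);
	 */
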
	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string "NexG"
	movl	$0x72446e65,R(cpu_vendor+4)	# "enDr"
	movl	$0x6e657669,R(cpu_vendor+8)	# "iven"
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check for a Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Blue Lightning CPUs also don't change the undefined flags.
	 * Because IBM doesn't disclose the information for Blue Lightning
	 * CPUs, we can't distinguish them from Cyrix CPUs (including the
	 * IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string "Cyri"
	movl	$0x736e4978,R(cpu_vendor+4)	# "xIns"
	movl	$0x64616574,R(cpu_vendor+8)	# "tead"
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid				# cpuid 0
	movl	%eax,R(cpu_high)	# highest capability
	movl	%ebx,R(cpu_vendor)	# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid				# cpuid 1
	movl	%eax,R(cpu_id)		# store cpu_id
	movl	%ebx,R(cpu_procinfo)	# store cpu_procinfo
	movl	%edx,R(cpu_feature)	# store cpu_feature
	rorl	$8,%eax			# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

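	/*
	 * The extraction above, in C terms (a sketch): cpuid leaf 1
	 * returns the family in bits 11:8 of %eax, so
	 *
	 *	family = (cpuid_1_eax >> 8) & 0xf;
	 *
	 * 4 means 486, 5 means Pentium, and anything higher is treated
	 * as a Pentium Pro class CPU below.
	 */
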
	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret

/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

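/*
 * Overview of the steps below (a summary, not extra logic): pages are
 * carved off the physfree watermark for the kernel page tables (KPTphys),
 * the page directory (IdlePTD), proc0's UPAGES, the vm86 region, cpu0's
 * private page and idle stack, and the SMP page table page.  Kernel text
 * is then mapped read-only and data read-write; KPTphys is installed both
 * at pde 0 (a temporary identity map of low memory) and at KPTDI (the
 * KERNBASE map); and IdlePTD is entered into itself at PTDPTDI to form
 * the recursive PTmap described at the top of this file.
 */
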
/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's idle stack */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(cpu0idlestk)

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Map cpu0's idle thread stack */
	movl	R(cpu0idlestk), %eax
	movl	$PS_IDLESTACK_PAGE, %ebx
	movl	$UPAGES, %ecx
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

#ifdef SMP
/* Initialize mp lock to allow early traps */
	movl	$0, R(mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,bdb_exists
	je	bdb_commit_paging_exit

	movl	$gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif	/* BDE_DEBUGGER */