Remove i386 support.
[dragonfly.git] / sys / platform / pc32 / i386 / support.s
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/support.s,v 1.67.2.5 2001/08/15 01:23:50 peter Exp $
 * $DragonFly: src/sys/platform/pc32/i386/support.s,v 1.20 2007/11/07 17:42:50 dillon Exp $
 */

#include "use_npx.h"

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#define IDXSHIFT	10

	.data

	.globl	memcpy_vector
memcpy_vector:
	.long	asm_generic_memcpy

	.globl	bcopy_vector
bcopy_vector:
	.long	asm_generic_bcopy

	.globl	ovbcopy_vector
ovbcopy_vector:
	.long	asm_generic_bcopy

	.text

/*
 * fillw(pat, base, cnt)
 *
 *	Fill cnt 16-bit words starting at base with the low 16 bits of pat.
 */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret

/*
 * void bcopy(const void *s, void *d, size_t count)
 *
 * Normal bcopy() vector, an optimized bcopy may be installed in
 * bcopy_vector.
 */
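/*
 * Roughly, the dispatch works like the following C sketch (illustrative
 * only; the real vectors take their arguments in %esi/%edi/%ecx rather
 * than on the stack):
 *
 *	void (*bcopy_vector)(void) = asm_generic_bcopy;
 *
 *	void bcopy(const void *s, void *d, size_t count)
 *	{
 *		load s, d, count into %esi, %edi, %ecx;
 *		(*bcopy_vector)();
 *	}
 */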
ENTRY(bcopy)
	pushl	%esi
	pushl	%edi
	movl	4+8(%esp),%esi		/* caddr_t from */
	movl	8+8(%esp),%edi		/* caddr_t to */
	movl	12+8(%esp),%ecx		/* size_t len */
	call	*bcopy_vector
	popl	%edi
	popl	%esi
	ret

/*
 * Generic (integer-only) bcopy() vector.
 */
ENTRY(generic_bcopy)
	pushl	%esi
	pushl	%edi
	movl	4+8(%esp),%esi		/* caddr_t from */
	movl	8+8(%esp),%edi		/* caddr_t to */
	movl	12+8(%esp),%ecx		/* size_t len */
	call	asm_generic_bcopy
	popl	%edi
	popl	%esi
	ret

ENTRY(ovbcopy)
	pushl	%esi
	pushl	%edi
	movl	4+8(%esp),%esi		/* caddr_t from */
	movl	8+8(%esp),%edi		/* caddr_t to */
	movl	12+8(%esp),%ecx		/* size_t len */
	call	*ovbcopy_vector
	popl	%edi
	popl	%esi
	ret

/*
 * void *memcpy(void *d, const void *s, size_t count)
 *
 * Note: memcpy does not have to support overlapping copies.
 *
 * Note: (d, s) arguments reversed from bcopy, and memcpy() returns d
 * while bcopy() returns void.
 */
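/*
 * Illustrative only: the two calls below copy the same bytes; note the
 * swapped pointer arguments and the different return values:
 *
 *	bcopy(src, dst, len);		returns nothing
 *	p = memcpy(dst, src, len);	returns dst
 */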
ENTRY(memcpy)
	pushl	%esi
	pushl	%edi
	movl	4+8(%esp),%edi
	movl	8+8(%esp),%esi
	movl	12+8(%esp),%ecx
	call	*memcpy_vector
	movl	4+8(%esp),%eax
	popl	%edi
	popl	%esi
	ret

/*
 * A stack-based on-fault routine is used for more complex PCB_ONFAULT
 * situations (such as memcpy/bcopy/bzero). In this case the on-fault
 * routine must be pushed on the stack.
 */
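/*
 * How the stack-based vector is used by copyout()/copyin() below: the
 * caller pushes the address of its fault handler (e.g. copyout_fault2)
 * and points PCB_ONFAULT at stack_onfault.  If a fault occurs, the trap
 * handler resumes at stack_onfault, whose 'ret' pops the pushed handler
 * address and jumps to it.  On the non-fault path the caller simply
 * discards the pushed vector with 'addl $4,%esp'.
 */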
stack_onfault:
	ret

/*****************************************************************************/
/* copyout and fubyte family */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
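/*
 * The protocol, in rough illustrative pseudo-C only:
 *
 *	curthread->td_pcb->pcb_onfault = handler;
 *	... touch user memory ...
 *	curthread->td_pcb->pcb_onfault = NULL;
 *
 * A protection fault taken while pcb_onfault is non-NULL is not fatal;
 * the trap handler transfers control to the handler, which cleans up and
 * returns EFAULT (or -1 for the fuword()/suword() family).
 */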

/*
 * copyout(from_kernel, to_user, len) - MP SAFE
 */
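/*
 * Illustrative usage from C (sketch only; kbuf and uaddr are made-up
 * names).  The return value is 0 on success or EFAULT on a bad user
 * address:
 *
 *	error = copyout(&kbuf, uaddr, sizeof(kbuf));
 *	if (error)
 *		return (error);
 */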
ENTRY(copyout)
	movl	PCPU(curthread),%eax
	movl	TD_PCB(%eax),%eax
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	pushl	$copyout_fault2
	movl	$stack_onfault,PCB_ONFAULT(%eax)
	movl	4+16(%esp),%esi
	movl	8+16(%esp),%edi
	movl	12+16(%esp),%ebx
	testl	%ebx,%ebx		/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses. If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault1
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAX_USER_ADDRESS,%eax
	ja	copyout_fault1
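	/*
	 * At this point %eax = %edi + %ebx is the exclusive end address of
	 * the destination range: the jc above rejects 32-bit wraparound and
	 * the cmpl rejects any range extending past VM_MAX_USER_ADDRESS.
	 */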

	/*
	 * Convert copyout to memcpy_vector(dest:%edi, src:%esi, count:%ecx)
	 */
	movl	%ebx,%ecx
	call	*memcpy_vector

done_copyout:
	/*
	 * non-error return
	 */
	addl	$4,%esp
	movl	PCPU(curthread),%edx
	xorl	%eax,%eax
	movl	TD_PCB(%edx),%edx
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyout_fault1:
	addl	$4,%esp			/* skip pushed copyout_fault vector */
copyout_fault2:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(curthread),%edx
	movl	TD_PCB(%edx),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 */
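/*
 * copyin() is the mirror image of copyout(): the user source range
 * %esi..%esi+%ecx is validated against VM_MAX_USER_ADDRESS instead of
 * the destination, and the return value is again 0 or EFAULT.
 */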

ENTRY(copyin)
	movl	PCPU(curthread),%eax
	movl	TD_PCB(%eax),%eax
	pushl	%esi
	pushl	%edi
	pushl	$copyin_fault2
	movl	$stack_onfault,PCB_ONFAULT(%eax)
	movl	4+12(%esp),%esi		/* caddr_t from */
	movl	8+12(%esp),%edi		/* caddr_t to */
	movl	12+12(%esp),%ecx	/* size_t len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault1
	cmpl	$VM_MAX_USER_ADDRESS,%edx
	ja	copyin_fault1

	/*
	 * Call memcpy(destination:%edi, source:%esi, bytes:%ecx)
	 */
	call	*memcpy_vector

	/*
	 * return 0 (no error)
	 */
	addl	$4,%esp
	movl	PCPU(curthread),%edx
	xorl	%eax,%eax
	movl	TD_PCB(%edx),%edx
	popl	%edi
	popl	%esi
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	/*
	 * return EFAULT
	 */
	ALIGN_TEXT
copyin_fault1:
	addl	$4,%esp			/* skip pushed copyin_fault vector */
copyin_fault2:
	popl	%edi
	popl	%esi
	movl	PCPU(curthread),%edx
	movl	TD_PCB(%edx),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * fu{byte,sword,word} - MP SAFE
 *
 * Fetch a byte (sword, word) from user memory
 */
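/*
 * These return the fetched value, or -1 on a fault; a word that
 * legitimately contains -1 is therefore indistinguishable from an error
 * (callers that must tell the difference can use copyin() instead).
 */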
ENTRY(fuword)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx		/* from */

	cmpl	$VM_MAX_USER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * fusword - MP SAFE
 */
ENTRY(fusword)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAX_USER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * fubyte - MP SAFE
 */
ENTRY(fubyte)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAX_USER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	decl	%eax			/* return -1 */
	ret

/*
 * su{byte,sword,word} - MP SAFE
 *
 * Write a byte (sword, word) to user memory
 */
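/*
 * These return 0 on success and -1 if the address is not a writable user
 * address.  Illustrative use (uptr and val are made-up names):
 *
 *	if (suword(uptr, val) != 0)
 *		return (EFAULT);
 */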
ENTRY(suword)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAX_USER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * susword - MP SAFE
 */
ENTRY(susword)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAX_USER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(curthread),%ecx	/* restore trashed register */
	movl	TD_PCB(%ecx),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * subyte - MP SAFE
 */
ENTRY(subyte)
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAX_USER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	PCPU(curthread),%ecx	/* restore trashed register */
	movl	TD_PCB(%ecx),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 * Copy a string from user address 'from' to kernel address 'to',
 * stopping when a 0 byte is reached.  Return ENAMETOOLONG if the string
 * is longer than maxlen, and EFAULT on protection violations.  If
 * lencopied is non-zero, return the actual length in *lencopied.
 */
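/*
 * Note: on success the count stored in *lencopied includes the
 * terminating 0 byte.  Typical illustrative use (upath and path are
 * made-up names):
 *
 *	error = copyinstr(upath, path, MAXPATHLEN, &len);
 */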
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi		/* %esi = from */
	movl	16(%esp),%edi		/* %edi = to */
	movl	20(%esp),%edx		/* %edx = maxlen */

	movl	$VM_MAX_USER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAX_USER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	PCPU(curthread),%ecx
	movl	TD_PCB(%ecx),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret


/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 */
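/*
 * Same copy loop as copyinstr() above, but both addresses are kernel
 * addresses, so no user address-range checks or fault recovery are done.
 */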
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi		/* %esi = from */
	movl	16(%esp),%edi		/* %edi = to */
	movl	20(%esp),%edx		/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret

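/*
 * bcmp(b1, b2, len)
 *
 *	Compare two byte strings; returns 0 if they are identical and
 *	non-zero otherwise.  Unlike memcmp(), no ordering is implied.
 */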
ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx
	xorl	%eax,%eax

	movl	%edx,%ecx
	shrl	$2,%ecx
	cld				/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
	je	2f
1:
	incl	%eax
2:
	popl	%esi
	popl	%edi
	ret


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%gs
	mov	%ax,%ss
	movl	$KPSEL,%eax
	mov	%ax,%fs
	mov	%ax,%gs

	/*
	 * Reload the code selector by turning the near return into an
	 * intersegment (far) return: push the return address again,
	 * overwrite the slot above it with KCSEL, and let lret pop the
	 * new %eip/%cs pair.
	 */
	movl	(%esp),%eax
	pushl	%eax
	movl	$KCSEL,4(%esp)
	lret

/*
 * void lidt(struct region_descriptor *rdp);
 */
ENTRY(lidt)
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

/*
 * void lldt(u_short sel)
 */
ENTRY(lldt)
	lldt	4(%esp)
	ret

/*
 * void ltr(u_short sel)
 */
ENTRY(ltr)
	ltr	4(%esp)
	ret

/*
 * ssdtosd(*ssdp, *sdp)
 *
 *	Convert the machine-independent 'soft' segment descriptor at *ssdp
 *	into the packed hardware segment descriptor format at *sdp.
 */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/* load_cr0(cr0) */
ENTRY(load_cr0)
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

/* rcr0() */
ENTRY(rcr0)
	movl	%cr0,%eax
	ret

/* rcr3() */
ENTRY(rcr3)
	movl	%cr3,%eax
	ret

/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
#if defined(SWTCH_OPTIM_STATS)
	incl	_tlb_flush_count
#endif
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret

/* rcr4() */
ENTRY(rcr4)
	movl	%cr4,%eax
	ret

/* void load_cr4(caddr_t cr4) */
ENTRY(load_cr4)
	movl	4(%esp),%eax
	movl	%eax,%cr4
	ret

/* void reset_dbregs() */
ENTRY(reset_dbregs)
	movl	$0,%eax
	movl	%eax,%dr7	/* disable all breakpoints first */
	movl	%eax,%dr0
	movl	%eax,%dr1
	movl	%eax,%dr2
	movl	%eax,%dr3
	movl	%eax,%dr6
	ret

/*****************************************************************************/
/* setjmp, longjmp */
/*****************************************************************************/

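/*
 * setjmp(buf) saves the callee-saved registers (%ebx, %esp, %ebp, %esi,
 * %edi) and the return %eip into the six-word buffer and returns 0.
 * longjmp(buf) restores them and resumes at the saved %eip with a return
 * value of 1.
 */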
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)		/* save ebx */
	movl	%esp,4(%eax)		/* save esp */
	movl	%ebp,8(%eax)		/* save ebp */
	movl	%esi,12(%eax)		/* save esi */
	movl	%edi,16(%eax)		/* save edi */
	movl	(%esp),%edx		/* get rta */
	movl	%edx,20(%eax)		/* save eip */
	xorl	%eax,%eax		/* return(0); */
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx		/* restore ebx */
	movl	4(%eax),%esp		/* restore esp */
	movl	8(%eax),%ebp		/* restore ebp */
	movl	12(%eax),%esi		/* restore esi */
	movl	16(%eax),%edi		/* restore edi */
	movl	20(%eax),%edx		/* get rta */
	movl	%edx,(%esp)		/* put in return frame */
	xorl	%eax,%eax		/* return(1); */
	incl	%eax
	ret

/*
 * Support for BB-profiling (gcc -a). The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl	bbhead
bbhead:
	.long	0

	.text
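/*
 * Each object file compiled with block profiling calls __bb_init_func()
 * once with its profile record: the first word is set to 1 to mark the
 * record initialized, and the word at offset 16 links the record onto
 * the bbhead list so kernbb can walk it later.
 */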
NON_GPROF_ENTRY(__bb_init_func)
	movl	4(%esp),%eax
	movl	$1,(%eax)
	movl	bbhead,%edx
	movl	%edx,16(%eax)
	movl	%eax,bbhead
	.byte	0xc3		/* avoid macro for `ret' */