kernel - Make pcb_onfault more robust.
[dragonfly.git] / sys / platform / pc32 / i386 / support.s
... / ...
CommitLineData
1/*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/i386/i386/support.s,v 1.67.2.5 2001/08/15 01:23:50 peter Exp $
34 */
35
36#include <machine/asmacros.h>
37#include <machine/cputypes.h>
38#include <machine/pmap.h>
39#include <machine/specialreg.h>
40
41#include "assym.s"
42
43#define IDXSHIFT 10
44
45 .data
46
/*
 * Overridable copy primitives.  Each vector holds the address of the
 * routine actually used by bcopy()/memcpy()/ovbcopy(); they default to
 * the generic integer-only implementations and may be repointed at boot
 * time (e.g. to an optimized variant) by CPU identification code.
 */
47 .globl memcpy_vector
48memcpy_vector:
49 .long asm_generic_memcpy /* non-overlapping copy */
50
51 .globl bcopy_vector
52bcopy_vector:
53 .long asm_generic_bcopy /* general copy */
54
55 .globl ovbcopy_vector
56ovbcopy_vector:
57 .long asm_generic_bcopy /* overlap-safe copy */
58
59 .text
60
61/* fillw(pat, base, cnt) - store cnt copies of the 16-bit pattern pat at base */
62ENTRY(fillw)
63 pushl %edi /* %edi is callee-saved */
64 movl 8(%esp),%eax /* %ax = fill pattern */
65 movl 12(%esp),%edi /* %edi = destination base */
66 movl 16(%esp),%ecx /* %ecx = count of 16-bit words */
67 cld /* store forwards */
68 rep
69 stosw
70 popl %edi
71 ret
72
73/*
74 * void bcopy(const void *s, void *d, size_t count)
75 *
76 * Normal bcopy() vector, an optimized bcopy may be installed in
77 * bcopy_vector.
78 *
79 * The 4+8 style offsets are: argument slot + 8 bytes of registers
80 * saved below the return address.
81 */
79ENTRY(bcopy)
80 pushl %esi
81 pushl %edi
82 movl 4+8(%esp),%esi /* caddr_t from */
83 movl 8+8(%esp),%edi /* caddr_t to */
84 movl 12+8(%esp),%ecx /* size_t len */
85 call *bcopy_vector /* dispatch to installed implementation */
86 popl %edi
87 popl %esi
88 ret
89
90/*
91 * Generic (integer-only) bcopy() vector.  Same interface as bcopy()
92 * but always calls the generic implementation directly, bypassing
93 * bcopy_vector.
94 */
93ENTRY(generic_bcopy)
94 pushl %esi
95 pushl %edi
96 movl 4+8(%esp),%esi /* caddr_t from */
97 movl 8+8(%esp),%edi /* caddr_t to */
98 movl 12+8(%esp),%ecx /* size_t len */
99 call asm_generic_bcopy
100 popl %edi
101 popl %esi
102 ret
103
/*
 * ovbcopy(from, to, len) - bcopy for possibly-overlapping regions,
 * dispatched through ovbcopy_vector (defaults to the same overlap-safe
 * generic routine as bcopy).
 */
104ENTRY(ovbcopy)
105 pushl %esi
106 pushl %edi
107 movl 4+8(%esp),%esi /* caddr_t from */
108 movl 8+8(%esp),%edi /* caddr_t to */
109 movl 12+8(%esp),%ecx /* size_t len */
110 call *ovbcopy_vector
111 popl %edi
112 popl %esi
113 ret
114
115/*
116 * void *memcpy(void *d, const void *s, size_t count)
117 *
118 * Note: memcpy does not have to support overlapping copies.
119 *
120 * Note: (d, s) arguments reversed from bcopy, and memcpy() returns d
121 * while bcopy() returns void.
122 */
123ENTRY(memcpy)
124 pushl %esi
125 pushl %edi
126 movl 4+8(%esp),%edi /* dest */
127 movl 8+8(%esp),%esi /* src */
128 movl 12+8(%esp),%ecx /* count */
129 call *memcpy_vector
130 movl 4+8(%esp),%eax /* return value = dest */
131 popl %edi
132 popl %esi
133 ret
134
135/*
136 * A stack-based on-fault routine is used for more complex PCB_ONFAULT
137 * situations (such as memcpy/bcopy/bzero). In this case the on-fault
138 * routine must be pushed on the stack.
139 *
140 * NOTE(review): on a fault the trap handler is expected to restore
141 * %esp (presumably from PCB_ONFAULT_SP) and transfer control here;
142 * the ret then pops the fault-vector address that the caller pushed
143 * (e.g. copyout_fault2 / copyin_fault2) and jumps to it -- confirm
144 * against the trap handler.
145 */
140stack_onfault:
141 ret
142
143/*****************************************************************************/
144/* copyout and fubyte family */
145/*****************************************************************************/
146/*
147 * Access user memory from inside the kernel. These routines and possibly
148 * the math- and DOS emulators should be the only places that do this.
149 *
150 * We have to access the memory with user's permissions, so use a segment
151 * selector with RPL 3. For writes to user space we have to additionally
152 * check the PTE for write permission, because the 386 does not check
153 * write permissions when we are executing with EPL 0. The 486 does check
154 * this if the WP bit is set in CR0, so we can use a simpler version here.
155 *
156 * These routines set curpcb->onfault for the time they execute. When a
157 * protection violation occurs inside the functions, the trap handler
158 * returns to *curpcb->onfault instead of the function.
159 */
160
161/*
162 * copyout(from_kernel, to_user, len) - MP SAFE
163 *
164 * Copy len bytes from kernel space to user space.  Returns 0 on
165 * success or EFAULT if the destination range is not valid user
166 * memory or a fault occurs during the copy.
167 */
164ENTRY(copyout)
165 movl PCPU(curthread),%eax
166 movl TD_PCB(%eax),%eax /* %eax = current pcb */
167 pushl %esi
168 pushl %edi
169 pushl %ebx
170 pushl $copyout_fault2 /* fault vector consumed by stack_onfault */
171 movl $stack_onfault,PCB_ONFAULT(%eax)
172 movl %esp,PCB_ONFAULT_SP(%eax)
173 subl $12,PCB_ONFAULT_SP(%eax) /* call,ebx,stackedfault */
174 /* for *memcpy_vector */
/*
 * NOTE(review): the -12 appears to pre-account for the frame the
 * vectored copy routine builds (return eip from the call, a saved
 * %ebx, and the stacked fault vector) so the trap handler can restore
 * %esp consistently -- verify against asm_generic_memcpy and trap.c.
 */
175 movl 4+16(%esp),%esi /* from (kernel) */
176 movl 8+16(%esp),%edi /* to (user) */
177 movl 12+16(%esp),%ebx /* len */
178 testl %ebx,%ebx /* anything to do? */
179 jz done_copyout
180
181 /*
182 * Check explicitly for non-user addresses. If 486 write protection
183 * is being used, this check is essential because we are in kernel
184 * mode so the h/w does not provide any protection against writing
185 * kernel addresses.
186 */
187
188 /*
189 * First, prevent address wrapping.
190 */
191 movl %edi,%eax
192 addl %ebx,%eax /* %eax = end of destination range */
193 jc copyout_fault1 /* wrapped past 2^32 */
194/*
195 * XXX STOP USING VM_MAX_USER_ADDRESS.
196 * It is an end address, not a max, so every time it is used correctly it
197 * looks like there is an off by one error, and of course it caused an off
198 * by one error in several places.
199 */
200 cmpl $VM_MAX_USER_ADDRESS,%eax
201 ja copyout_fault1 /* end beyond user space */
202
203 /*
204 * Convert copyout to memcpy_vector(dest:%edi, src:%esi, count:%ecx)
205 */
206 movl %ebx,%ecx
207 call *memcpy_vector
208
209done_copyout:
210 /*
211 * non-error return
212 */
213 addl $4,%esp /* discard stacked fault vector */
214 movl PCPU(curthread),%edx
215 xorl %eax,%eax /* return 0 */
216 movl TD_PCB(%edx),%edx
217 popl %ebx
218 popl %edi
219 popl %esi
220 movl %eax,PCB_ONFAULT(%edx) /* disarm fault handler */
221 ret
222
223 ALIGN_TEXT
/* range check failed before the copy: fault vector still on the stack */
224copyout_fault1:
225 addl $4,%esp /* skip pushed copyout_fault vector */
/* reached via stack_onfault after a fault during the copy */
226copyout_fault2:
227 popl %ebx
228 popl %edi
229 popl %esi
230 movl PCPU(curthread),%edx
231 movl TD_PCB(%edx),%edx
232 movl $0,PCB_ONFAULT(%edx) /* disarm fault handler */
233 movl $EFAULT,%eax
234 ret
235
236/*
237 * copyin(from_user, to_kernel, len) - MP SAFE
238 *
239 * Copy len bytes from user space to kernel space.  Returns 0 on
240 * success or EFAULT if the source range is not valid user memory or
241 * a fault occurs during the copy.
242 */
239
240ENTRY(copyin)
241 movl PCPU(curthread),%eax
242 movl TD_PCB(%eax),%eax /* %eax = current pcb */
243 pushl %esi
244 pushl %edi
245 pushl $copyin_fault2 /* fault vector consumed by stack_onfault */
246 movl $stack_onfault,PCB_ONFAULT(%eax)
247 movl %esp,PCB_ONFAULT_SP(%eax)
248 subl $12,PCB_ONFAULT_SP(%eax) /* call,ebx,stackedfault */
249 /* for *memcpy_vector */
/*
 * NOTE(review): as in copyout, the -12 pre-accounts for the frame the
 * vectored copy routine builds before it can fault -- verify against
 * asm_generic_memcpy and the trap handler.
 */
250 movl 4+12(%esp),%esi /* caddr_t from */
251 movl 8+12(%esp),%edi /* caddr_t to */
252 movl 12+12(%esp),%ecx /* size_t len */
253
254 /*
255 * make sure address is valid (no wrap, end within user space)
256 */
257 movl %esi,%edx
258 addl %ecx,%edx /* %edx = end of source range */
259 jc copyin_fault1 /* wrapped past 2^32 */
260 cmpl $VM_MAX_USER_ADDRESS,%edx
261 ja copyin_fault1 /* end beyond user space */
262
263 /*
264 * Call memcpy(destination:%edi, source:%esi, bytes:%ecx)
265 */
266 call *memcpy_vector
267
268 /*
269 * return 0 (no error)
270 */
271 addl $4,%esp /* discard stacked fault vector */
272 movl PCPU(curthread),%edx
273 xorl %eax,%eax
274 movl TD_PCB(%edx),%edx
275 popl %edi
276 popl %esi
277 movl %eax,PCB_ONFAULT(%edx) /* disarm fault handler */
278 ret
279
280 /*
281 * return EFAULT
282 */
283 ALIGN_TEXT
/* range check failed before the copy: fault vector still on the stack */
284copyin_fault1:
285 addl $4,%esp /* skip pushed copyin_fault vector */
/* reached via stack_onfault after a fault during the copy */
286copyin_fault2:
287 popl %edi
288 popl %esi
289 movl PCPU(curthread),%edx
290 movl TD_PCB(%edx),%edx
291 movl $0,PCB_ONFAULT(%edx) /* disarm fault handler */
292 movl $EFAULT,%eax
293 ret
294
295/*
296 * casuword. Compare and set user word. Returns -1 or the current value.
297 *
298 * casuword(dst, old, new): atomically replace *dst with new iff
299 * *dst == old.  Returns the value observed at *dst (== old on
300 * success), or -1 if a fault occurred (via fusufault).  Note the
301 * -1 return is ambiguous with a stored value of -1.
302 */
298
299ENTRY(casuword)
300 movl PCPU(curthread),%ecx
301 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
302 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
303 movl %esp,PCB_ONFAULT_SP(%ecx)
304 movl 4(%esp),%edx /* dst */
305 movl 8(%esp),%eax /* old */
306 movl 12(%esp),%ecx /* new */
307
308 cmpl $VM_MAX_USER_ADDRESS-4,%edx /* verify address is valid */
309 ja fusufault
310
311#ifdef SMP
312 lock
313#endif
314 cmpxchgl %ecx,(%edx) /* Compare and set. */
315
316 /*
317 * The old value is in %eax. If the store succeeded it will be the
318 * value we expected (old) from before the store, otherwise it will
319 * be the current value.
320 */
321
322 movl PCPU(curthread),%ecx /* reload: %ecx held `new' above */
323 movl TD_PCB(%ecx),%ecx
324 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
325 ret
326END(casuword)
327
328/*
329 * fu{byte,sword,word} - MP SAFE
330 *
331 * Fetch a byte (sword, word) from user memory.  Returns the value,
332 * or -1 on fault (via fusufault) -- ambiguous with a stored -1.
333 */
333ENTRY(fuword)
334 movl PCPU(curthread),%ecx
335 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
336 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
337 movl %esp,PCB_ONFAULT_SP(%ecx)
338 movl 4(%esp),%edx /* from */
339
340 cmpl $VM_MAX_USER_ADDRESS-4,%edx /* verify address is valid */
341 ja fusufault
342
343 movl (%edx),%eax /* may fault -> fusufault */
344 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
345 ret
346
347/*
348 * fusword - MP SAFE
349 *
350 * Fetch a 16-bit word from user memory, zero-extended; -1 on fault.
351 */
350ENTRY(fusword)
351 movl PCPU(curthread),%ecx
352 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
353 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
354 movl %esp,PCB_ONFAULT_SP(%ecx)
355 movl 4(%esp),%edx /* from */
356
357 cmpl $VM_MAX_USER_ADDRESS-2,%edx /* verify address is valid */
358 ja fusufault
359
360 movzwl (%edx),%eax /* may fault -> fusufault */
361 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
362 ret
363
364/*
365 * fubyte - MP SAFE
366 *
367 * Fetch a byte from user memory, zero-extended; -1 on fault.
368 */
367ENTRY(fubyte)
368 movl PCPU(curthread),%ecx
369 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
370 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
371 movl %esp,PCB_ONFAULT_SP(%ecx)
372 movl 4(%esp),%edx /* from */
373
374 cmpl $VM_MAX_USER_ADDRESS-1,%edx /* verify address is valid */
375 ja fusufault
376
377 movzbl (%edx),%eax /* may fault -> fusufault */
378 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
379 ret
380
/*
 * Common fault target for the fu*/su*/casuword family: clear the
 * pcb_onfault hook and return -1 in %eax.
 */
381 ALIGN_TEXT
382fusufault:
383 movl PCPU(curthread),%ecx
384 movl TD_PCB(%ecx),%ecx
385 xorl %eax,%eax
386 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
387 decl %eax /* %eax = -1 */
388 ret
389
390/*
391 * su{byte,sword,word,word32} - MP SAFE
392 *
393 * Write a long to user memory.  Returns 0 on success, -1 on fault
394 * (via fusufault).
395 */
395ENTRY(suword)
396 movl PCPU(curthread),%ecx
397 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
398 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
399 movl %esp,PCB_ONFAULT_SP(%ecx)
400 movl 4(%esp),%edx /* to */
401
402 cmpl $VM_MAX_USER_ADDRESS-4,%edx /* verify address validity */
403 ja fusufault
404
405 movl 8(%esp),%eax /* value */
406 movl %eax,(%edx) /* may fault -> fusufault */
407 xorl %eax,%eax /* return 0 */
408 movl PCPU(curthread),%ecx
409 movl TD_PCB(%ecx),%ecx
410 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
411 ret
412
413/*
414 * Write an integer to user memory.  Identical to suword on i386
415 * (both store 32 bits).  Returns 0 on success, -1 on fault.
416 */
416ENTRY(suword32)
417 movl PCPU(curthread),%ecx
418 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
419 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
420 movl %esp,PCB_ONFAULT_SP(%ecx)
421 movl 4(%esp),%edx /* to */
422
423 cmpl $VM_MAX_USER_ADDRESS-4,%edx /* verify address validity */
424 ja fusufault
425
426 movl 8(%esp),%eax /* value */
427 movl %eax,(%edx) /* may fault -> fusufault */
428 xorl %eax,%eax /* return 0 */
429 movl PCPU(curthread),%ecx
430 movl TD_PCB(%ecx),%ecx
431 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
432 ret
433
434/*
435 * susword - MP SAFE
436 *
437 * Write a 16-bit word to user memory.  Returns 0 on success, -1 on
438 * fault.
439 */
437ENTRY(susword)
438 movl PCPU(curthread),%ecx
439 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
440 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
441 movl %esp,PCB_ONFAULT_SP(%ecx)
442 movl 4(%esp),%edx /* to */
443
444 cmpl $VM_MAX_USER_ADDRESS-2,%edx /* verify address validity */
445 ja fusufault
446
447 movw 8(%esp),%ax /* value */
448 movw %ax,(%edx) /* may fault -> fusufault */
449 xorl %eax,%eax /* return 0 */
450 movl PCPU(curthread),%ecx /* restore trashed register */
451 movl TD_PCB(%ecx),%ecx
452 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
453 ret
454
455/*
456 * subyte - MP SAFE
457 *
458 * Write a byte to user memory.  Returns 0 on success, -1 on fault.
459 */
458ENTRY(subyte)
459 movl PCPU(curthread),%ecx
460 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
461 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault handler */
462 movl %esp,PCB_ONFAULT_SP(%ecx)
463 movl 4(%esp),%edx /* to */
464
465 cmpl $VM_MAX_USER_ADDRESS-1,%edx /* verify address validity */
466 ja fusufault
467
468 movb 8(%esp),%al /* value */
469 movb %al,(%edx) /* may fault -> fusufault */
470 xorl %eax,%eax /* return 0 */
471 movl PCPU(curthread),%ecx /* restore trashed register */
472 movl TD_PCB(%ecx),%ecx
473 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
474 ret
475
476/*
477 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
478 *
479 * copy a string from from to to, stop when a 0 character is reached.
480 * return ENAMETOOLONG if string is longer than maxlen, and
481 * EFAULT on protection violations. If lencopied is non-zero,
482 * return the actual length in *lencopied.
483 *
484 * Register usage in the loop: %esi = src, %edi = dst, %edx = bytes
485 * remaining + 1 (pre-incremented so the dec-at-top loop works).
486 */
484ENTRY(copyinstr)
485 pushl %esi
486 pushl %edi
487 movl PCPU(curthread),%ecx
488 movl TD_PCB(%ecx),%ecx /* %ecx = current pcb */
489 movl $cpystrflt,PCB_ONFAULT(%ecx) /* arm fault handler */
490 movl %esp,PCB_ONFAULT_SP(%ecx)
491
492 movl 12(%esp),%esi /* %esi = from */
493 movl 16(%esp),%edi /* %edi = to */
494 movl 20(%esp),%edx /* %edx = maxlen */
495
496 movl $VM_MAX_USER_ADDRESS,%eax
497
498 /* make sure 'from' is within bounds */
499 subl %esi,%eax /* %eax = bytes of user space above from */
500 jbe cpystrflt
501
502 /* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
503 cmpl %edx,%eax
504 jae 1f
505 movl %eax,%edx
506 movl %eax,20(%esp) /* clamped maxlen; used below for length calc */
5071:
508 incl %edx /* bias for the dec-at-top loop */
509 cld
510
5112:
512 decl %edx
513 jz 3f /* ran out of room */
514
515 lodsb /* may fault -> cpystrflt */
516 stosb
517 orb %al,%al /* NUL terminator? */
518 jnz 2b
519
520 /* Success -- 0 byte reached */
521 decl %edx /* 20(%esp) - %edx = length incl. NUL */
522 xorl %eax,%eax
523 jmp cpystrflt_x
5243:
525 /* edx is zero - return ENAMETOOLONG or EFAULT */
526 cmpl $VM_MAX_USER_ADDRESS,%esi
527 jae cpystrflt /* stopped at end of user space, not maxlen */
5284: /* NOTE(review): label 4 is unreferenced (dead) */
529 movl $ENAMETOOLONG,%eax
530 jmp cpystrflt_x
531
/* fault during copy, or bad `from': return EFAULT */
532cpystrflt:
533 movl $EFAULT,%eax
534
535cpystrflt_x:
536 /* set *lencopied and return %eax */
537 movl PCPU(curthread),%ecx
538 movl TD_PCB(%ecx),%ecx
539 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
540 movl 20(%esp),%ecx /* (possibly clamped) maxlen */
541 subl %edx,%ecx /* bytes actually copied */
542 movl 24(%esp),%edx /* int *lencopied */
543 testl %edx,%edx
544 jz 1f /* NULL: caller doesn't want length */
545 movl %ecx,(%edx)
5461:
547 popl %edi
548 popl %esi
549 ret
550
551
552/*
553 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
554 *
555 * Kernel-to-kernel variant of copyinstr: no user-address checks and
556 * no fault handler.  Returns 0 or ENAMETOOLONG; if lencopied is
557 * non-NULL the copied length (including the NUL) is stored there.
558 */
555ENTRY(copystr)
556 pushl %esi
557 pushl %edi
558
559 movl 12(%esp),%esi /* %esi = from */
560 movl 16(%esp),%edi /* %edi = to */
561 movl 20(%esp),%edx /* %edx = maxlen */
562 incl %edx /* bias for the dec-at-top loop */
563 cld
5641:
565 decl %edx
566 jz 4f /* ran out of room */
567 lodsb
568 stosb
569 orb %al,%al /* NUL terminator? */
570 jnz 1b
571
572 /* Success -- 0 byte reached */
573 decl %edx /* maxlen - %edx = length incl. NUL */
574 xorl %eax,%eax
575 jmp 6f
5764:
577 /* edx is zero -- return ENAMETOOLONG */
578 movl $ENAMETOOLONG,%eax
579
5806:
581 /* set *lencopied and return %eax */
582 movl 20(%esp),%ecx
583 subl %edx,%ecx /* bytes actually copied */
584 movl 24(%esp),%edx /* int *lencopied */
585 testl %edx,%edx
586 jz 7f /* NULL: caller doesn't want length */
587 movl %ecx,(%edx)
5887:
589 popl %edi
590 popl %esi
591 ret
592
/*
 * bcmp(b1, b2, len) - compare two byte strings.
 *
 * Returns 0 if identical, non-zero (1) otherwise.  Compares
 * dword-at-a-time first, then the trailing 0-3 bytes.
 */
593ENTRY(bcmp)
594 pushl %edi
595 pushl %esi
596 movl 12(%esp),%edi /* b1 */
597 movl 16(%esp),%esi /* b2 */
598 movl 20(%esp),%edx /* len */
599 xorl %eax,%eax /* assume equal */
600
601 movl %edx,%ecx
602 shrl $2,%ecx /* dword count */
603 cld /* compare forwards */
604 repe
605 cmpsl
606 jne 1f
607
608 movl %edx,%ecx
609 andl $3,%ecx /* trailing byte count */
610 repe
611 cmpsb
612 je 2f
6131:
614 incl %eax /* mismatch: return 1 */
6152:
616 popl %esi
617 popl %edi
618 ret
619
620
621/*
622 * Handling of special 386 registers and descriptor tables etc
623 */
624/* void lgdt(struct region_descriptor *rdp); */
625ENTRY(lgdt)
626 /* reload the descriptor table */
627 movl 4(%esp),%eax
628 lgdt (%eax)
629
630 /* flush the prefetch q */
631 jmp 1f
632 nop
6331:
634 /* reload "stale" selectors */
635 movl $KDSEL,%eax /* kernel data selector */
636 mov %ax,%ds
637 mov %ax,%es
638 mov %ax,%gs
639 mov %ax,%ss
640 movl $KPSEL,%eax /* per-cpu private data selector */
641 mov %ax,%fs
/*
 * NOTE(review): %gs is loaded twice (KDSEL above, KPSEL here); the
 * final value is KPSEL -- the first load looks redundant, confirm
 * intended final selector against the per-cpu segment setup.
 */
642 mov %ax,%gs
643
644 /* reload code selector by turning return into intersegmental return */
645 movl (%esp),%eax
646 pushl %eax
647 movl $KCSEL,4(%esp) /* kernel code selector */
648 lret
649
650/*
651 * void lidt(struct region_descriptor *rdp);
652 *
653 * Load the interrupt descriptor table register.
654 */
653ENTRY(lidt)
654 movl 4(%esp),%eax
655 lidt (%eax)
656 ret
657
658/*
659 * void lldt(u_short sel)
660 *
661 * Load the local descriptor table register from a selector.
662 */
661ENTRY(lldt)
662 lldt 4(%esp)
663 ret
664
665/*
666 * void ltr(u_short sel)
667 *
668 * Load the task register from a TSS selector.
669 */
668ENTRY(ltr)
669 ltr 4(%esp)
670 ret
671
672/* ssdtosd(*ssdp,*sdp)
 *
 * Convert a machine-independent software segment descriptor (ssd) at
 * *ssdp into the packed hardware segment-descriptor layout at *sdp.
 * The hardware format scatters the base and limit fields, hence the
 * shift/rotate shuffling below.  NOTE(review): field placement assumed
 * from the i386 descriptor format -- verify against struct
 * soft_segment_descriptor in segments.h.
 */
673ENTRY(ssdtosd)
674 pushl %ebx
675 movl 8(%esp),%ecx /* %ecx = ssdp */
676 movl 8(%ecx),%ebx
677 shll $16,%ebx
678 movl (%ecx),%edx
679 roll $16,%edx
680 movb %dh,%bl
681 movb %dl,%bh
682 rorl $8,%ebx
683 movl 4(%ecx),%eax
684 movw %ax,%dx
685 andl $0xf0000,%eax
686 orl %eax,%ebx
687 movl 12(%esp),%ecx /* %ecx = sdp */
688 movl %edx,(%ecx) /* low word of hardware descriptor */
689 movl %ebx,4(%ecx) /* high word of hardware descriptor */
690 popl %ebx
691 ret
692
693/* load_cr0(cr0) - write the CR0 control register */
694ENTRY(load_cr0)
695 movl 4(%esp),%eax
696 movl %eax,%cr0
697 ret
698
699/* rcr0() - read the CR0 control register */
700ENTRY(rcr0)
701 movl %cr0,%eax
702 ret
703
704/* rcr3() - read CR3 (page directory base register) */
705ENTRY(rcr3)
706 movl %cr3,%eax
707 ret
708
709/* void load_cr3(caddr_t cr3)
 *
 * Write CR3; as a side effect this flushes the non-global TLB entries.
 */
710ENTRY(load_cr3)
711#if defined(SWTCH_OPTIM_STATS)
712 incl _tlb_flush_count
713#endif
714 movl 4(%esp),%eax
715 movl %eax,%cr3
716 ret
717
718/* rcr4() - read the CR4 control register */
719ENTRY(rcr4)
720 movl %cr4,%eax
721 ret
722
723/* void load_cr4(caddr_t cr4) - write the CR4 control register */
724ENTRY(load_cr4)
725 movl 4(%esp),%eax
726 movl %eax,%cr4
727 ret
728
729/* void reset_dbregs()
 *
 * Clear all hardware debug registers: disable breakpoints via %dr7
 * first, then zero the address (%dr0-%dr3) and status (%dr6)
 * registers.  (%dr4/%dr5 are aliases and not touched.)
 */
730ENTRY(reset_dbregs)
731 movl $0,%eax
732 movl %eax,%dr7 /* disable all breakpoints first */
733 movl %eax,%dr0
734 movl %eax,%dr1
735 movl %eax,%dr2
736 movl %eax,%dr3
737 movl %eax,%dr6
738 ret
739
740/*****************************************************************************/
741/* setjump, longjump */
742/*****************************************************************************/
743
744ENTRY(setjmp)
745 movl 4(%esp),%eax
746 movl %ebx,(%eax) /* save ebx */
747 movl %esp,4(%eax) /* save esp */
748 movl %ebp,8(%eax) /* save ebp */
749 movl %esi,12(%eax) /* save esi */
750 movl %edi,16(%eax) /* save edi */
751 movl (%esp),%edx /* get rta */
752 movl %edx,20(%eax) /* save eip */
753 xorl %eax,%eax /* return(0); */
754 ret
755
/*
 * void longjmp(jmp_buf)
 *
 * Restore the context saved by setjmp() and resume at the saved
 * return address with return value 1.
 */
756ENTRY(longjmp)
757 movl 4(%esp),%eax /* %eax = jmp_buf */
758 movl (%eax),%ebx /* restore ebx */
759 movl 4(%eax),%esp /* restore esp */
760 movl 8(%eax),%ebp /* restore ebp */
761 movl 12(%eax),%esi /* restore esi */
762 movl 16(%eax),%edi /* restore edi */
763 movl 20(%eax),%edx /* get rta */
764 movl %edx,(%esp) /* put in return frame */
765 xorl %eax,%eax /* return(1); */
766 incl %eax
767 ret
768
769/*
770 * Support for reading MSRs in the safe manner.  A #GP from rdmsr
771 * (e.g. a non-existent MSR) is caught via pcb_onfault and turned
772 * into an EFAULT return instead of a panic.
773 */
772ENTRY(rdmsr_safe)
773/* int rdmsr_safe(u_int msr, uint64_t *data) */
774 movl PCPU(curthread),%ecx
775 movl TD_PCB(%ecx), %ecx /* %ecx = current pcb */
776 movl $msr_onfault,PCB_ONFAULT(%ecx) /* arm fault handler */
777 movl %esp,PCB_ONFAULT_SP(%ecx)
778
779 movl 4(%esp),%ecx /* MSR number */
780 rdmsr /* may fault -> msr_onfault */
781 movl 8(%esp),%ecx /* uint64_t *data */
782 movl %eax,(%ecx) /* low 32 bits */
783 movl %edx,4(%ecx) /* high 32 bits */
784 xorl %eax,%eax /* return 0 */
785
786 movl PCPU(curthread),%ecx
787 movl TD_PCB(%ecx), %ecx
788 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault handler */
789
790 ret
791
792/*
793 * MSR operations fault handler: clear pcb_onfault and return EFAULT.
794 */
795 ALIGN_TEXT
796msr_onfault:
797 movl PCPU(curthread),%ecx
798 movl TD_PCB(%ecx), %ecx
799 movl $0,PCB_ONFAULT(%ecx) /* disarm fault handler */
800 movl $EFAULT,%eax
801 ret
802
803/*
804 * Support for BB-profiling (gcc -a). The kernbb program will extract
805 * the data from the kernel.
806 *
807 * bbhead is the list head of registered basic-block profile records;
808 * __bb_init_func is called once per object to mark its record live
809 * and link it onto the list.
810 */
807
808 .data
809 ALIGN_DATA
810 .globl bbhead
811bbhead:
812 .long 0 /* empty list initially */
813
814 .text
815NON_GPROF_ENTRY(__bb_init_func)
816 movl 4(%esp),%eax /* %eax = bb record */
817 movl $1,(%eax) /* mark record initialized */
/*
 * NOTE(review): offset 16 is presumably the record's `next' link --
 * verify against the gcc -a basic-block structure layout.
 */
818 movl bbhead,%edx
819 movl %edx,16(%eax) /* record->next = bbhead */
820 movl %eax,bbhead /* bbhead = record */
821 .byte 0xc3 /* avoid macro for `ret' */