Merge from vendor branch GCC:
[dragonfly.git] / sys / i386 / apic / apic_vector.s
1 /*
2  *      from: vector.s, 386BSD 0.1 unknown origin
3  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4  * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.18 2004/02/21 06:37:08 dillon Exp $
5  */
6
7
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask (single bit irq_num set) */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC redirection table from the IRQ#;
 * each redirection entry is a pair of 32-bit registers starting at 0x10 */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.  The frame consists of dummy
 * error-code and trap-type words, the general registers (pushal) and
 * %ds/%es/%fs.  KDSEL (kernel data) is then loaded into %ds/%es and
 * KPSEL (per-cpu data) into %fs.  Clobbers %ax.
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
/*
 * Build a dummy doreti-compatible trap frame without saving any real
 * register state: eflags, %cs, the original caller %eip, dummy error
 * code and trap type, then 12 reserved dwords standing in for pushal,
 * the three segment registers and the CPL -- 17 dwords total, undone
 * by POP_DUMMY.  Used by FAST_UNPEND.
 */
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.  Restores %fs/%es/%ds and the general registers,
 * then discards the dummy trap-type and error-code words pushed by
 * PUSH_FRAME.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
/* discard the 17-dword dummy frame built by PUSH_DUMMY */
55 #define POP_DUMMY                                                       \
56         addl    $17*4,%esp ;                                            \
57
/*
 * Per-IRQ IO APIC access info.  int_to_apicintpin[] entries are 16
 * bytes each; offset 8 appears to hold the ioapic register-select
 * address and offset 12 the redirection-table index.
 * NOTE(review): the structure layout is defined in C elsewhere --
 * confirm these offsets against the MP table / intr_machdep code.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.  The imen_spinlock protects apic_imen and the
 * IO APIC index/window register accesses made while it is held
 * (see MASK_IRQ / UNMASK_IRQ below).
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70
/*
 * Mask the given IRQ: set its bit in apic_imen and set the INTMASK
 * bit in its IO APIC redirection entry.  No-op if the bit is already
 * set.  Clobbers %eax and %ecx; interrupts must be disabled.
 */
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.  Edge INTs
 *  are left unmasked.  Clobbers %eax/%ecx when the mask path is taken
 *  (via MASK_IRQ).
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
95
96
/*
 * Issue an EOI to the local APIC, but only if the IRQ's in-service
 * (ISR) bit is actually set, so we never EOI an interrupt that is not
 * being serviced.  The APIC_INTR_REORDER variant looks the ISR word
 * address and bitmask up in the apic_isrbit_location table (8 bytes
 * per IRQ: address at +0, mask at +4) and clobbers %eax; the plain
 * variant tests lapic_isr1 directly.
 */
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115
115
116 /*
117  * Test to see if the source is currently masked in apic_imen; if so,
118  * clear the mask bit both there and in the IO APIC redirection entry.
 *  Clobbers %eax and %ecx; interrupts must be disabled.
 */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
132
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.  Note that we cannot mess with mp_lock at all
140  *        if we entered from a critical section!
141  *      - If we can take the interrupt clear its fpending bit,
142  *        call the handler, then unmask and doreti.
 *
 *      NOTE: the IPI-forwarding tail (label 6:) exits through the
 *      common exit at label 5:, which PRECEDES it inside this macro,
 *      so the jump must be a backwards numeric-label reference (5b).
 *      A forward reference (5f) would silently bind to the 5: label
 *      of the NEXT macro expansion in the output stream, and would
 *      fail to assemble if an expansion were last in the file.
143  *
144  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
145  */
146
147 #define FAST_INTR(irq_num, vec_name)                                    \
148         .text ;                                                         \
149         SUPERALIGN_TEXT ;                                               \
150 IDTVEC(vec_name) ;                                                      \
151         PUSH_FRAME ;                                                    \
152         FAKE_MCOUNT(13*4(%esp)) ;                                       \
153         MASK_LEVEL_IRQ(irq_num) ;                                       \
154         EOI_IRQ(irq_num) ;                                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    TD_CPL(%ebx),%eax ;                                     \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jge     1f ;                                                    \
160         testl   $IRQ_LBIT(irq_num), %eax ;                              \
161         jz      2f ;                                                    \
162 1: ;                                                                    \
163         /* in critical section, make interrupt pending */               \
164         /* set the pending bit and return, leave interrupt masked */    \
165         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
166         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
167         jmp     5f ;                                                    \
168 2: ;                                                                    \
169         /* try to get the MP lock */                                    \
170         call    try_mplock ;                                            \
171         testl   %eax,%eax ;                                             \
172         jz      6f ;                                                    \
173         /* clear pending bit, run handler */                            \
174         incl    PCPU(intr_nesting_level) ;                              \
175         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
176         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
177         pushl   intr_unit + (irq_num) * 4 ;                             \
178         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
179         addl    $4, %esp ;                                              \
180         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
181         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
182         movl    intr_countp + (irq_num) * 4, %eax ;                     \
183         incl    (%eax) ;                                                \
184         decl    PCPU(intr_nesting_level) ;                              \
185         call    rel_mplock ;                                            \
186         UNMASK_IRQ(irq_num) ;                                           \
187 5: ;                                                                    \
188         MEXITCOUNT ;                                                    \
189         jmp     doreti ;                                                \
190 6: ;                                                                    \
191         /* could not get the MP lock, forward the interrupt */          \
192         movl    mp_lock, %eax ;          /* check race */               \
193         cmpl    $MP_FREE_LOCK,%eax ;                                    \
194         je      2b ;                                                    \
195         incl    PCPU(cnt)+V_FORWARDED_INTS ;                            \
196         subl    $12,%esp ;                                              \
197         movl    $irq_num,8(%esp) ;                                      \
198         movl    $forward_fastint_remote,4(%esp) ;                       \
199         movl    %eax,(%esp) ;                                           \
200         call    lwkt_send_ipiq_bycpu ;                                  \
201         addl    $12,%esp ;                                              \
202         jmp     5b ;            /* common exit (5:) is BACKWARDS */     \
203
204 /*
205  * Restart fast interrupt held up by critical section or cpl.
206  *
207  *      - Push a dummy trap frame as required by doreti
208  *      - The interrupt source is already masked
209  *      - Clear the fpending bit
210  *      - Run the handler
211  *      - Unmask the interrupt
212  *      - Pop the dummy frame and do a normal return
213  *
214  *      The BGL is held on call and left held on return.
215  *
 *      NOTE(review): this macro itself never touches fpending --
 *      presumably the caller clears the bit before dispatching here;
 *      confirm against the unpend dispatch code.
 *
216  *      YYY can cache gd base pointer instead of using hidden %fs
217  *      prefixes.
218  */
219
220 #define FAST_UNPEND(irq_num, vec_name)                                  \
221         .text ;                                                         \
222         SUPERALIGN_TEXT ;                                               \
223 IDTVEC(vec_name) ;                                                      \
224         pushl   %ebp ;                                                  \
225         movl    %esp,%ebp ;                                             \
226         PUSH_DUMMY ;                                                    \
227         pushl   intr_unit + (irq_num) * 4 ;                             \
228         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
229         addl    $4, %esp ;                                              \
230         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
231         movl    intr_countp + (irq_num) * 4, %eax ;                     \
232         incl    (%eax) ;                                                \
233         UNMASK_IRQ(irq_num) ;                                           \
234         POP_DUMMY ;                                                     \
235         popl %ebp ;                                                     \
236         ret ;                                                           \
237
238 /*
239  * Slow interrupt call handlers run in the following sequence:
240  *
241  *      - Push the trap frame required by doreti.
242  *      - Mask the interrupt and reenable its source.
243  *      - If we cannot take the interrupt set its ipending bit and
244  *        doreti.  In addition to checking for a critical section
245  *        and cpl mask we also check to see if the thread is still
246  *        running.  Note that we cannot mess with mp_lock at all
247  *        if we entered from a critical section!
248  *      - If we can take the interrupt clear its ipending bit
249  *        and schedule the thread.  Leave interrupts masked and doreti.
250  *
251  *      Note that calls to sched_ithd() are made with interrupts enabled
252  *      and outside a critical section.  YYY sched_ithd may preempt us
253  *      synchronously (fix interrupt stacking).
254  *
255  *      YYY can cache gd base pointer instead of using hidden %fs
256  *      prefixes.
257  */
258
259 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
260         .text ;                                                         \
261         SUPERALIGN_TEXT ;                                               \
262 IDTVEC(vec_name) ;                                                      \
263         PUSH_FRAME ;                                                    \
264         maybe_extra_ipending ;                                          \
265 ;                                                                       \
266         MASK_LEVEL_IRQ(irq_num) ;                                       \
267         EOI_IRQ(irq_num) ;                                              \
268         movl    PCPU(curthread),%ebx ;                                  \
269         movl    TD_CPL(%ebx),%eax ;                                     \
270         pushl   %eax ;          /* cpl to restore */                    \
271         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
272         jge     1f ;                                                    \
273         testl   $IRQ_LBIT(irq_num),%eax ;                               \
274         jz      2f ;                                                    \
275 1: ;                                                                    \
276         /* set the pending bit and return, leave the interrupt masked */ \
277         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
278         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
279         jmp     5f ;                                                    \
280 2: ;                                                                    \
281         /* set running bit, clear pending bit, run handler */           \
282         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
283         sti ;                                                           \
284         pushl   $irq_num ;                                              \
285         call    sched_ithd ;                                            \
286         addl    $4,%esp ;                                               \
287         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
288         movl    intr_countp + (irq_num) * 4,%eax ;                      \
289         incl    (%eax) ;                                                \
290 5: ;                                                                    \
291         MEXITCOUNT ;                                                    \
292         jmp     doreti ;                                                \
293
294
295 /*
296  * Handle "spurious INTerrupts".
297  * Notes:
298  *  This is different than the "spurious INTerrupt" generated by an
299  *   8259 PIC for missing INTs.  See the APIC documentation for details.
300  *  This routine should NOT do an 'EOI' cycle.  No state is saved or
 *   modified; the handler simply returns with iret.
301  */
302         .text
303         SUPERALIGN_TEXT
304         .globl Xspuriousint
305 Xspuriousint:
306
307         /* No EOI cycle used here */
308
309         iret
310
311
312 /*
313  * Handle TLB shootdowns.  Reloading %cr3 with its own value flushes
 * the TLB.  %ss-relative addressing is used below so that no kernel
 * data segment needs to be loaded; only %eax is clobbered and it is
 * saved/restored around the handler.
314  */
315         .text
316         SUPERALIGN_TEXT
317         .globl  Xinvltlb
318 Xinvltlb:
319         pushl   %eax
320
321 #ifdef COUNT_XINVLTLB_HITS
322         pushl   %fs
323         movl    $KPSEL, %eax
324         mov     %ax, %fs
325         movl    PCPU(cpuid), %eax
326         popl    %fs
327         ss
328         incl    xhits(,%eax,4)          /* bump this cpu's counter; the
                                           array is declared as `xhits'
                                           in the data section below
                                           (was `_xhits', a symbol not
                                           defined anywhere, which broke
                                           the link with this option) */
329 #endif /* COUNT_XINVLTLB_HITS */
330
331         movl    %cr3, %eax              /* invalidate the TLB */
332         movl    %eax, %cr3
333
334         ss                              /* stack segment, avoid %ds load */
335         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
336
337         popl    %eax
338         iret
339
340
341 /*
342  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
343  *
344  *  - Signals its receipt.
345  *  - Waits for permission to restart.
346  *  - Signals its restart.
 *
 *  All general registers and %ds/%fs are preserved.  The cpu's context
 *  is first saved into stoppcbs[cpuid] via savectx() so it can be
 *  examined while the cpu is spinning.
347  */
348
349         .text
350         SUPERALIGN_TEXT
351         .globl Xcpustop
352 Xcpustop:
353         pushl   %ebp
354         movl    %esp, %ebp
355         pushl   %eax
356         pushl   %ecx
357         pushl   %edx
358         pushl   %ds                     /* save current data segment */
359         pushl   %fs
360
361         movl    $KDSEL, %eax
362         mov     %ax, %ds                /* use KERNEL data segment */
363         movl    $KPSEL, %eax
364         mov     %ax, %fs
365
366         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
367
368         movl    PCPU(cpuid), %eax
369         imull   $PCB_SIZE, %eax
370         leal    CNAME(stoppcbs)(%eax), %eax
371         pushl   %eax
372         call    CNAME(savectx)          /* Save process context */
373         addl    $4, %esp
374
375
376         movl    PCPU(cpuid), %eax
377
378         lock
379         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
380 1:
381         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
382         jnc     1b
383
384         lock
385         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
386         lock
387         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
388
389         test    %eax, %eax              /* %eax still holds cpuid */
390         jnz     2f                      /* only cpu 0 runs restartfunc */
391
392         movl    CNAME(cpustop_restartfunc), %eax
393         test    %eax, %eax
394         jz      2f                      /* no restart function registered */
395         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
396
397         call    *%eax
398 2:
399         popl    %fs
400         popl    %ds                     /* restore previous data segment */
401         popl    %edx
402         popl    %ecx
403         popl    %eax
404         movl    %ebp, %esp
405         popl    %ebp
406         iret
407
408         /*
409          * For now just have one ipiq IPI, but what we really want is
410          * to have one for each source cpu so the APICs don't get stalled
411          * backlogging the requests.  If the current thread is already in
         * a critical section we cannot process the queue now; we just
         * set RQF_IPIQ in reqflags for later processing and iret.
412          */
413         .text
414         SUPERALIGN_TEXT
415         .globl Xipiq
416 Xipiq:
417         PUSH_FRAME
418         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
419         FAKE_MCOUNT(13*4(%esp))
420
421         movl    PCPU(curthread),%ebx
422         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
423         jge     1f
424         subl    $8,%esp                 /* make same as interrupt frame */
425         incl    PCPU(intr_nesting_level)
426         addl    $TDPRI_CRIT,TD_PRI(%ebx)
427         call    lwkt_process_ipiq_frame
428         subl    $TDPRI_CRIT,TD_PRI(%ebx)
429         decl    PCPU(intr_nesting_level)
430         addl    $8,%esp
431         pushl   TD_CPL(%ebx)
432         MEXITCOUNT
433         jmp     doreti
434 1:
435         orl     $RQF_IPIQ,PCPU(reqflags)
436         MEXITCOUNT
437         POP_FRAME
438         iret
439
440 MCOUNT_LABEL(bintr)
/* fast interrupt entry points, one per IRQ 0-23 (see FAST_INTR above) */
441         FAST_INTR(0,fastintr0)
442         FAST_INTR(1,fastintr1)
443         FAST_INTR(2,fastintr2)
444         FAST_INTR(3,fastintr3)
445         FAST_INTR(4,fastintr4)
446         FAST_INTR(5,fastintr5)
447         FAST_INTR(6,fastintr6)
448         FAST_INTR(7,fastintr7)
449         FAST_INTR(8,fastintr8)
450         FAST_INTR(9,fastintr9)
451         FAST_INTR(10,fastintr10)
452         FAST_INTR(11,fastintr11)
453         FAST_INTR(12,fastintr12)
454         FAST_INTR(13,fastintr13)
455         FAST_INTR(14,fastintr14)
456         FAST_INTR(15,fastintr15)
457         FAST_INTR(16,fastintr16)
458         FAST_INTR(17,fastintr17)
459         FAST_INTR(18,fastintr18)
460         FAST_INTR(19,fastintr19)
461         FAST_INTR(20,fastintr20)
462         FAST_INTR(21,fastintr21)
463         FAST_INTR(22,fastintr22)
464         FAST_INTR(23,fastintr23)
465
466         /* YYY what is this garbage? */
467
/* slow interrupt entry points, one per IRQ 0-23 (see INTR above) */
468         INTR(0,intr0,)
469         INTR(1,intr1,)
470         INTR(2,intr2,)
471         INTR(3,intr3,)
472         INTR(4,intr4,)
473         INTR(5,intr5,)
474         INTR(6,intr6,)
475         INTR(7,intr7,)
476         INTR(8,intr8,)
477         INTR(9,intr9,)
478         INTR(10,intr10,)
479         INTR(11,intr11,)
480         INTR(12,intr12,)
481         INTR(13,intr13,)
482         INTR(14,intr14,)
483         INTR(15,intr15,)
484         INTR(16,intr16,)
485         INTR(17,intr17,)
486         INTR(18,intr18,)
487         INTR(19,intr19,)
488         INTR(20,intr20,)
489         INTR(21,intr21,)
490         INTR(22,intr22,)
491         INTR(23,intr23,)
492
/* restart entry points for deferred fast interrupts (see FAST_UNPEND above) */
493         FAST_UNPEND(0,fastunpend0)
494         FAST_UNPEND(1,fastunpend1)
495         FAST_UNPEND(2,fastunpend2)
496         FAST_UNPEND(3,fastunpend3)
497         FAST_UNPEND(4,fastunpend4)
498         FAST_UNPEND(5,fastunpend5)
499         FAST_UNPEND(6,fastunpend6)
500         FAST_UNPEND(7,fastunpend7)
501         FAST_UNPEND(8,fastunpend8)
502         FAST_UNPEND(9,fastunpend9)
503         FAST_UNPEND(10,fastunpend10)
504         FAST_UNPEND(11,fastunpend11)
505         FAST_UNPEND(12,fastunpend12)
506         FAST_UNPEND(13,fastunpend13)
507         FAST_UNPEND(14,fastunpend14)
508         FAST_UNPEND(15,fastunpend15)
509         FAST_UNPEND(16,fastunpend16)
510         FAST_UNPEND(17,fastunpend17)
511         FAST_UNPEND(18,fastunpend18)
512         FAST_UNPEND(19,fastunpend19)
513         FAST_UNPEND(20,fastunpend20)
514         FAST_UNPEND(21,fastunpend21)
515         FAST_UNPEND(22,fastunpend22)
516         FAST_UNPEND(23,fastunpend23)
517 MCOUNT_LABEL(eintr)
518
519         /*
520          * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
521          *
522          * - Calls the generic rendezvous action function.
523          */
524         .text
525         SUPERALIGN_TEXT
526         .globl  Xrendezvous
527 Xrendezvous:
528         PUSH_FRAME
529         movl    $KDSEL, %eax            /* NOTE(review): PUSH_FRAME already
                                           loads these selectors, so the
                                           reload below is redundant but
                                           harmless */
530         mov     %ax, %ds                /* use KERNEL data segment */
531         mov     %ax, %es
532         movl    $KPSEL, %eax
533         mov     %ax, %fs
534
535         call    smp_rendezvous_action
536
537         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
538         POP_FRAME
539         iret
540         
541         
542         .data
543
/*
 * Dead legacy interrupt tables, compiled out (#if 0).  Retained for
 * historical reference only.
 */
544 #if 0
545 /*
546  * Addresses of interrupt handlers.
547  *  XresumeNN: Resumption addresses for HWIs.
548  */
549         .globl _ihandlers
550 _ihandlers:
551 /*
552  * used by:
553  *  ipl.s:      doreti_unpend
554  */
555         .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
556         .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
557         .long   Xresume8,  Xresume9,  Xresume10, Xresume11
558         .long   Xresume12, Xresume13, Xresume14, Xresume15 
559         .long   Xresume16, Xresume17, Xresume18, Xresume19
560         .long   Xresume20, Xresume21, Xresume22, Xresume23
561 /*
562  * used by:
563  *  ipl.s:      doreti_unpend
564  *  apic_ipl.s: splz_unpend
565  */
566         .long   _swi_null, swi_net, _swi_null, _swi_null
567         .long   _swi_vm, _swi_null, _softclock
568
569 imasks:                         /* masks for interrupt handlers */
570         .space  NHWI*4          /* padding; HWI masks are elsewhere */
571
572         .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
573         .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
574 #endif  /* 0 */
575
576
577 #ifdef COUNT_XINVLTLB_HITS
/*
 * Per-cpu TLB-shootdown hit counters, 4 bytes per cpu, indexed by
 * cpuid.  NOTE(review): keep this symbol name in sync with the
 * COUNT_XINVLTLB_HITS reference in Xinvltlb above (historically
 * spelled `_xhits' there -- verify the names match).
 */
578         .globl  xhits
579 xhits:
580         .space  (NCPU * 4), 0
581 #endif /* COUNT_XINVLTLB_HITS */
582
583 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
584         .globl stopped_cpus, started_cpus
585 stopped_cpus:
586         .long   0               /* bitmask of cpus that have stopped */
587 started_cpus:
588         .long   0               /* bitmask of cpus permitted to restart */
589
590         .globl CNAME(cpustop_restartfunc)
591 CNAME(cpustop_restartfunc):
592         .long 0                 /* optional one-shot function run on restart */
593
/*
 * Per-IRQ trigger-mode bitmask: a set bit marks the pin as level
 * triggered (tested by MASK_LEVEL_IRQ above).
 */
594         .globl  apic_pin_trigger
595 apic_pin_trigger:
596         .long   0
597
598         .text