/*
 * (gitweb extraction residue, preserved as a comment:)
 * Merge from vendor branch GCC:
 * [dragonfly.git] / sys / i386 / apic / apic_vector.s
 */
/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.22 2005/09/10 06:48:08 dillon Exp $
 */
6
7
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
11
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_LBIT(irq_num)       (1 << (irq_num))
14
15 /* make an index into the IO APIC from the IRQ# */
16 #define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
17
18 /*
19  * Push an interrupt frame in a format acceptable to doreti, reload
20  * the segment registers for the kernel.
21  */
22 #define PUSH_FRAME                                                      \
23         pushl   $0 ;            /* dummy error code */                  \
24         pushl   $0 ;            /* dummy trap type */                   \
25         pushal ;                                                        \
26         pushl   %ds ;           /* save data and extra segments ... */  \
27         pushl   %es ;                                                   \
28         pushl   %fs ;                                                   \
29         mov     $KDSEL,%ax ;                                            \
30         mov     %ax,%ds ;                                               \
31         mov     %ax,%es ;                                               \
32         mov     $KPSEL,%ax ;                                            \
33         mov     %ax,%fs ;                                               \
34
35 #define PUSH_DUMMY                                                      \
36         pushfl ;                /* phys int frame / flags */            \
37         pushl %cs ;             /* phys int frame / cs */               \
38         pushl   12(%esp) ;      /* original caller eip */               \
39         pushl   $0 ;            /* dummy error code */                  \
40         pushl   $0 ;            /* dummy trap type */                   \
41         subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
42
43 /*
44  * Warning: POP_FRAME can only be used if there is no chance of a
45  * segment register being changed (e.g. by procfs), which is why syscalls
46  * have to use doreti.
47  */
48 #define POP_FRAME                                                       \
49         popl    %fs ;                                                   \
50         popl    %es ;                                                   \
51         popl    %ds ;                                                   \
52         popal ;                                                         \
53         addl    $2*4,%esp ;     /* dummy trap & error codes */          \
54
55 #define POP_DUMMY                                                       \
56         addl    $17*4,%esp ;                                            \
57
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
60
61 /*
62  * Interrupts are expected to already be disabled when using these
63  * IMASK_*() macros.
64  */
65 #define IMASK_LOCK                                                      \
66         SPIN_LOCK(imen_spinlock) ;                                      \
67
68 #define IMASK_UNLOCK                                                    \
69         SPIN_UNLOCK(imen_spinlock) ;                                    \
70         
71 #define MASK_IRQ(irq_num)                                               \
72         IMASK_LOCK ;                            /* into critical reg */ \
73         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
74         jne     7f ;                    /* masked, don't mask */        \
75         orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
76         movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
77         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
78         movl    %eax, (%ecx) ;                  /* write the index */   \
79         movl    IOAPIC_WINDOW(%ecx), %eax ;     /* current value */     \
80         orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
81         movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
82 7: ;                                            /* already masked */    \
83         IMASK_UNLOCK ;                                                  \
84
85 /*
86  * Test to see whether we are handling an edge or level triggered INT.
87  *  Level-triggered INTs must still be masked as we don't clear the source,
88  *  and the EOI cycle would cause redundant INTs to occur.
89  */
90 #define MASK_LEVEL_IRQ(irq_num)                                         \
91         testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
92         jz      9f ;                            /* edge, don't mask */  \
93         MASK_IRQ(irq_num) ;                                             \
94 9: ;                                                                    \
95
96
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num)                                                \
99         movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
100         movl    (%eax), %eax ;                                          \
101         testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
102         jz      9f ;                            /* not active */        \
103         movl    $0, lapic_eoi ;                                         \
104 9:                                                                      \
105
106 #else
107
108 #define EOI_IRQ(irq_num)                                                \
109         testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
110         jz      9f      ;                       /* not active */        \
111         movl    $0, lapic_eoi;                                          \
112 9:                                                                      \
113
114 #endif
115         
116 /*
117  * Test to see if the source is currntly masked, clear if so.
118  */
119 #define UNMASK_IRQ(irq_num)                                     \
120         IMASK_LOCK ;                            /* into critical reg */ \
121         testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
122         je      7f ;                    /* bit clear, not masked */     \
123         andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
124         movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
125         movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
126         movl    %eax,(%ecx) ;                   /* write the index */   \
127         movl    IOAPIC_WINDOW(%ecx),%eax ;      /* current value */     \
128         andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
129         movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
130 7: ;                                                                    \
131         IMASK_UNLOCK ;                                                  \
132
133 /*
134  * Fast interrupt call handlers run in the following sequence:
135  *
136  *      - Push the trap frame required by doreti
137  *      - Mask the interrupt and reenable its source
138  *      - If we cannot take the interrupt set its fpending bit and
139  *        doreti.  Note that we cannot mess with mp_lock at all
140  *        if we entered from a critical section!
141  *      - If we can take the interrupt clear its fpending bit,
142  *        call the handler, then unmask and doreti.
143  *
144  * YYY can cache gd base opitner instead of using hidden %fs prefixes.
145  */
146
147 #define FAST_INTR(irq_num, vec_name)                                    \
148         .text ;                                                         \
149         SUPERALIGN_TEXT ;                                               \
150 IDTVEC(vec_name) ;                                                      \
151         PUSH_FRAME ;                                                    \
152         FAKE_MCOUNT(13*4(%esp)) ;                                       \
153         MASK_LEVEL_IRQ(irq_num) ;                                       \
154         EOI_IRQ(irq_num) ;                                              \
155         movl    PCPU(curthread),%ebx ;                                  \
156         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
157         pushl   %eax ;                                                  \
158         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
159         jl      2f ;                                                    \
160 1: ;                                                                    \
161         /* in critical section, make interrupt pending */               \
162         /* set the pending bit and return, leave interrupt masked */    \
163         orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
164         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
165         jmp     5f ;                                                    \
166 2: ;                                                                    \
167         /* try to get the MP lock */                                    \
168         call    try_mplock ;                                            \
169         testl   %eax,%eax ;                                             \
170         jz      6f ;                                                    \
171         /* clear pending bit, run handler */                            \
172         incl    PCPU(intr_nesting_level) ;                              \
173         addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
174         andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
175         pushl   intr_unit + (irq_num) * 4 ;                             \
176         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
177         addl    $4, %esp ;                                              \
178         subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
179         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
180         movl    intr_countp + (irq_num) * 4, %eax ;                     \
181         incl    (%eax) ;                                                \
182         decl    PCPU(intr_nesting_level) ;                              \
183         call    rel_mplock ;                                            \
184         UNMASK_IRQ(irq_num) ;                                           \
185 5: ;                                                                    \
186         MEXITCOUNT ;                                                    \
187         jmp     doreti ;                                                \
188 6: ;                                                                    \
189         /* could not get the MP lock, forward the interrupt */          \
190         movl    mp_lock, %eax ;          /* check race */               \
191         cmpl    $MP_FREE_LOCK,%eax ;                                    \
192         je      2b ;                                                    \
193         incl    PCPU(cnt)+V_FORWARDED_INTS ;                            \
194         subl    $12,%esp ;                                              \
195         movl    $irq_num,8(%esp) ;                                      \
196         movl    $forward_fastint_remote,4(%esp) ;                       \
197         movl    %eax,(%esp) ;                                           \
198         call    lwkt_send_ipiq_bycpu ;                                  \
199         addl    $12,%esp ;                                              \
200         jmp     5f ;                                                    \
201
202 /*
203  * Restart fast interrupt held up by critical section or cpl.
204  *
205  *      - Push a dummy trape frame as required by doreti
206  *      - The interrupt source is already masked
207  *      - Clear the fpending bit
208  *      - Run the handler
209  *      - Unmask the interrupt
210  *      - Pop the dummy frame and do a normal return
211  *
212  *      The BGL is held on call and left held on return.
213  *
214  *      YYY can cache gd base pointer instead of using hidden %fs
215  *      prefixes.
216  */
217
218 #define FAST_UNPEND(irq_num, vec_name)                                  \
219         .text ;                                                         \
220         SUPERALIGN_TEXT ;                                               \
221 IDTVEC(vec_name) ;                                                      \
222         pushl   %ebp ;                                                  \
223         movl    %esp,%ebp ;                                             \
224         PUSH_DUMMY ;                                                    \
225         pushl   intr_unit + (irq_num) * 4 ;                             \
226         call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
227         addl    $4, %esp ;                                              \
228         incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
229         movl    intr_countp + (irq_num) * 4, %eax ;                     \
230         incl    (%eax) ;                                                \
231         UNMASK_IRQ(irq_num) ;                                           \
232         POP_DUMMY ;                                                     \
233         popl %ebp ;                                                     \
234         ret ;                                                           \
235
236 /*
237  * Slow interrupt call handlers run in the following sequence:
238  *
239  *      - Push the trap frame required by doreti.
240  *      - Mask the interrupt and reenable its source.
241  *      - If we cannot take the interrupt set its ipending bit and
242  *        doreti.  In addition to checking for a critical section
243  *        and cpl mask we also check to see if the thread is still
244  *        running.  Note that we cannot mess with mp_lock at all
245  *        if we entered from a critical section!
246  *      - If we can take the interrupt clear its ipending bit
247  *        and schedule the thread.  Leave interrupts masked and doreti.
248  *
249  *      Note that calls to sched_ithd() are made with interrupts enabled
250  *      and outside a critical section.  YYY sched_ithd may preempt us
251  *      synchronously (fix interrupt stacking).
252  *
253  *      YYY can cache gd base pointer instead of using hidden %fs
254  *      prefixes.
255  */
256
257 #define INTR(irq_num, vec_name, maybe_extra_ipending)                   \
258         .text ;                                                         \
259         SUPERALIGN_TEXT ;                                               \
260 IDTVEC(vec_name) ;                                                      \
261         PUSH_FRAME ;                                                    \
262         maybe_extra_ipending ;                                          \
263 ;                                                                       \
264         MASK_LEVEL_IRQ(irq_num) ;                                       \
265         EOI_IRQ(irq_num) ;                                              \
266         movl    PCPU(curthread),%ebx ;                                  \
267         movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
268         pushl   %eax ;          /* cpl do restore */                    \
269         cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
270         jl      2f ;                                                    \
271 1: ;                                                                    \
272         /* set the pending bit and return, leave the interrupt masked */ \
273         orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
274         orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
275         jmp     5f ;                                                    \
276 2: ;                                                                    \
277         /* set running bit, clear pending bit, run handler */           \
278         andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
279         sti ;                                                           \
280         pushl   $irq_num ;                                              \
281         call    sched_ithd ;                                            \
282         addl    $4,%esp ;                                               \
283         incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
284         movl    intr_countp + (irq_num) * 4,%eax ;                      \
285         incl    (%eax) ;                                                \
286 5: ;                                                                    \
287         MEXITCOUNT ;                                                    \
288         jmp     doreti ;                                                \
289
290 /*
291  * Wrong interrupt call handlers.  We program these into APIC vectors
292  * that should otherwise never occur.  For example, we program the SLOW
293  * vector for irq N with this when we program the FAST vector with the
294  * real interrupt.
295  *
296  * XXX for now all we can do is EOI it.  We can't call do_wrongintr
297  * (yet) because we could be in a critical section.
298  */
299 #define WRONGINTR(irq_num,vec_name)                                     \
300         .text ;                                                         \
301         SUPERALIGN_TEXT  ;                                              \
302 IDTVEC(vec_name) ;                                                      \
303         PUSH_FRAME ;                                                    \
304         movl    $0, lapic_eoi ; /* End Of Interrupt to APIC */          \
305         /*pushl $irq_num ;*/                                            \
306         /*call  do_wrongintr ;*/                                        \
307         /*addl  $4,%esp ;*/                                             \
308         POP_FRAME ;                                                     \
309         iret  ;                                                         \
310
311 /*
312  * Handle "spurious INTerrupts".
313  * Notes:
314  *  This is different than the "spurious INTerrupt" generated by an
315  *   8259 PIC for missing INTs.  See the APIC documentation for details.
316  *  This routine should NOT do an 'EOI' cycle.
317  */
318         .text
319         SUPERALIGN_TEXT
320         .globl Xspuriousint
321 Xspuriousint:
322
323         /* No EOI cycle used here */
324
325         iret
326
327
328 /*
329  * Handle TLB shootdowns.
330  */
331         .text
332         SUPERALIGN_TEXT
333         .globl  Xinvltlb
334 Xinvltlb:
335         pushl   %eax
336
337 #ifdef COUNT_XINVLTLB_HITS
338         pushl   %fs
339         movl    $KPSEL, %eax
340         mov     %ax, %fs
341         movl    PCPU(cpuid), %eax
342         popl    %fs
343         ss
344         incl    _xhits(,%eax,4)
345 #endif /* COUNT_XINVLTLB_HITS */
346
347         movl    %cr3, %eax              /* invalidate the TLB */
348         movl    %eax, %cr3
349
350         ss                              /* stack segment, avoid %ds load */
351         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
352
353         popl    %eax
354         iret
355
356
357 /*
358  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
359  *
360  *  - Signals its receipt.
361  *  - Waits for permission to restart.
362  *  - Processing pending IPIQ events while waiting.
363  *  - Signals its restart.
364  */
365
366         .text
367         SUPERALIGN_TEXT
368         .globl Xcpustop
369 Xcpustop:
370         pushl   %ebp
371         movl    %esp, %ebp
372         pushl   %eax
373         pushl   %ecx
374         pushl   %edx
375         pushl   %ds                     /* save current data segment */
376         pushl   %fs
377
378         movl    $KDSEL, %eax
379         mov     %ax, %ds                /* use KERNEL data segment */
380         movl    $KPSEL, %eax
381         mov     %ax, %fs
382
383         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
384
385         movl    PCPU(cpuid), %eax
386         imull   $PCB_SIZE, %eax
387         leal    CNAME(stoppcbs)(%eax), %eax
388         pushl   %eax
389         call    CNAME(savectx)          /* Save process context */
390         addl    $4, %esp
391         
392                 
393         movl    PCPU(cpuid), %eax
394
395         /*
396          * Indicate that we have stopped and loop waiting for permission
397          * to start again.  We must still process IPI events while in a
398          * stopped state.
399          */
400         lock
401         btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
402 1:
403         andl    $~RQF_IPIQ,PCPU(reqflags)
404         pushl   %eax
405         call    lwkt_smp_stopped
406         popl    %eax
407         btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
408         jnc     1b
409
410         lock
411         btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
412         lock
413         btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
414
415         test    %eax, %eax
416         jnz     2f
417
418         movl    CNAME(cpustop_restartfunc), %eax
419         test    %eax, %eax
420         jz      2f
421         movl    $0, CNAME(cpustop_restartfunc)  /* One-shot */
422
423         call    *%eax
424 2:
425         popl    %fs
426         popl    %ds                     /* restore previous data segment */
427         popl    %edx
428         popl    %ecx
429         popl    %eax
430         movl    %ebp, %esp
431         popl    %ebp
432         iret
433
434         /*
435          * For now just have one ipiq IPI, but what we really want is
436          * to have one for each source cpu to the APICs don't get stalled
437          * backlogging the requests.
438          */
439         .text
440         SUPERALIGN_TEXT
441         .globl Xipiq
442 Xipiq:
443         PUSH_FRAME
444         movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
445         FAKE_MCOUNT(13*4(%esp))
446
447         movl    PCPU(curthread),%ebx
448         cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
449         jge     1f
450         subl    $8,%esp                 /* make same as interrupt frame */
451         incl    PCPU(intr_nesting_level)
452         addl    $TDPRI_CRIT,TD_PRI(%ebx)
453         call    lwkt_process_ipiq_frame
454         subl    $TDPRI_CRIT,TD_PRI(%ebx)
455         decl    PCPU(intr_nesting_level)
456         addl    $8,%esp
457         pushl   $0                      /* CPL for frame (REMOVED) */
458         MEXITCOUNT
459         jmp     doreti
460 1:
461         orl     $RQF_IPIQ,PCPU(reqflags)
462         MEXITCOUNT
463         POP_FRAME
464         iret
465
466 MCOUNT_LABEL(bintr)
467         FAST_INTR(0,fastintr0)
468         FAST_INTR(1,fastintr1)
469         FAST_INTR(2,fastintr2)
470         FAST_INTR(3,fastintr3)
471         FAST_INTR(4,fastintr4)
472         FAST_INTR(5,fastintr5)
473         FAST_INTR(6,fastintr6)
474         FAST_INTR(7,fastintr7)
475         FAST_INTR(8,fastintr8)
476         FAST_INTR(9,fastintr9)
477         FAST_INTR(10,fastintr10)
478         FAST_INTR(11,fastintr11)
479         FAST_INTR(12,fastintr12)
480         FAST_INTR(13,fastintr13)
481         FAST_INTR(14,fastintr14)
482         FAST_INTR(15,fastintr15)
483         FAST_INTR(16,fastintr16)
484         FAST_INTR(17,fastintr17)
485         FAST_INTR(18,fastintr18)
486         FAST_INTR(19,fastintr19)
487         FAST_INTR(20,fastintr20)
488         FAST_INTR(21,fastintr21)
489         FAST_INTR(22,fastintr22)
490         FAST_INTR(23,fastintr23)
491         
492         /* YYY what is this garbage? */
493
494         INTR(0,intr0,)
495         INTR(1,intr1,)
496         INTR(2,intr2,)
497         INTR(3,intr3,)
498         INTR(4,intr4,)
499         INTR(5,intr5,)
500         INTR(6,intr6,)
501         INTR(7,intr7,)
502         INTR(8,intr8,)
503         INTR(9,intr9,)
504         INTR(10,intr10,)
505         INTR(11,intr11,)
506         INTR(12,intr12,)
507         INTR(13,intr13,)
508         INTR(14,intr14,)
509         INTR(15,intr15,)
510         INTR(16,intr16,)
511         INTR(17,intr17,)
512         INTR(18,intr18,)
513         INTR(19,intr19,)
514         INTR(20,intr20,)
515         INTR(21,intr21,)
516         INTR(22,intr22,)
517         INTR(23,intr23,)
518
519         FAST_UNPEND(0,fastunpend0)
520         FAST_UNPEND(1,fastunpend1)
521         FAST_UNPEND(2,fastunpend2)
522         FAST_UNPEND(3,fastunpend3)
523         FAST_UNPEND(4,fastunpend4)
524         FAST_UNPEND(5,fastunpend5)
525         FAST_UNPEND(6,fastunpend6)
526         FAST_UNPEND(7,fastunpend7)
527         FAST_UNPEND(8,fastunpend8)
528         FAST_UNPEND(9,fastunpend9)
529         FAST_UNPEND(10,fastunpend10)
530         FAST_UNPEND(11,fastunpend11)
531         FAST_UNPEND(12,fastunpend12)
532         FAST_UNPEND(13,fastunpend13)
533         FAST_UNPEND(14,fastunpend14)
534         FAST_UNPEND(15,fastunpend15)
535         FAST_UNPEND(16,fastunpend16)
536         FAST_UNPEND(17,fastunpend17)
537         FAST_UNPEND(18,fastunpend18)
538         FAST_UNPEND(19,fastunpend19)
539         FAST_UNPEND(20,fastunpend20)
540         FAST_UNPEND(21,fastunpend21)
541         FAST_UNPEND(22,fastunpend22)
542         FAST_UNPEND(23,fastunpend23)
543
544         WRONGINTR(0,wrongintr0)
545         WRONGINTR(1,wrongintr1)
546         WRONGINTR(2,wrongintr2)
547         WRONGINTR(3,wrongintr3)
548         WRONGINTR(4,wrongintr4)
549         WRONGINTR(5,wrongintr5)
550         WRONGINTR(6,wrongintr6)
551         WRONGINTR(7,wrongintr7)
552         WRONGINTR(8,wrongintr8)
553         WRONGINTR(9,wrongintr9)
554         WRONGINTR(10,wrongintr10)
555         WRONGINTR(11,wrongintr11)
556         WRONGINTR(12,wrongintr12)
557         WRONGINTR(13,wrongintr13)
558         WRONGINTR(14,wrongintr14)
559         WRONGINTR(15,wrongintr15)
560         WRONGINTR(16,wrongintr16)
561         WRONGINTR(17,wrongintr17)
562         WRONGINTR(18,wrongintr18)
563         WRONGINTR(19,wrongintr19)
564         WRONGINTR(20,wrongintr20)
565         WRONGINTR(21,wrongintr21)
566         WRONGINTR(22,wrongintr22)
567         WRONGINTR(23,wrongintr23)
568 MCOUNT_LABEL(eintr)
569
570         .data
571
572 #ifdef COUNT_XINVLTLB_HITS
573         .globl  xhits
574 xhits:
575         .space  (NCPU * 4), 0
576 #endif /* COUNT_XINVLTLB_HITS */
577
578 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
579         .globl stopped_cpus, started_cpus
580 stopped_cpus:
581         .long   0
582 started_cpus:
583         .long   0
584
585         .globl CNAME(cpustop_restartfunc)
586 CNAME(cpustop_restartfunc):
587         .long 0
588                 
589         .globl  apic_pin_trigger
590 apic_pin_trigger:
591         .long   0
592
593         .text