/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.19 2005/06/16 21:12:47 dillon Exp $
 */


#include <machine/apicreg.h>
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME \
	pushl $0 ;	/* dummy error code */ \
	pushl $0 ;	/* dummy trap type */ \
	pushal ; \
	pushl %ds ;	/* save data and extra segments ... */ \
	pushl %es ; \
	pushl %fs ; \
	mov $KDSEL,%ax ; \
	mov %ax,%ds ; \
	mov %ax,%es ; \
	mov $KPSEL,%ax ; \
	mov %ax,%fs ; \

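/*
 * Illustrative only: assuming the usual i386 trapframe conventions, the
 * stack built by PUSH_FRAME looks roughly like this (lowest address,
 * i.e. the new %esp, listed first):
 *
 *	%fs %es %ds				saved segment registers
 *	%edi %esi %ebp %esp %ebx %edx %ecx %eax	pushal
 *	trap type, error code			two dummy words
 *	%eip %cs %eflags			pushed by the cpu
 *
 * which is why FAKE_MCOUNT() in the handlers below finds the interrupted
 * %eip at 13*4(%esp).
 */
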
#define PUSH_DUMMY \
	pushfl ;	/* phys int frame / flags */ \
	pushl %cs ;	/* phys int frame / cs */ \
	pushl 12(%esp) ;	/* original caller eip */ \
	pushl $0 ;	/* dummy error code */ \
	pushl $0 ;	/* dummy trap type */ \
	subl $12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME \
	popl %fs ; \
	popl %es ; \
	popl %ds ; \
	popal ; \
	addl $2*4,%esp ;	/* dummy trap & error codes */ \

#define POP_DUMMY \
	addl $17*4,%esp ; \

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Interrupts are expected to already be disabled when using these
 * IMASK_*() macros.
 */
#define IMASK_LOCK \
	SPIN_LOCK(imen_spinlock) ; \

#define IMASK_UNLOCK \
	SPIN_UNLOCK(imen_spinlock) ; \

#define MASK_IRQ(irq_num) \
	IMASK_LOCK ;	/* into critical reg */ \
	testl $IRQ_LBIT(irq_num), apic_imen ; \
	jne 7f ;	/* masked, don't mask */ \
	orl $IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl %eax, (%ecx) ;	/* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	orl $IOART_INTMASK, %eax ;	/* set the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ;	/* already masked */ \
	IMASK_UNLOCK ; \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
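/*
 * Illustrative only: MASK_LEVEL_IRQ() below boils down to
 *
 *	if (apic_pin_trigger & IRQ_LBIT(irq))	level triggered source
 *		MASK_IRQ(irq);			mask it in the IO APIC
 *
 * Edge triggered sources are left unmasked.
 */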
#define MASK_LEVEL_IRQ(irq_num) \
	testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
	jz 9f ;	/* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \
9: ; \


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl (%eax), %eax ; \
	testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz 9f ;	/* not active */ \
	movl $0, lapic_eoi ; \
9: \

#else

#define EOI_IRQ(irq_num) \
	testl $IRQ_LBIT(irq_num), lapic_isr1 ; \
	jz 9f ;	/* not active */ \
	movl $0, lapic_eoi ; \
9: \

#endif

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ;	/* into critical reg */ \
	testl $IRQ_LBIT(irq_num), apic_imen ; \
	je 7f ;	/* bit clear, not masked */ \
	andl $~IRQ_LBIT(irq_num), apic_imen ;	/* clear mask bit */ \
	movl IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
	movl %eax,(%ecx) ;	/* write the index */ \
	movl IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	andl $~IOART_INTMASK,%eax ;	/* clear the mask */ \
	movl %eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ; \
	IMASK_UNLOCK ; \

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt, set its fpending bit and
 *   doreti.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt, clear its fpending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
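/*
 * Illustrative only: the FAST_INTR() flow below corresponds roughly to
 * this C-like sketch (the per-cpu field names mirror the PCPU()
 * accesses used in the macro):
 *
 *	mask and EOI the source;
 *	if (curthread->td_pri >= TDPRI_CRIT) {
 *		fpending |= IRQ_LBIT(irq);	set pending, leave masked
 *		reqflags |= RQF_INTPEND;
 *	} else if (try_mplock()) {
 *		fpending &= ~IRQ_LBIT(irq);
 *		intr_handler[irq](intr_unit[irq]);
 *		rel_mplock();
 *		UNMASK_IRQ(irq);
 *	} else {
 *		forward to the cpu owning mp_lock via lwkt_send_ipiq_bycpu();
 *	}
 *	doreti;
 */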

#define FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	FAKE_MCOUNT(13*4(%esp)) ; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	movl PCPU(curthread),%ebx ; \
	movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
	pushl %eax ; \
	cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	jl 2f ; \
1: ; \
	/* in critical section, make interrupt pending */ \
	/* set the pending bit and return, leave interrupt masked */ \
	orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
	orl $RQF_INTPEND,PCPU(reqflags) ; \
	jmp 5f ; \
2: ; \
	/* try to get the MP lock */ \
	call try_mplock ; \
	testl %eax,%eax ; \
	jz 6f ; \
	/* clear pending bit, run handler */ \
	incl PCPU(intr_nesting_level) ; \
	addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
	pushl intr_unit + (irq_num) * 4 ; \
	call *intr_handler + (irq_num) * 4 ;	/* do the work ASAP */ \
	addl $4, %esp ; \
	subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	incl PCPU(cnt)+V_INTR ;	/* book-keeping make per cpu YYY */ \
	movl intr_countp + (irq_num) * 4, %eax ; \
	incl (%eax) ; \
	decl PCPU(intr_nesting_level) ; \
	call rel_mplock ; \
	UNMASK_IRQ(irq_num) ; \
5: ; \
	MEXITCOUNT ; \
	jmp doreti ; \
6: ; \
	/* could not get the MP lock, forward the interrupt */ \
	movl mp_lock, %eax ;	/* check race */ \
	cmpl $MP_FREE_LOCK,%eax ; \
	je 2b ; \
	incl PCPU(cnt)+V_FORWARDED_INTS ; \
	subl $12,%esp ; \
	movl $irq_num,8(%esp) ; \
	movl $forward_fastint_remote,4(%esp) ; \
	movl %eax,(%esp) ; \
	call lwkt_send_ipiq_bycpu ; \
	addl $12,%esp ; \
	jmp 5f ; \

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 * - Push a dummy trap frame as required by doreti
 * - The interrupt source is already masked
 * - Clear the fpending bit
 * - Run the handler
 * - Unmask the interrupt
 * - Pop the dummy frame and do a normal return
 *
 * The BGL is held on call and left held on return.
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */

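/*
 * Illustrative only: FAST_UNPEND() below is essentially
 *
 *	push a dummy doreti-style frame;
 *	intr_handler[irq](intr_unit[irq]);	run the delayed handler
 *	update the interrupt counters;
 *	UNMASK_IRQ(irq);			re-enable the source
 *	pop the dummy frame and ret;		BGL stays held throughout
 */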
#define FAST_UNPEND(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl %ebp ; \
	movl %esp,%ebp ; \
	PUSH_DUMMY ; \
	pushl intr_unit + (irq_num) * 4 ; \
	call *intr_handler + (irq_num) * 4 ;	/* do the work ASAP */ \
	addl $4, %esp ; \
	incl PCPU(cnt)+V_INTR ;	/* book-keeping make per cpu YYY */ \
	movl intr_countp + (irq_num) * 4, %eax ; \
	incl (%eax) ; \
	UNMASK_IRQ(irq_num) ; \
	POP_DUMMY ; \
	popl %ebp ; \
	ret ; \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt, set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 *   running.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt, clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */
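/*
 * Illustrative only: the INTR() flow below corresponds roughly to
 *
 *	mask and EOI the source;
 *	if (curthread->td_pri >= TDPRI_CRIT) {
 *		ipending |= IRQ_LBIT(irq);	set pending, leave masked
 *		reqflags |= RQF_INTPEND;
 *	} else {
 *		ipending &= ~IRQ_LBIT(irq);
 *		enable interrupts;
 *		sched_ithd(irq);		schedule the interrupt thread
 *		update the interrupt counters;
 *	}
 *	doreti;
 */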

#define INTR(irq_num, vec_name, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	maybe_extra_ipending ; \
; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	movl PCPU(curthread),%ebx ; \
	movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
	pushl %eax ;	/* cpl to restore */ \
	cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	jl 2f ; \
1: ; \
	/* set the pending bit and return, leave the interrupt masked */ \
	orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
	orl $RQF_INTPEND,PCPU(reqflags) ; \
	jmp 5f ; \
2: ; \
	/* set running bit, clear pending bit, run handler */ \
	andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
	sti ; \
	pushl $irq_num ; \
	call sched_ithd ; \
	addl $4,%esp ; \
	incl PCPU(cnt)+V_INTR ;	/* book-keeping YYY make per-cpu */ \
	movl intr_countp + (irq_num) * 4,%eax ; \
	incl (%eax) ; \
5: ; \
	MEXITCOUNT ; \
	jmp doreti ; \


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xinvltlb
Xinvltlb:
	pushl %eax

#ifdef COUNT_XINVLTLB_HITS
	pushl %fs
	movl $KPSEL, %eax
	mov %ax, %fs
	movl PCPU(cpuid), %eax
	popl %fs
	ss
	incl _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl %cr3, %eax		/* invalidate the TLB */
	movl %eax, %cr3

	ss			/* stack segment, avoid %ds load */
	movl $0, lapic_eoi	/* End Of Interrupt to APIC */

	popl %eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 * - Signals its receipt.
 * - Waits for permission to restart.
 * - Signals its restart.
 */
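/*
 * Illustrative only: in C-like terms the handler below does roughly
 *
 *	savectx(&stoppcbs[cpuid]);		save this cpu's context
 *	stopped_cpus |= (1 << cpuid);		atomically, signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				wait for permission to restart
 *	started_cpus &= ~(1 << cpuid);		atomically
 *	stopped_cpus &= ~(1 << cpuid);		atomically, signal restart
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		func = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	one-shot
 *		func();
 *	}
 */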

	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl %ebp
	movl %esp, %ebp
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %ds			/* save current data segment */
	pushl %fs

	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	movl $KPSEL, %eax
	mov %ax, %fs

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	movl PCPU(cpuid), %eax
	imull $PCB_SIZE, %eax
	leal CNAME(stoppcbs)(%eax), %eax
	pushl %eax
	call CNAME(savectx)		/* Save process context */
	addl $4, %esp


	movl PCPU(cpuid), %eax

	lock
	btsl %eax, stopped_cpus		/* stopped_cpus |= (1<<id) */
1:
	btl %eax, started_cpus		/* while (!(started_cpus & (1<<id))) */
	jnc 1b

	lock
	btrl %eax, started_cpus		/* started_cpus &= ~(1<<id) */
	lock
	btrl %eax, stopped_cpus		/* stopped_cpus &= ~(1<<id) */

	test %eax, %eax
	jnz 2f

	movl CNAME(cpustop_restartfunc), %eax
	test %eax, %eax
	jz 2f
	movl $0, CNAME(cpustop_restartfunc)	/* One-shot */

	call *%eax
2:
	popl %fs
	popl %ds			/* restore previous data segment */
	popl %edx
	popl %ecx
	popl %eax
	movl %ebp, %esp
	popl %ebp
	iret

	/*
	 * For now just have one ipiq IPI, but what we really want is
	 * to have one for each source cpu so the APICs don't get stalled
	 * backlogging the requests.
	 */
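	/*
	 * Illustrative only: in C-like terms the Xipiq handler below is
	 * roughly
	 *
	 *	lapic_eoi = 0;
	 *	if (curthread->td_pri < TDPRI_CRIT) {
	 *		curthread->td_pri += TDPRI_CRIT;
	 *		lwkt_process_ipiq_frame(...);	drain the ipi queue
	 *		curthread->td_pri -= TDPRI_CRIT;
	 *		doreti;
	 *	} else {
	 *		reqflags |= RQF_IPIQ;	defer until doreti/crit exit
	 *		iret;
	 *	}
	 */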
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	PUSH_FRAME
	movl $0, lapic_eoi		/* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))

	movl PCPU(curthread),%ebx
	cmpl $TDPRI_CRIT,TD_PRI(%ebx)
	jge 1f
	subl $8,%esp			/* make same as interrupt frame */
	incl PCPU(intr_nesting_level)
	addl $TDPRI_CRIT,TD_PRI(%ebx)
	call lwkt_process_ipiq_frame
	subl $TDPRI_CRIT,TD_PRI(%ebx)
	decl PCPU(intr_nesting_level)
	addl $8,%esp
	pushl $0			/* CPL for frame (REMOVED) */
	MEXITCOUNT
	jmp doreti
1:
	orl $RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	POP_FRAME
	iret

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)

	/* YYY what is this garbage? */

	INTR(0,intr0,)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
MCOUNT_LABEL(eintr)

	/*
	 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
	 *
	 * - Calls the generic rendezvous action function.
	 */
	.text
	SUPERALIGN_TEXT
	.globl Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	mov %ax, %es
	movl $KPSEL, %eax
	mov %ax, %fs

	call smp_rendezvous_action

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

#ifdef COUNT_XINVLTLB_HITS
	.globl xhits
xhits:
	.space (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long 0
started_cpus:
	.long 0

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.long 0

	.globl apic_pin_trigger
apic_pin_trigger:
	.long 0

	.text