2 * Copyright (c) 1996, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/include/smptests.h,v 1.33.2.1 2000/05/16 06:58:10 dillon Exp $
26 * $DragonFly: src/sys/i386/include/Attic/smptests.h,v 1.2 2003/06/17 04:28:36 dillon Exp $
29 #ifndef _MACHINE_SMPTESTS_H_
30 #define _MACHINE_SMPTESTS_H_
34 * Various 'tests in progress' and configuration parameters.
39 * Tor's clock improvements.
41 * When the giant kernel lock disappears, a different strategy should
42 * probably be used, thus this patch can only be considered a temporary
45 * This patch causes (NCPU-1)*(128+100) extra IPIs per second.
46 * During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
47 * in addition to extra IPIs due to forwarding ASTs to other CPUs.
49 * Having a shared AST flag in an SMP configuration is wrong, and I've
50 * just kludged around it, based upon the kernel lock blocking other
51 * processors from entering the kernel while handling an AST for one
52 * processor. When the giant kernel lock disappears, this kludge breaks.
60 * Control the "giant lock" pushdown by logical steps.
/*
 * Levels 1 and 2 are active; the "_NOT" suffix on levels 3 and 4 keeps the
 * name greppable while leaving the real symbol (PUSHDOWN_LEVEL_3/4, tested
 * by #ifdef below) undefined, i.e. those steps are disabled.
 */
62 #define PUSHDOWN_LEVEL_1
63 #define PUSHDOWN_LEVEL_2
64 #define PUSHDOWN_LEVEL_3_NOT
65 #define PUSHDOWN_LEVEL_4_NOT
68 * Debug version of simple_lock. This will store the CPU id of the
69 * holding CPU along with the lock. When a CPU fails to get the lock
70 * it compares its own id to the holder id. If they are the same it
71 * panic()s, as simple locks are binary, and this would cause a deadlock.
78 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
79 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
81 #ifdef PUSHDOWN_LEVEL_1
87 * These defines enable critical region locking of areas that were
88 * protected via cli/sti in the UP kernel.
90 * MPINTRLOCK protects all the generic areas.
91 * COMLOCK protects the sio/cy drivers.
92 * CLOCKLOCK protects clock hardware and data
93 * known to be incomplete:
97 #ifdef PUSHDOWN_LEVEL_1
/*
 * Per the block comment above: MPINTRLOCK guards the generic regions that
 * the UP kernel protected with cli/sti; CLOCKLOCK guards clock hardware
 * and clock data.
 */
98 #define USE_MPINTRLOCK
100 #define USE_CLOCKLOCK
105 * INTR_SIMPLELOCK has been removed, as the interrupt mechanism will likely
106 * not use this sort of optimization if we move to interrupt threads.
108 #ifdef PUSHDOWN_LEVEL_4
113 * CPL_AND_CML has been removed. Interrupt threads will eventually not
114 * use either mechanism so there is no point trying to optimize it.
116 #ifdef PUSHDOWN_LEVEL_3
121 * SPL_DEBUG_POSTCODE/INTR_SPL/SPL_DEBUG - removed
123 * These functions were too expensive for the standard case but, more
124 * importantly, we should be able to come up with a much cleaner way
125 * to handle the cpl. Having to do any locking at all is a mistake
126 * for something that is modified as often as cpl is.
130 * FAST_WITHOUTCPL - now made the default (define removed). Text below
131 * contains the current discussion. I am confident we can find a solution
132 * that does not require us to process softints from a hard int, which can
133 * kill serial performance due to the lack of true hardware ipl's.
137 * Ignore the ipending bits when exiting FAST_INTR() routines.
139 * according to Bruce:
141 * setsoft*() may set ipending. setsofttty() is actually used in the
142 * FAST_INTR handler in some serial drivers. This is necessary to get
143 * output completions and other urgent events handled as soon as possible.
144 * The flag(s) could be set in a variable other than ipending, but they
145 * need to be checked against cpl to decide whether the software interrupt
146 * handler can/should run.
148 * (FAST_INTR used to just return
149 * in all cases until rev.1.7 of vector.s. This worked OK provided there
150 * were no user-mode CPU hogs. CPU hogs caused an average latency of 1/2
151 * clock tick for output completions...)
154 * So I need to restore cpl handling someday, but AFTER
155 * I finish making spl/cpl MP-safe.
157 #ifdef PUSHDOWN_LEVEL_1
162 * FAST_SIMPLELOCK no longer exists, because it doesn't help us. The cpu
163 * is likely to already hold the MP lock and recursive MP locks are now
164 * very cheap, so we do not need this optimization. Eventually *ALL*
165 * interrupts will run in their own thread, so there is no sense complicating
168 #ifdef PUSHDOWN_LEVEL_1
173 * Portions of the old TEST_LOPRIO code, back from the grave!
179 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
181 #define VERBOSE_CPUSTOP_ON_DDBBREAK
183 #define CPUSTOP_ON_DDBBREAK
187 * Bracket code/comments relevant to the current 'giant lock' model.
188 * Everything is now the 'giant lock' model, but we will use this as
189 * we start to "push down" the lock.
195 * Enable extra counters for some selected locations in the interrupt handlers.
196 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or
197 * APIC_INTR_DIAGNOSTIC.
199 #undef APIC_INTR_DIAGNOSTIC
202 * Add extra tracking of a specific interrupt. Look in apic_vector.s,
203 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
204 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
206 #ifdef APIC_INTR_DIAGNOSTIC
207 #define APIC_INTR_DIAGNOSTIC_IRQ 17
211 * Don't assume that slow interrupt handler X is called from vector
214 #define APIC_INTR_REORDER
217 * Redirect clock interrupts to a higher priority (fast intr) vector,
218 * while still using the slow interrupt handler. Only effective when
219 * APIC_INTR_REORDER is defined.
221 #define APIC_INTR_HIGHPRI_CLOCK
228 #define COUNT_XINVLTLB_HITS
233 * Hack to "fake-out" kernel into thinking it is running on a 'default config'.
235 * value == default type
236 #define TEST_DEFAULT_CONFIG 6
241 * Simple test code for IPI interaction, save for future...
244 #define IPI_TARGET_TEST1 1
249 * Address of POST hardware port.
250 * Defining this enables POSTCODE macros.
252 #define POST_ADDR 0x80
257 * POST hardware macros.
260 #define ASMPOSTCODE_INC \
262 movl _current_postcode, %eax ; \
265 movl %eax, _current_postcode ; \
266 outb %al, $POST_ADDR ; \
270 * Overwrite the current_postcode value.
272 #define ASMPOSTCODE(X) \
275 movl %eax, _current_postcode ; \
276 outb %al, $POST_ADDR ; \
280 * Overwrite the current_postcode low nibble.
282 #define ASMPOSTCODE_LO(X) \
284 movl _current_postcode, %eax ; \
287 movl %eax, _current_postcode ; \
288 outb %al, $POST_ADDR ; \
292 * Overwrite the current_postcode high nibble.
294 #define ASMPOSTCODE_HI(X) \
296 movl _current_postcode, %eax ; \
298 orl $(X<<4), %eax ; \
299 movl %eax, _current_postcode ; \
300 outb %al, $POST_ADDR ; \
/*
 * No-op fallbacks: when POST_ADDR is not defined (POSTCODE debugging
 * disabled), the ASMPOSTCODE macros expand to nothing so callers need
 * no conditional compilation of their own.
 */
303 #define ASMPOSTCODE_INC
304 #define ASMPOSTCODE(X)
305 #define ASMPOSTCODE_LO(X)
306 #define ASMPOSTCODE_HI(X)
307 #endif /* POST_ADDR */
311 * These are all temps for debugging...
317 * This macro traps unexpected INTs to a specific CPU, eg. GUARD_CPU.
321 #define MAYBE_PANIC(irq_num) \
322 cmpl $GUARD_CPU, _cpuid ; \
324 cmpl $1, _ok_test1 ; \
341 #define MAYBE_PANIC(irq_num)
342 #endif /* GUARD_INTS */
344 #endif /* _MACHINE_SMPTESTS_H_ */