2 * Copyright (c) 1996, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/include/smptests.h,v 1.33.2.1 2000/05/16 06:58:10 dillon Exp $
28 #ifndef _MACHINE_SMPTESTS_H_
29 #define _MACHINE_SMPTESTS_H_
33 * Various 'tests in progress' and configuration parameters.
38 * Tor's clock improvements.
40 * When the giant kernel lock disappears, a different strategy should
41 * probably be used, thus this patch can only be considered a temporary
44 * This patch causes (NCPU-1)*(128+100) extra IPIs per second.
45 * During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
46 * in addition to extra IPIs due to forwarding ASTs to other CPUs.
48 * Having a shared AST flag in an SMP configuration is wrong, and I've
49 * just kludged around it, based upon the kernel lock blocking other
50 * processors from entering the kernel while handling an AST for one
51 * processor. When the giant kernel lock disappears, this kludge breaks.
59 * Control the "giant lock" pushdown by logical steps.
61 #define PUSHDOWN_LEVEL_1
62 #define PUSHDOWN_LEVEL_2
63 #define PUSHDOWN_LEVEL_3_NOT
64 #define PUSHDOWN_LEVEL_4_NOT
67 * Debug version of simple_lock. This will store the CPU id of the
68 * holding CPU along with the lock. When a CPU fails to get the lock
69 * it compares its own id to the holder id. If they are the same it
70 * panic()s, as simple locks are binary, and this would cause a deadlock.
77 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
78 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
80 #ifdef PUSHDOWN_LEVEL_1
86 * These defines enable critical region locking of areas that were
87 * protected via cli/sti in the UP kernel.
89 * MPINTRLOCK protects all the generic areas.
90 * COMLOCK protects the sio/cy drivers.
91 * CLOCKLOCK protects clock hardware and data
92 * known to be incomplete:
96 #ifdef PUSHDOWN_LEVEL_1
97 #define USE_MPINTRLOCK
104 * INTR_SIMPLELOCK has been removed, as the interrupt mechanism will likely
105 * not use this sort of optimization if we move to interrupt threads.
107 #ifdef PUSHDOWN_LEVEL_4
112 * CPL_AND_CML has been removed. Interrupt threads will eventually not
113 * use either mechanism so there is no point trying to optimize it.
115 #ifdef PUSHDOWN_LEVEL_3
120 * SPL_DEBUG_POSTCODE/INTR_SPL/SPL_DEBUG - removed
122 * These functions were too expensive for the standard case but, more
123 * importantly, we should be able to come up with a much cleaner way
124 * to handle the cpl. Having to do any locking at all is a mistake
125 * for something that is modified as often as cpl is.
129 * FAST_WITHOUTCPL - now made the default (define removed). Text below
130 * contains the current discussion. I am confident we can find a solution
131 * that does not require us to process softints from a hard int, which can
132 * kill serial performance due to the lack of true hardware ipl's.
136 * Ignore the ipending bits when exiting FAST_INTR() routines.
138 * according to Bruce:
140 * setsoft*() may set ipending. setsofttty() is actually used in the
141 * FAST_INTR handler in some serial drivers. This is necessary to get
142 * output completions and other urgent events handled as soon as possible.
143 * The flag(s) could be set in a variable other than ipending, but they
144 * need to be checked against cpl to decide whether the software interrupt
145 * handler can/should run.
147 * (FAST_INTR used to just return
148 * in all cases until rev.1.7 of vector.s. This worked OK provided there
149 * were no user-mode CPU hogs. CPU hogs caused an average latency of 1/2
150 * clock tick for output completions...)
153 * So I need to restore cpl handling someday, but AFTER
154 * I finish making spl/cpl MP-safe.
156 #ifdef PUSHDOWN_LEVEL_1
161 * FAST_SIMPLELOCK no longer exists, because it doesn't help us. The cpu
162 * is likely to already hold the MP lock and recursive MP locks are now
163 * very cheap, so we do not need this optimization. Eventually *ALL*
164 * interrupts will run in their own thread, so there is no sense complicating
167 #ifdef PUSHDOWN_LEVEL_1
172 * Portions of the old TEST_LOPRIO code, back from the grave!
178 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
180 #define VERBOSE_CPUSTOP_ON_DDBBREAK
182 #define CPUSTOP_ON_DDBBREAK
186 * Bracket code/comments relevant to the current 'giant lock' model.
187 * Everything is now the 'giant lock' model, but we will use this as
188 * we start to "push down" the lock.
194 * Enable extra counters for some selected locations in the interrupt handlers.
195 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or
196 * APIC_INTR_DIAGNOSTIC.
198 #undef APIC_INTR_DIAGNOSTIC
201 * Add extra tracking of a specific interrupt. Look in apic_vector.s,
202 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
203 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
205 #ifdef APIC_INTR_DIAGNOSTIC
206 #define APIC_INTR_DIAGNOSTIC_IRQ 17
210 * Don't assume that slow interrupt handler X is called from vector
213 #define APIC_INTR_REORDER
216 * Redirect clock interrupts to a higher priority (fast intr) vector,
217 * while still using the slow interrupt handler. Only effective when
218 * APIC_INTR_REORDER is defined.
220 #define APIC_INTR_HIGHPRI_CLOCK
227 #define COUNT_XINVLTLB_HITS
232 * Hack to "fake-out" kernel into thinking it is running on a 'default config'.
234 * value == default type
235 #define TEST_DEFAULT_CONFIG 6
240 * Simple test code for IPI interaction, save for future...
243 #define IPI_TARGET_TEST1 1
248 * Address of POST hardware port.
249 * Defining this enables POSTCODE macros.
251 #define POST_ADDR 0x80
256 * POST hardware macros.
259 #define ASMPOSTCODE_INC /* bump current_postcode and write it to the POST port */ \
261 movl _current_postcode, %eax ; /* eax = current POST code; NOTE(review): save/increment lines (260, 262-263) not visible in this chunk */ \
264 movl %eax, _current_postcode ; /* store updated value back */ \
265 outb %al, $POST_ADDR ; /* emit low byte to the POST diagnostic port (0x80) */ \
269 * Overwrite the current_postcode value.
271 #define ASMPOSTCODE(X) /* overwrite current_postcode with X and emit it; NOTE(review): the load of X into %eax (lines 272-273) is not visible in this chunk */ \
274 movl %eax, _current_postcode ; /* remember the new POST code */ \
275 outb %al, $POST_ADDR ; /* emit low byte to the POST diagnostic port */ \
279 * Overwrite the current_postcode low nibble.
281 #define ASMPOSTCODE_LO(X) /* replace only the low nibble of current_postcode with X; NOTE(review): mask/or lines (282, 284-285) not visible in this chunk */ \
283 movl _current_postcode, %eax ; /* eax = current POST code */ \
286 movl %eax, _current_postcode ; /* store merged value back */ \
287 outb %al, $POST_ADDR ; /* emit low byte to the POST diagnostic port */ \
291 * Overwrite the current_postcode high nibble.
293 #define ASMPOSTCODE_HI(X) /* replace only the high nibble of current_postcode with X; NOTE(review): mask line (296) and register-save lines not visible in this chunk */ \
295 movl _current_postcode, %eax ; /* eax = current POST code */ \
297 orl $(X<<4), %eax ; /* merge X into bits 7:4 */ \
298 movl %eax, _current_postcode ; /* store merged value back */ \
299 outb %al, $POST_ADDR ; /* emit low byte to the POST diagnostic port */ \
/* POST_ADDR undefined: POSTCODE debugging disabled, macros expand to nothing. */
302 #define ASMPOSTCODE_INC
303 #define ASMPOSTCODE(X)
304 #define ASMPOSTCODE_LO(X)
305 #define ASMPOSTCODE_HI(X)
306 #endif /* POST_ADDR */
310 * These are all temps for debugging...
316 * This macro traps unexpected INTs to a specific CPU, eg. GUARD_CPU.
320 #define MAYBE_PANIC(irq_num) /* trap unexpected INTs arriving on GUARD_CPU; NOTE(review): branch/panic lines (322, 324-339) not visible in this chunk */ \
321 cmpl $GUARD_CPU, _cpuid ; /* only check on the designated guard CPU */ \
323 cmpl $1, _ok_test1 ; /* presumably a "test armed" flag — confirm against full source */ \
/* GUARD_INTS undefined: MAYBE_PANIC compiles away to nothing. */
340 #define MAYBE_PANIC(irq_num)
341 #endif /* GUARD_INTS */
343 #endif /* _MACHINE_SMPTESTS_H_ */