/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_SMPTESTS_H_
#define _MACHINE_SMPTESTS_H_


/*
 * Various 'tests in progress' and configuration parameters.
 */


/*
 * Tor's clock improvements.
 *
 *  When the giant kernel lock disappears, a different strategy should
 *  probably be used, thus this patch can only be considered a temporary
 *  measure.
 *
 *  This patch causes (NCPU-1)*(128+100) extra IPIs per second.
 *  During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
 *  in addition to extra IPIs due to forwarding ASTs to other CPUs.
 *
 *  Having a shared AST flag in an SMP configuration is wrong, and I've
 *  just kludged around it, based upon the kernel lock blocking other
 *  processors from entering the kernel while handling an AST for one
 *  processor. When the giant kernel lock disappears, this kludge breaks.
 *
 *  -- Tor
 */
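
/*
 * For example, on a 4-CPU system that is 3 * (128 + 100) = 684 extra
 * IPIs per second, or 3 * (1024 + 100) = 3372 IPIs/s while profiling
 * (the two terms presumably being the stat/prof clock rate and hz).
 */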
#define BETTER_CLOCK


/*
 * Control the "giant lock" pushdown by logical steps.
 */
#define PUSHDOWN_LEVEL_1
#define PUSHDOWN_LEVEL_2
#define PUSHDOWN_LEVEL_3_NOT
#define PUSHDOWN_LEVEL_4_NOT

/*
 * Debug version of simple_lock.  This stores the CPU id of the
 * holding CPU along with the lock.  When a CPU fails to get the lock
 * it compares its own id to the holder id.  If they are the same it
 * panic()s, as simple locks are binary (non-recursive), and this
 * would deadlock.
 */
#define SL_DEBUG
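
/*
 * A sketch of the check described above (the field and function names
 * here are hypothetical, not the real simple_lock internals):
 *
 *	while (!atomic_try_lock(&lkp->lock_data)) {	... spin ...
 *		if (lkp->lock_holder == cpuid)
 *			panic("SL_DEBUG: recursed on a simple_lock");
 *	}
 *	lkp->lock_holder = cpuid;
 */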


/*
 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_HI
#endif


/*
 * These defines enable critical region locking of areas that were
 * protected via cli/sti in the UP kernel.
 *
 * COMLOCK protects the sio/cy drivers.
 * CLOCKLOCK protects clock hardware and data.
 *
 * Known to be incomplete:
 *	joystick lkm
 *	?
 */
#ifdef PUSHDOWN_LEVEL_1
#define USE_COMLOCK
#define USE_CLOCKLOCK
#endif
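
/*
 * In outline, the conversion looks like this (a sketch; COM_LOCK and
 * COM_UNLOCK name the sio spin-lock macros):
 *
 *	UP kernel:			SMP kernel:
 *		disable_intr();			COM_LOCK();
 *		... touch sio state ...		... touch sio state ...
 *		enable_intr();			COM_UNLOCK();
 *
 * so that a second CPU is excluded even though disabling local
 * interrupts no longer suffices.
 */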


/*
 * INTR_SIMPLELOCK has been removed, as the interrupt mechanism will likely
 * not use this sort of optimization if we move to interrupt threads.
 */
#ifdef PUSHDOWN_LEVEL_4
#endif


/*
 * CPL_AND_CML has been removed.  Interrupt threads will eventually not
 * use either mechanism so there is no point trying to optimize it.
 */
#ifdef PUSHDOWN_LEVEL_3
#endif


/*
 * SPL_DEBUG_POSTCODE/INTR_SPL/SPL_DEBUG - removed
 *
 * These functions were too expensive for the standard case but, more 
 * importantly, we should be able to come up with a much cleaner way
 * to handle the cpl.  Having to do any locking at all is a mistake
 * for something that is modified as often as cpl is.
 */

/*
 * FAST_WITHOUTCPL - now made the default (define removed).  Text below 
 * contains the current discussion.  I am confident we can find a solution
 * that does not require us to process softints from a hard int, which can
 * kill serial performance due to the lack of true hardware ipl's.
 *
 ****
 *
 * Ignore the ipending bits when exiting FAST_INTR() routines.
 *
 * According to Bruce:
 *
 * setsoft*() may set ipending.  setsofttty() is actually used in the
 * FAST_INTR handler in some serial drivers.  This is necessary to get
 * output completions and other urgent events handled as soon as possible.
 * The flag(s) could be set in a variable other than ipending, but they
 * need to be checked against cpl to decide whether the software interrupt
 * handler can/should run.
 *
 * (FAST_INTR used to just return in all cases until rev.1.7 of vector.s.
 * This worked OK provided there were no user-mode CPU hogs.  CPU hogs
 * caused an average latency of 1/2 clock tick for output completions...)
 ****
 *
 * So I need to restore cpl handling someday, but AFTER
 *  I finish making spl/cpl MP-safe.
 */
#ifdef PUSHDOWN_LEVEL_1
#endif
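
/*
 * The check under discussion, in outline (a sketch; the real test
 * belongs in the interrupt-return path):
 *
 *	if (ipending & ~cpl)	... a pending softint is unmasked ...
 *		splz();		... so process it now ...
 */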


/*
 * FAST_SIMPLELOCK no longer exists, because it doesn't help us.  The CPU
 * is likely to already hold the MP lock and recursive MP locks are now
 * very cheap, so we do not need this optimization.  Eventually *ALL*
 * interrupts will run in their own thread, so there is no sense complicating
 * matters now.
 */
#ifdef PUSHDOWN_LEVEL_1
#endif


/*
 * Portions of the old TEST_LOPRIO code, back from the grave!
 */
#define GRAB_LOPRIO


/*
 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
 */
#define VERBOSE_CPUSTOP_ON_DDBBREAK
#define CPUSTOP_ON_DDBBREAK


/*
 * Bracket code/comments relevant to the current 'giant lock' model.
 * Everything is now the 'giant lock' model, but we will use this as
 * we start to "push down" the lock.
 */
#define GIANT_LOCK

#ifdef APIC_IO
/*
 * Enable extra counters for some selected locations in the interrupt handlers.
 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or 
 * APIC_INTR_DIAGNOSTIC.
 */
#undef APIC_INTR_DIAGNOSTIC

/*
 * Add extra tracking of a specific interrupt. Look in apic_vector.s, 
 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
 */
#ifdef APIC_INTR_DIAGNOSTIC
#define APIC_INTR_DIAGNOSTIC_IRQ 17
#endif

/*
 * Don't assume that slow interrupt handler X is called from vector
 * X + ICU_OFFSET.
 */
#define APIC_INTR_REORDER
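
/*
 * Conceptually, this replaces the fixed "vector = irq + ICU_OFFSET"
 * mapping with a per-interrupt indirection table, roughly (names
 * hypothetical):
 *
 *	struct {
 *		volatile u_int	*location;	... which status word ...
 *		u_int		bit;		... which bit within it ...
 *	} intr_reorder_tab[];
 */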

/*
 * Redirect clock interrupts to a higher priority (fast intr) vector,
 * while still using the slow interrupt handler. Only effective when 
 * APIC_INTR_REORDER is defined.
 */
#define APIC_INTR_HIGHPRI_CLOCK

#endif /* APIC_IO */

/*
 * Misc. counters.
 *
#define COUNT_XINVLTLB_HITS
 */


/*
 * Hack to "fake out" the kernel into thinking it is running on a
 * 'default config'.
 *
 * value == default config type
#define TEST_DEFAULT_CONFIG	6
 */


/*
 * Simple test code for IPI interaction, saved for future use...
 *
#define TEST_TEST1
#define IPI_TARGET_TEST1	1
 */


/*
 * Address of POST hardware port.
 * Defining this enables POSTCODE macros.
 *
#define POST_ADDR		0x80
 */


/*
 * POST hardware macros.
 */
#ifdef POST_ADDR
#define ASMPOSTCODE_INC				\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	incl	%eax ;				\
	andl	$0xff, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax
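
/*
 * The C equivalent of ASMPOSTCODE_INC (illustrative only):
 *
 *	current_postcode = (current_postcode + 1) & 0xff;
 *	outb(POST_ADDR, current_postcode);
 *
 * The three macros below differ only in how they compute the new
 * current_postcode value.
 */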

/*
 * Overwrite the current_postcode value.
 */
#define ASMPOSTCODE(X)				\
	pushl	%eax ;				\
	movl	$X, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax

/*
 * Overwrite the current_postcode low nibble.
 */
#define ASMPOSTCODE_LO(X)			\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	andl	$0xf0, %eax ;			\
	orl	$X, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax

/*
 * Overwrite the current_postcode high nibble.
 */
#define ASMPOSTCODE_HI(X)			\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	andl	$0x0f, %eax ;			\
	orl	$(X<<4), %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax
#else
#define ASMPOSTCODE_INC
#define ASMPOSTCODE(X)
#define ASMPOSTCODE_LO(X)
#define ASMPOSTCODE_HI(X)
#endif /* POST_ADDR */


/*
 * These are all temps for debugging...
 *
#define GUARD_INTS
 */

/*
 * This macro traps unexpected INTs delivered to a specific CPU, e.g. GUARD_CPU.
 */
#ifdef GUARD_INTS
#define GUARD_CPU	1
#define MAYBE_PANIC(irq_num)		\
	cmpl	$GUARD_CPU, _cpuid ;	\
	jne	9f ;			\
	cmpl	$1, _ok_test1 ;		\
	jne	9f ;			\
	pushl	lapic_isr3 ;		\
	pushl	lapic_isr2 ;		\
	pushl	lapic_isr1 ;		\
	pushl	lapic_isr0 ;		\
	pushl	lapic_irr3 ;		\
	pushl	lapic_irr2 ;		\
	pushl	lapic_irr1 ;		\
	pushl	lapic_irr0 ;		\
	pushl	$irq_num ;		\
	pushl	_cpuid ;		\
	pushl	$panic_msg ;		\
	call	_printf ;		\
	addl	$44, %esp ;		\
9:
#else
#define MAYBE_PANIC(irq_num)
#endif /* GUARD_INTS */
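
/*
 * In C, MAYBE_PANIC amounts to roughly (sketch):
 *
 *	if (cpuid == GUARD_CPU && ok_test1 == 1)
 *		printf(panic_msg, cpuid, irq_num,
 *		    lapic_irr0, lapic_irr1, lapic_irr2, lapic_irr3,
 *		    lapic_isr0, lapic_isr1, lapic_isr2, lapic_isr3);
 */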

#endif /* _MACHINE_SMPTESTS_H_ */