/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: smptests.h,v 1.31 1998/03/07 20:48:16 tegge Exp $
 */

#ifndef _MACHINE_SMPTESTS_H_
#define _MACHINE_SMPTESTS_H_


/*
 * Various 'tests in progress' and configuration parameters.
 */


/*
 * Tor's clock improvements.
 *
 *  When the giant kernel lock disappears, a different strategy should
 *  probably be used, thus this patch can only be considered a temporary
 *  measure.
 *
 *  This patch causes (NCPU-1)*(128+100) extra IPIs per second.
 *  During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
 *  in addition to extra IPIs due to forwarding ASTs to other CPUs.
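 *  (The rates involved are the default 100 Hz hardclock and 128 Hz
 *  statclock, or the 1024 Hz profiling clock, forwarded to each of the
 *  other NCPU-1 processors; e.g. with NCPU == 4 that is 684 IPIs/s.)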
 *
 *  Having a shared AST flag in an SMP configuration is wrong, and I've
 *  just kludged around it, based upon the kernel lock blocking other
 *  processors from entering the kernel while handling an AST for one
 *  processor. When the giant kernel lock disappears, this kludge breaks.
 *
 *  -- Tor
 */
#define BETTER_CLOCK


/*
 * Control the "giant lock" pushdown by logical steps.
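 * A level is disabled by giving its define a "_NOT" suffix, as is done
 * for levels 3 and 4 below.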
 */
#define PUSHDOWN_LEVEL_1
#define PUSHDOWN_LEVEL_2
#define PUSHDOWN_LEVEL_3_NOT
#define PUSHDOWN_LEVEL_4_NOT


/*
 * XXX some temp debug control of cpl locks
 */
#ifdef PUSHDOWN_LEVEL_2
#define REAL_ECPL	/* exception.s:		SCPL_LOCK/SCPL_UNLOCK */
#define REAL_ICPL	/* ipl.s:		CPL_LOCK/CPL_UNLOCK/FAST */
#define REAL_AICPL	/* apic_ipl.s:		SCPL_LOCK/SCPL_UNLOCK */
#define REAL_AVCPL	/* apic_vector.s:	CPL_LOCK/CPL_UNLOCK */
#define REAL_IFCPL	/* ipl_funcs.c:		SCPL_LOCK/SCPL_UNLOCK */
#endif /* PUSHDOWN_LEVEL_2 */

/*
 * The xCPL_LOCK/xCPL_UNLOCK defines control the spinlocks
 * that protect cpl/cml/cil and the spl functions.
 */
#ifdef REAL_ECPL
#define ECPL_LOCK		SCPL_LOCK
#define ECPL_UNLOCK		SCPL_UNLOCK
#else
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* REAL_ECPL */

#ifdef REAL_ICPL
#define ICPL_LOCK		CPL_LOCK
#define ICPL_UNLOCK		CPL_UNLOCK
#define FAST_ICPL_UNLOCK	movl	$0, _cpl_lock
#else
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#endif /* REAL_ICPL */

#ifdef REAL_AICPL
#define AICPL_LOCK		SCPL_LOCK
#define AICPL_UNLOCK		SCPL_UNLOCK
#else
#define AICPL_LOCK
#define AICPL_UNLOCK
#endif /* REAL_AICPL */

#ifdef REAL_AVCPL
#define AVCPL_LOCK		CPL_LOCK
#define AVCPL_UNLOCK		CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */

#ifdef REAL_IFCPL
#define IFCPL_LOCK()		SCPL_LOCK()
#define IFCPL_UNLOCK()		SCPL_UNLOCK()
#else
#define IFCPL_LOCK()
#define IFCPL_UNLOCK()
#endif /* REAL_IFCPL */


/*
 * Debug version of simple_lock.  This will store the CPU id of the
 * holding CPU along with the lock.  When a CPU fails to get the lock
 * it compares its own id to the holder id.  If they are the same it
 * panic()s, as simple locks are binary, and this would cause a deadlock.
 *
 */
#define SL_DEBUG
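
/*
 * A minimal sketch of the SL_DEBUG idea, for illustration only: the
 * structure layout, the atomic_test_and_set() primitive and the names
 * below are assumptions, not the actual simple_lock implementation.
 */
#if 0
struct s_lock_dbg {
	volatile int	sl_data;	/* 0 == free, non-zero == held */
	int		sl_holder;	/* cpuid of the current holder */
};

static __inline void
s_lock_dbg(struct s_lock_dbg *lkp, int my_cpuid)
{
	while (atomic_test_and_set(&lkp->sl_data)) {
		/*
		 * Simple locks are binary, so a lock we already hold can
		 * never be re-taken: spinning here would deadlock.
		 */
		if (lkp->sl_holder == my_cpuid)
			panic("recursive simple_lock would deadlock");
	}
	lkp->sl_holder = my_cpuid;
}

static __inline void
s_unlock_dbg(struct s_lock_dbg *lkp)
{
	lkp->sl_holder = -1;		/* no holder */
	lkp->sl_data = 0;
}
#endif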


/*
 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_HI
#endif


/*
 * These defines enable critical region locking of areas that were
 * protected via cli/sti in the UP kernel.
 *
 * MPINTRLOCK protects all the generic areas.
 * COMLOCK protects the sio/cy drivers.
 * CLOCKLOCK protects clock hardware and data.
 *
 * Known to be incomplete:
 *	joystick lkm
 *	?
 */
#ifdef PUSHDOWN_LEVEL_1
#define USE_MPINTRLOCK
#define USE_COMLOCK
#define USE_CLOCKLOCK
#endif
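
/*
 * Sketch of the conversion these options represent, for illustration
 * only; CLOCK_LOCK()/CLOCK_UNLOCK() and rtc_seconds below are assumed
 * names, not the actual clock code.
 */
#if 0
static int	rtc_seconds;		/* hypothetical shared clock datum */

static void
up_style_update(void)
{
	disable_intr();		/* UP: masking local interrupts suffices */
	rtc_seconds++;
	enable_intr();
}

static void
smp_style_update(void)
{
	CLOCK_LOCK();		/* SMP: must also exclude the other CPUs */
	rtc_seconds++;
	CLOCK_UNLOCK();
}
#endif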


/*
 * Regular INTerrupts without the giant lock, NOT READY YET!!!
 */
#ifdef PUSHDOWN_LEVEL_4
#define INTR_SIMPLELOCK
#endif


/*
 * Separate the INTR() portion of cpl into another variable: cml.
 */
#ifdef PUSHDOWN_LEVEL_3
#define CPL_AND_CML
#endif


/*
 * Forces spl functions to spin while waiting for a safe time to change cpl.
 *
#define SPL_DEBUG_POSTCODE	(slows the system down noticeably)
 */
#ifdef PUSHDOWN_LEVEL_3
#define INTR_SPL
#define SPL_DEBUG
#endif


/*
 * Ignore the ipending bits when exiting FAST_INTR() routines.
 *
 ***
 * according to Bruce:
 *
 * setsoft*() may set ipending.  setsofttty() is actually used in the
 * FAST_INTR handler in some serial drivers.  This is necessary to get
 * output completions and other urgent events handled as soon as possible.
 * The flag(s) could be set in a variable other than ipending, but they
 * need to be checked against cpl to decide whether the software interrupt
 * handler can/should run.
 *
 *  (FAST_INTR used to just return
 * in all cases until rev.1.7 of vector.s.  This worked OK provided there
 * were no user-mode CPU hogs.  CPU hogs caused an average latency of 1/2
 * clock tick for output completions...)
 ***
 *
 * So I need to restore cpl handling someday, but AFTER
 *  I finish making spl/cpl MP-safe.
 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_WITHOUTCPL
#endif


/*
 * Use a simplelock to serialize FAST_INTR()s.
 * sio.c, and probably other FAST_INTR() drivers, never expected several CPUs
 * to be inside them at once.  Things such as global variables assume that
 * no more than one thread of execution is inside the handler at a time, so
 * we serialize access to FAST_INTR()s via a simple lock.
 * One optimization on this would be a simple lock per DRIVER, but I'm
 * not sure how to organize that yet...
 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_SIMPLELOCK
#endif
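
/*
 * Sketch of the serialization, for illustration only; the lock type,
 * variable and wrapper below are assumptions about how the vector code
 * uses the simple lock.
 */
#if 0
static struct simplelock fast_intr_lock;

static void
fast_intr_wrapper(void (*handler)(int), int unit)
{
	s_lock(&fast_intr_lock);	/* one CPU inside a FAST_INTR() at a time */
	(*handler)(unit);
	s_unlock(&fast_intr_lock);
}
#endif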


/*
 * Portions of the old TEST_LOPRIO code, back from the grave!
 */
#define GRAB_LOPRIO


/*
 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
 *
#define VERBOSE_CPUSTOP_ON_DDBBREAK
 */
#define CPUSTOP_ON_DDBBREAK


/*
 * Bracket code/comments relevant to the current 'giant lock' model.
 * Everything is now the 'giant lock' model, but we will use this as
 * we start to "push down" the lock.
 */
#define GIANT_LOCK

#ifdef APIC_IO
/*
 * Enable extra counters for some selected locations in the interrupt handlers.
 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or 
 * APIC_INTR_DIAGNOSTIC.
 */
#undef APIC_INTR_DIAGNOSTIC

/*
 * Add extra tracking of a specific interrupt. Look in apic_vector.s, 
 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
 */
#ifdef APIC_INTR_DIAGNOSTIC
#define APIC_INTR_DIAGNOSTIC_IRQ 17
#endif

/*
 * Don't assume that slow interrupt handler X is called from vector
 * X + ICU_OFFSET.
 */
#define APIC_INTR_REORDER

/*
 * Redirect clock interrupts to a higher priority (fast intr) vector,
 * while still using the slow interrupt handler. Only effective when 
 * APIC_INTR_REORDER is defined.
 */
#define APIC_INTR_HIGHPRI_CLOCK

#endif /* APIC_IO */

/*
 * Misc. counters.
 *
#define COUNT_XINVLTLB_HITS
 */


/*
 * Hack to fool the kernel into thinking it is running on a 'default config'.
 *
 * value == default type
#define TEST_DEFAULT_CONFIG	6
 */


/*
 * Simple test code for IPI interaction, saved for future use...
 *
#define TEST_TEST1
#define IPI_TARGET_TEST1	1
 */


/*
 * Address of POST hardware port.
 * Defining this enables POSTCODE macros.
 *
#define POST_ADDR		0x80
 */


/*
 * POST hardware macros.
 */
#ifdef POST_ADDR
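/*
 * Increment current_postcode (mod 256) and write it to the POST port.
 */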
#define ASMPOSTCODE_INC				\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	incl	%eax ;				\
	andl	$0xff, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax

/*
 * Overwrite the current_postcode value.
 */
#define ASMPOSTCODE(X)				\
	pushl	%eax ;				\
	movl	$X, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax

/*
 * Overwrite the current_postcode low nibble.
 */
#define ASMPOSTCODE_LO(X)			\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	andl	$0xf0, %eax ;			\
	orl	$X, %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax

/*
 * Overwrite the current_postcode high nibble.
 */
#define ASMPOSTCODE_HI(X)			\
	pushl	%eax ;				\
	movl	_current_postcode, %eax ;	\
	andl	$0x0f, %eax ;			\
	orl	$(X<<4), %eax ;			\
	movl	%eax, _current_postcode ;	\
	outb	%al, $POST_ADDR ;		\
	popl	%eax
#else
#define ASMPOSTCODE_INC
#define ASMPOSTCODE(X)
#define ASMPOSTCODE_LO(X)
#define ASMPOSTCODE_HI(X)
#endif /* POST_ADDR */


/*
 * These are all temps for debugging...
 *
#define GUARD_INTS
 */

/*
 * This macro traps unexpected INTs on a specific CPU, e.g. GUARD_CPU.
 */
#ifdef GUARD_INTS
#define GUARD_CPU	1
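/*
 * Note: despite its name this currently printf()s the cpuid, the irq
 * number and the local APIC IRR/ISR contents rather than panicking,
 * and only on GUARD_CPU when _ok_test1 is set.
 */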
#define MAYBE_PANIC(irq_num)		\
	cmpl	$GUARD_CPU, _cpuid ;	\
	jne	9f ;			\
	cmpl	$1, _ok_test1 ;		\
	jne	9f ;			\
	pushl	lapic_isr3 ;		\
	pushl	lapic_isr2 ;		\
	pushl	lapic_isr1 ;		\
	pushl	lapic_isr0 ;		\
	pushl	lapic_irr3 ;		\
	pushl	lapic_irr2 ;		\
	pushl	lapic_irr1 ;		\
	pushl	lapic_irr0 ;		\
	pushl	$irq_num ;		\
	pushl	_cpuid ;		\
	pushl	$panic_msg ;		\
	call	_printf ;		\
	addl	$44, %esp ;		\
9:
#else
#define MAYBE_PANIC(irq_num)
#endif /* GUARD_INTS */

#endif /* _MACHINE_SMPTESTS_H_ */