path: root/sys/i386/include/critical.h
/*-
 * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
 * the BSD copyright, /usr/src/COPYRIGHT.
 *
 * This file contains prototypes and high-level inlines related to
 * machine-level critical function support:
 *
 *	cpu_critical_enter()		- inlined
 *	cpu_critical_exit()		- inlined
 *	cpu_critical_fork_exit()	- prototyped
 *	cpu_thread_link()		- prototyped
 *	related support functions residing
 *	in <arch>/<arch>/critical.c	- prototyped
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CRITICAL_H_
#define	_MACHINE_CRITICAL_H_

__BEGIN_DECLS

/*
 * Prototypes - see <arch>/<arch>/critical.c
 */
void cpu_unpend(void);
void cpu_critical_fork_exit(void);
void cpu_thread_link(struct thread *td);

#ifdef	__GNUC__

/*
 *	cpu_critical_enter:
 *
 *	This routine is called from critical_enter() on the 0->1 transition
 *	of td_critnest, prior to it being incremented to 1.
 *
 *	With new-style critical section handling we do not have to do
 *	anything.  However, as a side effect, any interrupts occurring
 *	while td_critnest is non-zero will be deferred.
 */
#define cpu_critical_enter()
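
/*
 * For illustration only (not part of this header): based on the
 * comment above, the machine-independent critical_enter() is
 * expected to invoke this hook roughly as sketched below, i.e.
 * only on the 0->1 transition of td_critnest and prior to the
 * increment.  This is a hedged sketch, not the authoritative MI
 * implementation.
 *
 *	void
 *	critical_enter(void)
 *	{
 *		struct thread *td;
 *
 *		td = curthread;
 *		if (td->td_critnest == 0)
 *			cpu_critical_enter();
 *		td->td_critnest++;
 *	}
 */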

/*
 *	cpu_critical_exit:
 *
 *	This routine is called from critical_exit() on a 1->0 transition
 *	of td_critnest, after it has been decremented to 0.  We are
 *	exiting the last critical section.
 *
 *	Note that the interrupt race between the td_critnest (1->0)
 *	transition and our int_pending/unpend() check below is handled
 *	by the interrupt code for us, so we do not have to do anything
 *	fancy.
 */
static __inline void
cpu_critical_exit(void)
{
	/*
	 * We may have to schedule pending interrupts.  Create
	 * conditions similar to an interrupt context and call
	 * unpend().
	 *
	 * note: we do this even if we are in an interrupt
	 * nesting level.  Deep nesting is protected by
	 * critical_*() and if we conditionalized it then we
	 * would have to check int_pending again whenever
	 * we decrement td_intr_nesting_level to 0.
	 */
	if (PCPU_GET(int_pending))
		cpu_unpend();
}
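
/*
 * For illustration only (not part of this header): per the comment
 * above, the machine-independent critical_exit() is expected to call
 * this hook only on the 1->0 transition of td_critnest, after the
 * count has been brought back to zero, roughly as sketched below.
 * Again, a hedged sketch rather than the authoritative MI code.
 *
 *	void
 *	critical_exit(void)
 *	{
 *		struct thread *td;
 *
 *		td = curthread;
 *		if (td->td_critnest == 1) {
 *			td->td_critnest = 0;
 *			cpu_critical_exit();
 *		} else
 *			td->td_critnest--;
 *	}
 */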

#else /* !__GNUC__ */

void cpu_critical_enter(void);
void cpu_critical_exit(void);

#endif	/* __GNUC__ */

__END_DECLS

#endif /* !_MACHINE_CRITICAL_H_ */
