summaryrefslogtreecommitdiffstats
path: root/sys/amd64/include/cpu.h
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>2000-03-28 07:16:37 +0000
committerdillon <dillon@FreeBSD.org>2000-03-28 07:16:37 +0000
commit689641c1ea53b9b5f18384314b488d0369596cf4 (patch)
tree19fb936349a321ee7ee797aaae79a14fa3863394 /sys/amd64/include/cpu.h
parent9c60490f9c6bfc1cca909053f05b8c1553e089fb (diff)
downloadFreeBSD-src-689641c1ea53b9b5f18384314b488d0369596cf4.zip
FreeBSD-src-689641c1ea53b9b5f18384314b488d0369596cf4.tar.gz
Commit major SMP cleanups and move the BGL (big giant lock) in the
syscall path inward. A system call may select whether it needs the MP lock or not (the default being that it does need it). A great deal of conditional SMP code for various deadended experiments has been removed. 'cil' and 'cml' have been removed entirely, and the locking around the cpl has been removed. The conditional separately-locked fast-interrupt code has been removed, meaning that interrupts must hold the CPL now (but they pretty much had to anyway). Another reason for doing this is that the original separate-lock for interrupts just doesn't apply to the interrupt thread mechanism being contemplated. Modifications to the cpl may now ONLY occur while holding the MP lock. For example, if an otherwise MP safe syscall needs to mess with the cpl, it must hold the MP lock for the duration and must (as usual) save/restore the cpl in a nested fashion. This is precursor work for the real meat coming later: avoiding having to hold the MP lock for common syscalls and I/O's and interrupt threads. It is expected that the spl mechanisms and new interrupt threading mechanisms will be able to run in tandem, allowing a slow piecemeal transition to occur. This patch should result in a moderate performance improvement due to the considerable amount of code that has been removed from the critical path, especially the simplification of the spl*() calls. The real performance gains will come later. Approved by: jkh Reviewed by: current, bde (exception.s) Some work taken from: luoqi's patch
Diffstat (limited to 'sys/amd64/include/cpu.h')
-rw-r--r--sys/amd64/include/cpu.h18
1 files changed, 12 insertions, 6 deletions
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 18049d0..c6aa46f 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -82,10 +82,13 @@
/*
* Preempt the current process if in interrupt from user mode,
* or after the current trap/syscall if in system mode.
+ *
+ * XXX: if astpending is later changed to an |= here due to more flags being
+ * added, we will have an atomicity problem. The type of atomicity we need is
+ * a non-locked orl.
*/
-#define need_resched() do { want_resched = 1; aston(); } while (0)
-
-#define resched_wanted() want_resched
+#define need_resched() do { astpending = AST_RESCHED|AST_PENDING; } while (0)
+#define resched_wanted() (astpending & AST_RESCHED)
/*
* Arrange to handle pending profiling ticks before returning to user mode.
@@ -100,10 +103,15 @@
/*
* Notify the current process (p) that it has a signal pending,
* process as soon as possible.
+ *
+ * XXX: aston() really needs to be an atomic (not locked, but an orl),
+ * in case need_resched() is set by an interrupt. But with astpending a
+ * per-cpu variable this is not trivial to do efficiently. For now we blow
+ * it off (asynchronous need_resched() conflicts are not critical).
*/
#define signotify(p) aston()
-#define aston() do { astpending = 1; } while (0)
+#define aston() do { astpending |= AST_PENDING; } while (0)
#define astoff()
/*
@@ -126,11 +134,9 @@
}
#ifdef _KERNEL
-extern int astpending;
extern char btext[];
extern char etext[];
extern u_char intr_nesting_level;
-extern int want_resched; /* resched was called */
void fork_trampoline __P((void));
void fork_return __P((struct proc *, struct trapframe));
OpenPOWER on IntegriCloud