author     jhb <jhb@FreeBSD.org>  2001-12-11 23:33:44 +0000
committer  jhb <jhb@FreeBSD.org>  2001-12-11 23:33:44 +0000
commit     21b6b26912b00bb37f9f16080ba7d49241814935 (patch)
tree       c785835e70070309148a72c55669ff0bf043a20a /sys/amd64
parent     279222ba62c185d7d7ec09017bb3e7760fd333f0 (diff)
Overhaul the per-CPU support a bit:
- The MI portions of struct globaldata have been consolidated into a MI
  struct pcpu.  The MD per-CPU data are specified via a macro defined in
  machine/pcpu.h.  A macro was chosen over a struct mdpcpu so that the
  interface would be cleaner (PCPU_GET(my_md_field) vs.
  PCPU_GET(md.md_my_md_field)).
- All references to globaldata are changed to pcpu instead.  In a UP kernel,
  this data was stored as global variables, which is where the original name
  came from.  In an SMP world this data is per-CPU and ideally private to
  each CPU outside of the context of debuggers.  This also included combining
  machine/globaldata.h and machine/globals.h into machine/pcpu.h.
- The pointer to the thread using the FPU on i386 was renamed from npxthread
  to fpcurthread to be identical with other architectures.
- Make the show pcpu ddb command MI with a MD callout to display MD fields.
- The globaldata_register() function was renamed to pcpu_init() and now
  initializes the MI fields of a struct pcpu in addition to registering it
  with the internal array and list.
- A pcpu_destroy() function was added to remove a struct pcpu from the
  internal array and list.

Tested on:      alpha, i386
Reviewed by:    peter, jake
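
The message above describes the new interface without showing it.  The sketch
below is illustrative only and is not part of the commit: the MI sys/pcpu.h is
not in this diff (which is limited to sys/amd64), so its exact field layout is
an assumption pieced together from the ASSYM() offsets in genassym.c, while
PCPU_MD_FIELDS, PCPU_GET()/PCPU_SET() and pcpu_init() are taken directly from
the diff itself.

/*
 * Illustrative sketch, not part of the commit.  The MI <sys/pcpu.h>
 * (not shown in this diff) would roughly declare struct pcpu and pull
 * the MD members in through PCPU_MD_FIELDS from <machine/pcpu.h>;
 * the MI field names below are inferred from genassym.c.
 */
struct pcpu {
	struct thread	*pc_curthread;		/* MI: current thread */
	struct thread	*pc_idlethread;		/* MI: this CPU's idle thread */
	struct thread	*pc_fpcurthread;	/* MI: thread owning the FPU */
	struct pcb	*pc_curpcb;		/* MI: current pcb */
	u_int		 pc_cpuid;		/* MI: logical CPU number */
	PCPU_MD_FIELDS;				/* MD: tss, ldt, ... (machine/pcpu.h) */
};

/*
 * Consumers name the member directly, which is why a macro was chosen
 * over a nested struct mdpcpu (PCPU_GET(currentldt), not
 * PCPU_GET(md.md_currentldt)).  Hypothetical example; assumes the usual
 * kernel headers (<sys/param.h>, <sys/pcpu.h>) are included:
 */
static void
example_fpu_release(void)
{
	struct thread *td;

	td = PCPU_GET(fpcurthread);		/* read a per-CPU field */
	if (td != NULL)
		PCPU_SET(fpcurthread, NULL);	/* write a per-CPU field */
}

In a UP kernel the single static struct pcpu (__pcpu in machdep.c below) is
used directly; under SMP each AP's private data page is wired into SMPpt and
then handed to pcpu_init(pc, cpuid, sizeof(struct pcpu)), as the
start_all_aps() hunks show.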
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/bios.c          |   2
-rw-r--r--  sys/amd64/amd64/cpu_switch.S    |  10
-rw-r--r--  sys/amd64/amd64/db_interface.c  |  47
-rw-r--r--  sys/amd64/amd64/fpu.c           |  34
-rw-r--r--  sys/amd64/amd64/genassym.c      |  39
-rw-r--r--  sys/amd64/amd64/machdep.c       |  29
-rw-r--r--  sys/amd64/amd64/mp_machdep.c    |  16
-rw-r--r--  sys/amd64/amd64/mptable.c       |  16
-rw-r--r--  sys/amd64/amd64/pmap.c          |   1
-rw-r--r--  sys/amd64/amd64/support.S       |  30
-rw-r--r--  sys/amd64/amd64/support.s       |  30
-rw-r--r--  sys/amd64/amd64/swtch.s         |  10
-rw-r--r--  sys/amd64/amd64/vm_machdep.c    |   2
-rw-r--r--  sys/amd64/include/asmacros.h    |   6
-rw-r--r--  sys/amd64/include/cpu.h         |   1
-rw-r--r--  sys/amd64/include/mptable.h     |  16
-rw-r--r--  sys/amd64/include/pcpu.h        | 138
-rw-r--r--  sys/amd64/include/proc.h        |   1
-rw-r--r--  sys/amd64/isa/npx.c             |  34
19 files changed, 235 insertions, 227 deletions
diff --git a/sys/amd64/amd64/bios.c b/sys/amd64/amd64/bios.c
index cc887e7..6eb90b2 100644
--- a/sys/amd64/amd64/bios.c
+++ b/sys/amd64/amd64/bios.c
@@ -36,9 +36,9 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
+#include <sys/pcpu.h>
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <machine/globals.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/stdarg.h>
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 14e4495..ae967c4 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -123,7 +123,7 @@ ENTRY(cpu_switch)
#ifdef DEV_NPX
/* have we used fp, and need a save? */
- cmpl %ecx,PCPU(NPXTHREAD)
+ cmpl %ecx,PCPU(FPCURTHREAD)
jne 1f
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
pushl %edx
@@ -337,20 +337,20 @@ ENTRY(savectx)
#ifdef DEV_NPX
/*
- * If npxthread == NULL, then the npx h/w state is irrelevant and the
+ * If fpcurthread == NULL, then the npx h/w state is irrelevant and the
* state had better already be in the pcb. This is true for forks
* but not for dumps (the old book-keeping with FP flags in the pcb
* always lost for dumps because the dump pcb has 0 flags).
*
- * If npxthread != NULL, then we have to save the npx h/w state to
- * npxthread's pcb and copy it to the requested pcb, or save to the
+ * If fpcurthread != NULL, then we have to save the npx h/w state to
+ * fpcurthread's pcb and copy it to the requested pcb, or save to the
* requested pcb and reload. Copying is easier because we would
* have to handle h/w bugs for reloading. We used to lose the
* parent's npx state for forks by forgetting to reload.
*/
pushfl
cli
- movl PCPU(NPXTHREAD),%eax
+ movl PCPU(FPCURTHREAD),%eax
testl %eax,%eax
je 1f
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
index 7e78088..9708cad 100644
--- a/sys/amd64/amd64/db_interface.c
+++ b/sys/amd64/amd64/db_interface.c
@@ -332,48 +332,9 @@ Debugger(msg)
}
}
-DB_SHOW_COMMAND(pcpu, db_show_pcpu)
+void
+db_show_mdpcpu(struct pcpu *pc)
{
- struct globaldata *gd;
-#ifdef SMP
- int id;
-
- if (have_addr)
- id = ((addr >> 4) % 16) * 10 + (addr % 16);
- else
- id = PCPU_GET(cpuid);
- gd = globaldata_find(id);
- if (gd == NULL) {
- db_printf("CPU %d not found\n", id);
- return;
- }
-#else
- gd = GLOBALDATA;
-#endif
- db_printf("cpuid = %d\n", gd->gd_cpuid);
- db_printf("curthread = ");
- if (gd->gd_curthread != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_curthread,
- gd->gd_curthread->td_proc->p_pid, gd->gd_curthread->td_proc->p_comm);
- else
- db_printf("none\n");
- db_printf("curpcb = %p\n", gd->gd_curpcb);
- db_printf("npxthread = ");
- if (gd->gd_npxthread != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_npxthread,
- gd->gd_npxthread->td_proc->p_pid, gd->gd_npxthread->td_proc->p_comm);
- else
- db_printf("none\n");
- db_printf("idlethread = ");
- if (gd->gd_idlethread != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_idlethread,
- gd->gd_idlethread->td_proc->p_pid,
- gd->gd_idlethread->td_proc->p_comm);
- else
- db_printf("none\n");
-
-#ifdef WITNESS
- db_printf("spin locks held:\n");
- witness_list_locks(&gd->gd_spinlocks);
-#endif
+
+ db_printf("currentldt = 0x%x\n", pc->pc_currentldt);
}
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index 1238d67..ff8c4cf 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -237,7 +237,7 @@ npx_intr(dummy)
#endif
/*
- * npxthread is normally non-null here. In that case, schedule an
+ * fpcurthread is normally non-null here. In that case, schedule an
* AST to finish the exception handling in the correct context
* (this interrupt may occur after the thread has entered the
* kernel via a syscall or an interrupt). Otherwise, the npx
@@ -248,7 +248,7 @@ npx_intr(dummy)
* that caused it and it will repeat. We will eventually (usually
* soon) win the race to handle the interrupt properly.
*/
- td = PCPU_GET(npxthread);
+ td = PCPU_GET(fpcurthread);
if (td != NULL) {
td->td_pcb->pcb_flags |= PCB_NPXTRAP;
mtx_lock_spin(&sched_lock);
@@ -513,7 +513,7 @@ npxinit(control)
/*
* fninit has the same h/w bugs as fnsave. Use the detoxified
* fnsave to throw away any junk in the fpu. npxsave() initializes
- * the fpu and sets npxthread = NULL as important side effects.
+ * the fpu and sets fpcurthread = NULL as important side effects.
*/
savecrit = critical_enter();
npxsave(&dummy);
@@ -540,7 +540,7 @@ npxexit(td)
critical_t savecrit;
savecrit = critical_enter();
- if (td == PCPU_GET(npxthread))
+ if (td == PCPU_GET(fpcurthread))
npxsave(&PCPU_GET(curpcb)->pcb_save);
critical_exit(savecrit);
#ifdef NPX_DEBUG
@@ -758,8 +758,8 @@ npxtrap()
u_long *exstat;
if (!npx_exists) {
- printf("npxtrap: npxthread = %p, curthread = %p, npx_exists = %d\n",
- PCPU_GET(npxthread), curthread, npx_exists);
+ printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n",
+ PCPU_GET(fpcurthread), curthread, npx_exists);
panic("npxtrap from nowhere");
}
savecrit = critical_enter();
@@ -769,7 +769,7 @@ npxtrap()
* state to memory. Fetch the relevant parts of the state from
* wherever they are.
*/
- if (PCPU_GET(npxthread) != curthread) {
+ if (PCPU_GET(fpcurthread) != curthread) {
control = GET_FPU_CW(curthread);
status = GET_FPU_SW(curthread);
} else {
@@ -779,7 +779,7 @@ npxtrap()
exstat = GET_FPU_EXSW_PTR(curthread->td_pcb);
*exstat = status;
- if (PCPU_GET(npxthread) != curthread)
+ if (PCPU_GET(fpcurthread) != curthread)
GET_FPU_SW(curthread) &= ~0x80bf;
else
fnclex();
@@ -790,7 +790,7 @@ npxtrap()
/*
* Implement device not available (DNA) exception
*
- * It would be better to switch FP context here (if curthread != npxthread)
+ * It would be better to switch FP context here (if curthread != fpcurthread)
* and not necessarily for every context switch, but it is too hard to
* access foreign pcb's.
*/
@@ -802,9 +802,9 @@ npxdna()
if (!npx_exists)
return (0);
- if (PCPU_GET(npxthread) != NULL) {
- printf("npxdna: npxthread = %p, curthread = %p\n",
- PCPU_GET(npxthread), curthread);
+ if (PCPU_GET(fpcurthread) != NULL) {
+ printf("npxdna: fpcurthread = %p, curthread = %p\n",
+ PCPU_GET(fpcurthread), curthread);
panic("npxdna");
}
s = critical_enter();
@@ -812,7 +812,7 @@ npxdna()
/*
* Record new context early in case frstor causes an IRQ13.
*/
- PCPU_SET(npxthread, curthread);
+ PCPU_SET(fpcurthread, curthread);
exstat = GET_FPU_EXSW_PTR(PCPU_GET(curpcb));
*exstat = 0;
@@ -844,13 +844,13 @@ npxdna()
* after the process has entered the kernel. It may even be delivered after
* the fnsave here completes. A spurious IRQ13 for the fnsave is handled in
* the same way as a very-late-arriving non-spurious IRQ13 from user mode:
- * it is normally ignored at first because we set npxthread to NULL; it is
+ * it is normally ignored at first because we set fpcurthread to NULL; it is
* normally retriggered in npxdna() after return to user mode.
*
* npxsave() must be called with interrupts disabled, so that it clears
- * npxthread atomically with saving the state. We require callers to do the
+ * fpcurthread atomically with saving the state. We require callers to do the
* disabling, since most callers need to disable interrupts anyway to call
- * npxsave() atomically with checking npxthread.
+ * npxsave() atomically with checking fpcurthread.
*
* A previous version of npxsave() went to great lengths to excecute fnsave
* with interrupts enabled in case executing it froze the CPU. This case
@@ -866,7 +866,7 @@ npxsave(addr)
fpusave(addr);
start_emulating();
- PCPU_SET(npxthread, NULL);
+ PCPU_SET(fpcurthread, NULL);
}
static void
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 9f41665..59f3147 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -51,10 +51,6 @@
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/user.h>
-/* XXX */
-#ifdef KTR_PERCPU
-#include <sys/ktr.h>
-#endif
#include <machine/bootinfo.h>
#include <machine/tss.h>
#include <sys/vmmeter.h>
@@ -75,7 +71,6 @@
#endif
#include <machine/cpu.h>
#include <machine/sigframe.h>
-#include <machine/globaldata.h>
#include <machine/vm86.h>
#include <machine/proc.h>
@@ -177,27 +172,19 @@ ASSYM(BI_SIZE, offsetof(struct bootinfo, bi_size));
ASSYM(BI_SYMTAB, offsetof(struct bootinfo, bi_symtab));
ASSYM(BI_ESYMTAB, offsetof(struct bootinfo, bi_esymtab));
ASSYM(BI_KERNEND, offsetof(struct bootinfo, bi_kernend));
-ASSYM(GD_SIZEOF, sizeof(struct globaldata));
-ASSYM(GD_PRVSPACE, offsetof(struct globaldata, gd_prvspace));
-ASSYM(GD_CURTHREAD, offsetof(struct globaldata, gd_curthread));
-ASSYM(GD_NPXTHREAD, offsetof(struct globaldata, gd_npxthread));
-ASSYM(GD_IDLETHREAD, offsetof(struct globaldata, gd_idlethread));
-ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
-ASSYM(GD_COMMON_TSS, offsetof(struct globaldata, gd_common_tss));
-ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
-ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks));
-ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd));
-ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt));
-ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt));
-
-/* XXX */
-#ifdef KTR_PERCPU
-ASSYM(GD_KTR_IDX, offsetof(struct globaldata, gd_ktr_idx));
-ASSYM(GD_KTR_BUF, offsetof(struct globaldata, gd_ktr_buf));
-ASSYM(GD_KTR_BUF_DATA, offsetof(struct globaldata, gd_ktr_buf_data));
-#endif
-
-ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid));
+ASSYM(PC_SIZEOF, sizeof(struct pcpu));
+ASSYM(PC_PRVSPACE, offsetof(struct pcpu, pc_prvspace));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread));
+ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread));
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_COMMON_TSS, offsetof(struct pcpu, pc_common_tss));
+ASSYM(PC_SWITCHTIME, offsetof(struct pcpu, pc_switchtime));
+ASSYM(PC_SWITCHTICKS, offsetof(struct pcpu, pc_switchticks));
+ASSYM(PC_COMMON_TSSD, offsetof(struct pcpu, pc_common_tssd));
+ASSYM(PC_TSS_GDT, offsetof(struct pcpu, pc_tss_gdt));
+ASSYM(PC_CURRENTLDT, offsetof(struct pcpu, pc_currentldt));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
#ifdef SMP
ASSYM(LA_VER, offsetof(struct LAPIC, version));
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index bdc0980..c2ef5dc 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -104,7 +104,6 @@
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */
#include <machine/proc.h>
-#include <machine/globals.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
@@ -207,7 +206,7 @@ struct kva_md_info kmi;
static struct trapframe proc0_tf;
#ifndef SMP
-static struct globaldata __globaldata;
+static struct pcpu __pcpu;
#endif
struct mtx sched_lock;
@@ -262,7 +261,6 @@ cpu_startup(dummy)
bufinit();
vm_pager_bufferinit();
- globaldata_register(GLOBALDATA);
#ifndef SMP
/* For SMP, we delay the cpu_setregs() until after SMP startup. */
cpu_setregs();
@@ -1670,6 +1668,7 @@ init386(first)
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt, r_idt;
#endif
+ struct pcpu *pc;
proc_linkup(&proc0);
proc0.p_uarea = proc0uarea;
@@ -1706,20 +1705,16 @@ init386(first)
gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
#ifdef SMP
+ pc = &SMP_prvspace[0];
gdt_segs[GPRIV_SEL].ssd_limit =
atop(sizeof(struct privatespace) - 1);
- gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
- gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[0].globaldata.gd_common_tss;
- SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata;
#else
+ pc = &__pcpu;
gdt_segs[GPRIV_SEL].ssd_limit =
- atop(sizeof(struct globaldata) - 1);
- gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata;
- gdt_segs[GPROC0_SEL].ssd_base =
- (int) &__globaldata.gd_common_tss;
- __globaldata.gd_prvspace = &__globaldata;
+ atop(sizeof(struct pcpu) - 1);
#endif
+ gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
+ gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
@@ -1734,10 +1729,11 @@ init386(first)
r_gdt.rd_base = (int) gdt;
lgdt(&r_gdt);
- /* setup curproc so that mutexes work */
+ pcpu_init(pc, 0, sizeof(struct pcpu));
+ PCPU_SET(prvspace, pc);
+ /* setup curproc so that mutexes work */
PCPU_SET(curthread, thread0);
- PCPU_SET(spinlocks, NULL);
LIST_INIT(&thread0->td_contested);
@@ -1907,6 +1903,11 @@ init386(first)
thread0->td_frame = &proc0_tf;
}
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+}
+
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 13144b9..9d86dae 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -71,7 +71,6 @@
#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */
#include <machine/tss.h>
#include <machine/specialreg.h>
-#include <machine/globaldata.h>
#include <machine/privatespace.h>
#if defined(APIC_IO)
@@ -477,9 +476,9 @@ init_secondary(void)
gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[myid].globaldata.gd_common_tss;
- SMP_prvspace[myid].globaldata.gd_prvspace =
- &SMP_prvspace[myid].globaldata;
+ (int) &SMP_prvspace[myid].pcpu.pc_common_tss;
+ SMP_prvspace[myid].pcpu.pc_prvspace =
+ &SMP_prvspace[myid].pcpu;
for (x = 0; x < NGDT; x++) {
ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
@@ -1915,7 +1914,7 @@ start_all_aps(u_int boot_addr)
int x, i, pg;
u_char mpbiosreason;
u_long mpbioswarmvec;
- struct globaldata *gd;
+ struct pcpu *pc;
char *stack;
uintptr_t kptbase;
@@ -1955,10 +1954,10 @@ start_all_aps(u_int boot_addr)
pg = x * i386_btop(sizeof(struct privatespace));
/* allocate a new private data page */
- gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
+ pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);
/* wire it into the private page table page */
- SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
+ SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));
/* allocate and set up an idle stack data page */
stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */
@@ -1967,8 +1966,7 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- gd->gd_cpuid = x;
- globaldata_register(gd);
+ pcpu_init(pc, x, sizeof(struct pcpu));
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index 13144b9..9d86dae 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -71,7 +71,6 @@
#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */
#include <machine/tss.h>
#include <machine/specialreg.h>
-#include <machine/globaldata.h>
#include <machine/privatespace.h>
#if defined(APIC_IO)
@@ -477,9 +476,9 @@ init_secondary(void)
gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[myid].globaldata.gd_common_tss;
- SMP_prvspace[myid].globaldata.gd_prvspace =
- &SMP_prvspace[myid].globaldata;
+ (int) &SMP_prvspace[myid].pcpu.pc_common_tss;
+ SMP_prvspace[myid].pcpu.pc_prvspace =
+ &SMP_prvspace[myid].pcpu;
for (x = 0; x < NGDT; x++) {
ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
@@ -1915,7 +1914,7 @@ start_all_aps(u_int boot_addr)
int x, i, pg;
u_char mpbiosreason;
u_long mpbioswarmvec;
- struct globaldata *gd;
+ struct pcpu *pc;
char *stack;
uintptr_t kptbase;
@@ -1955,10 +1954,10 @@ start_all_aps(u_int boot_addr)
pg = x * i386_btop(sizeof(struct privatespace));
/* allocate a new private data page */
- gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
+ pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);
/* wire it into the private page table page */
- SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
+ SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));
/* allocate and set up an idle stack data page */
stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */
@@ -1967,8 +1966,7 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- gd->gd_cpuid = x;
- globaldata_register(gd);
+ pcpu_init(pc, x, sizeof(struct pcpu));
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 789c491..d7a95a4 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -105,7 +105,6 @@
#include <machine/apic.h>
#include <machine/segments.h>
#include <machine/tss.h>
-#include <machine/globaldata.h>
#endif /* SMP || APIC_IO */
#define PMAP_KEEP_PDIRS
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index 81181cc..bc58672 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -216,8 +216,8 @@ ENTRY(i586_bzero)
* complicated since we avoid it if possible at all levels. We
* want to localize the complications even when that increases them.
* Here the extra work involves preserving CR0_TS in TS.
- * `npxthread != NULL' is supposed to be the condition that all the
- * FPU resources belong to an application, but npxthread and CR0_TS
+ * `fpcurthread != NULL' is supposed to be the condition that all the
+ * FPU resources belong to an application, but fpcurthread and CR0_TS
* aren't set atomically enough for this condition to work in
* interrupt handlers.
*
@@ -241,7 +241,7 @@ ENTRY(i586_bzero)
* method. CR0_TS must be preserved although it is very likely to
* always end up as clear.
*/
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bz1
/*
@@ -303,7 +303,7 @@ fpureg_i586_bzero_loop:
cmpl $8,%ecx
jae fpureg_i586_bzero_loop
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bz3
/* XXX check that the condition for cases 1-2 stayed false. */
@@ -517,7 +517,7 @@ ENTRY(i586_bcopy)
sarb $1,kernel_fpu_lock
jc small_i586_bcopy
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bc1
/* XXX turn off handling of cases 1-2, as above. */
@@ -593,7 +593,7 @@ large_i586_bcopy_loop:
cmpl $64,%ecx
jae 4b
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bc2
/* XXX check that the condition for cases 1-2 stayed false. */
@@ -991,14 +991,14 @@ ENTRY(fastmove)
/* XXX grab FPU context atomically. */
cli
-/* if (npxthread != NULL) { */
- cmpl $0,PCPU(NPXTHREAD)
+/* if (fpcurthread != NULL) { */
+ cmpl $0,PCPU(FPCURTHREAD)
je 6f
/* fnsave(&curpcb->pcb_savefpu); */
movl PCPU(CURPCB),%eax
fnsave PCB_SAVEFPU(%eax)
-/* NPXTHREAD = NULL; */
- movl $0,PCPU(NPXTHREAD)
+/* FPCURTHREAD = NULL; */
+ movl $0,PCPU(FPCURTHREAD)
/* } */
6:
/* now we own the FPU. */
@@ -1026,9 +1026,9 @@ ENTRY(fastmove)
movl -4(%ebp),%edi
/* stop_emulating(); */
clts
-/* npxthread = curthread; */
+/* fpcurthread = curthread; */
movl PCPU(CURTHREAD),%eax
- movl %eax,PCPU(NPXTHREAD)
+ movl %eax,PCPU(FPCURTHREAD)
movl PCPU(CURPCB),%eax
/* XXX end of atomic FPU context grab. */
@@ -1113,8 +1113,8 @@ fastmove_loop:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
-/* npxthread = NULL; */
- movl $0,PCPU(NPXTHREAD)
+/* fpcurthread = NULL; */
+ movl $0,PCPU(FPCURTHREAD)
/* XXX end of atomic FPU context ungrab. */
sti
@@ -1154,7 +1154,7 @@ fastmove_fault:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
- movl $0,PCPU(NPXTHREAD)
+ movl $0,PCPU(FPCURTHREAD)
/* XXX end of atomic FPU context ungrab. */
sti
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index 81181cc..bc58672 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -216,8 +216,8 @@ ENTRY(i586_bzero)
* complicated since we avoid it if possible at all levels. We
* want to localize the complications even when that increases them.
* Here the extra work involves preserving CR0_TS in TS.
- * `npxthread != NULL' is supposed to be the condition that all the
- * FPU resources belong to an application, but npxthread and CR0_TS
+ * `fpcurthread != NULL' is supposed to be the condition that all the
+ * FPU resources belong to an application, but fpcurthread and CR0_TS
* aren't set atomically enough for this condition to work in
* interrupt handlers.
*
@@ -241,7 +241,7 @@ ENTRY(i586_bzero)
* method. CR0_TS must be preserved although it is very likely to
* always end up as clear.
*/
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bz1
/*
@@ -303,7 +303,7 @@ fpureg_i586_bzero_loop:
cmpl $8,%ecx
jae fpureg_i586_bzero_loop
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bz3
/* XXX check that the condition for cases 1-2 stayed false. */
@@ -517,7 +517,7 @@ ENTRY(i586_bcopy)
sarb $1,kernel_fpu_lock
jc small_i586_bcopy
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bc1
/* XXX turn off handling of cases 1-2, as above. */
@@ -593,7 +593,7 @@ large_i586_bcopy_loop:
cmpl $64,%ecx
jae 4b
- cmpl $0,PCPU(NPXTHREAD)
+ cmpl $0,PCPU(FPCURTHREAD)
je i586_bc2
/* XXX check that the condition for cases 1-2 stayed false. */
@@ -991,14 +991,14 @@ ENTRY(fastmove)
/* XXX grab FPU context atomically. */
cli
-/* if (npxthread != NULL) { */
- cmpl $0,PCPU(NPXTHREAD)
+/* if (fpcurthread != NULL) { */
+ cmpl $0,PCPU(FPCURTHREAD)
je 6f
/* fnsave(&curpcb->pcb_savefpu); */
movl PCPU(CURPCB),%eax
fnsave PCB_SAVEFPU(%eax)
-/* NPXTHREAD = NULL; */
- movl $0,PCPU(NPXTHREAD)
+/* FPCURTHREAD = NULL; */
+ movl $0,PCPU(FPCURTHREAD)
/* } */
6:
/* now we own the FPU. */
@@ -1026,9 +1026,9 @@ ENTRY(fastmove)
movl -4(%ebp),%edi
/* stop_emulating(); */
clts
-/* npxthread = curthread; */
+/* fpcurthread = curthread; */
movl PCPU(CURTHREAD),%eax
- movl %eax,PCPU(NPXTHREAD)
+ movl %eax,PCPU(FPCURTHREAD)
movl PCPU(CURPCB),%eax
/* XXX end of atomic FPU context grab. */
@@ -1113,8 +1113,8 @@ fastmove_loop:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
-/* npxthread = NULL; */
- movl $0,PCPU(NPXTHREAD)
+/* fpcurthread = NULL; */
+ movl $0,PCPU(FPCURTHREAD)
/* XXX end of atomic FPU context ungrab. */
sti
@@ -1154,7 +1154,7 @@ fastmove_fault:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
- movl $0,PCPU(NPXTHREAD)
+ movl $0,PCPU(FPCURTHREAD)
/* XXX end of atomic FPU context ungrab. */
sti
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 14e4495..ae967c4 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -123,7 +123,7 @@ ENTRY(cpu_switch)
#ifdef DEV_NPX
/* have we used fp, and need a save? */
- cmpl %ecx,PCPU(NPXTHREAD)
+ cmpl %ecx,PCPU(FPCURTHREAD)
jne 1f
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
pushl %edx
@@ -337,20 +337,20 @@ ENTRY(savectx)
#ifdef DEV_NPX
/*
- * If npxthread == NULL, then the npx h/w state is irrelevant and the
+ * If fpcurthread == NULL, then the npx h/w state is irrelevant and the
* state had better already be in the pcb. This is true for forks
* but not for dumps (the old book-keeping with FP flags in the pcb
* always lost for dumps because the dump pcb has 0 flags).
*
- * If npxthread != NULL, then we have to save the npx h/w state to
- * npxthread's pcb and copy it to the requested pcb, or save to the
+ * If fpcurthread != NULL, then we have to save the npx h/w state to
+ * fpcurthread's pcb and copy it to the requested pcb, or save to the
* requested pcb and reload. Copying is easier because we would
* have to handle h/w bugs for reloading. We used to lose the
* parent's npx state for forks by forgetting to reload.
*/
pushfl
cli
- movl PCPU(NPXTHREAD),%eax
+ movl PCPU(FPCURTHREAD),%eax
testl %eax,%eax
je 1f
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 59898e3..322b5e7 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -154,7 +154,7 @@ cpu_fork(td1, p2, flags)
if (td1 == curthread)
td1->td_pcb->pcb_gs = rgs();
savecrit = critical_enter();
- if (PCPU_GET(npxthread) == td1)
+ if (PCPU_GET(fpcurthread) == td1)
npxsave(&td1->td_pcb->pcb_save);
critical_exit(savecrit);
#endif
diff --git a/sys/amd64/include/asmacros.h b/sys/amd64/include/asmacros.h
index ec43cda..325e3d6 100644
--- a/sys/amd64/include/asmacros.h
+++ b/sys/amd64/include/asmacros.h
@@ -64,9 +64,9 @@
#define NON_GPROF_RET .byte 0xc3 /* opcode for `ret' */
#ifdef LOCORE
-#define PCPU(member) %fs:GD_ ## member
-#define PCPU_ADDR(member, reg) movl %fs:GD_PRVSPACE,reg; \
- addl $GD_ ## member,reg
+#define PCPU(member) %fs:PC_ ## member
+#define PCPU_ADDR(member, reg) movl %fs:PC_PRVSPACE,reg; \
+ addl $PC_ ## member,reg
#endif
#ifdef GPROF
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index a7783a0..10b9499 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -46,7 +46,6 @@
#include <machine/psl.h>
#include <machine/frame.h>
#include <machine/segments.h>
-#include <machine/globals.h>
/*
* definitions of cpu-dependent requirements
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index 13144b9..9d86dae 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -71,7 +71,6 @@
#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */
#include <machine/tss.h>
#include <machine/specialreg.h>
-#include <machine/globaldata.h>
#include <machine/privatespace.h>
#if defined(APIC_IO)
@@ -477,9 +476,9 @@ init_secondary(void)
gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[myid].globaldata.gd_common_tss;
- SMP_prvspace[myid].globaldata.gd_prvspace =
- &SMP_prvspace[myid].globaldata;
+ (int) &SMP_prvspace[myid].pcpu.pc_common_tss;
+ SMP_prvspace[myid].pcpu.pc_prvspace =
+ &SMP_prvspace[myid].pcpu;
for (x = 0; x < NGDT; x++) {
ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
@@ -1915,7 +1914,7 @@ start_all_aps(u_int boot_addr)
int x, i, pg;
u_char mpbiosreason;
u_long mpbioswarmvec;
- struct globaldata *gd;
+ struct pcpu *pc;
char *stack;
uintptr_t kptbase;
@@ -1955,10 +1954,10 @@ start_all_aps(u_int boot_addr)
pg = x * i386_btop(sizeof(struct privatespace));
/* allocate a new private data page */
- gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
+ pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);
/* wire it into the private page table page */
- SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
+ SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));
/* allocate and set up an idle stack data page */
stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */
@@ -1967,8 +1966,7 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- gd->gd_cpuid = x;
- globaldata_register(gd);
+ pcpu_init(pc, x, sizeof(struct pcpu));
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h
index 46c26c4..5fecb98 100644
--- a/sys/amd64/include/pcpu.h
+++ b/sys/amd64/include/pcpu.h
@@ -26,54 +26,122 @@
* $FreeBSD$
*/
-#ifndef _MACHINE_GLOBALDATA_H_
-#define _MACHINE_GLOBALDATA_H_
+#ifndef _MACHINE_PCPU_H_
+#define _MACHINE_PCPU_H_
#ifdef _KERNEL
+#ifndef __GNUC__
+#error gcc is required to use this file
+#endif
+
#include <machine/segments.h>
#include <machine/tss.h>
-/* XXX */
-#ifdef KTR_PERCPU
-#include <sys/ktr.h>
-#endif
-
/*
- * This structure maps out the global data that needs to be kept on a
- * per-cpu basis. genassym uses this to generate offsets for the assembler
- * code, which also provides external symbols so that C can get at them as
- * though they were really globals.
- *
* The SMP parts are setup in pmap.c and locore.s for the BSP, and
* mp_machdep.c sets up the data for the AP's to "see" when they awake.
* The reason for doing it via a struct is so that an array of pointers
* to each CPU's data can be set up for things like "check curproc on all
* other processors"
*/
-struct globaldata {
- struct globaldata *gd_prvspace; /* Self-reference */
- struct thread *gd_curthread;
- struct thread *gd_npxthread;
- struct pcb *gd_curpcb;
- struct thread *gd_idlethread;
- struct timeval gd_switchtime;
- struct i386tss gd_common_tss;
- int gd_switchticks;
- struct segment_descriptor gd_common_tssd;
- struct segment_descriptor *gd_tss_gdt;
- int gd_currentldt;
- u_int gd_cpuid;
- u_int gd_other_cpus;
- SLIST_ENTRY(globaldata) gd_allcpu;
- struct lock_list_entry *gd_spinlocks;
-#ifdef KTR_PERCPU
- int gd_ktr_idx; /* Index into trace table */
- char *gd_ktr_buf;
- char gd_ktr_buf_data[KTR_SIZE];
-#endif
-};
+#define PCPU_MD_FIELDS \
+ struct pcpu *pc_prvspace; /* Self-reference */ \
+ struct i386tss pc_common_tss; \
+ struct segment_descriptor pc_common_tssd; \
+ struct segment_descriptor *pc_tss_gdt; \
+ int pc_currentldt
+
+/*
+ * Evaluates to the byte offset of the per-cpu variable name.
+ */
+#define __pcpu_offset(name) \
+ __offsetof(struct pcpu, name)
+
+/*
+ * Evaluates to the type of the per-cpu variable name.
+ */
+#define __pcpu_type(name) \
+ __typeof(((struct pcpu *)0)->name)
+
+/*
+ * Evaluates to the address of the per-cpu variable name.
+ */
+#define __PCPU_PTR(name) ({ \
+ __pcpu_type(name) *__p; \
+ \
+ __asm __volatile("movl %%fs:%1,%0; addl %2,%0" \
+ : "=r" (__p) \
+ : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))), \
+ "i" (__pcpu_offset(name))); \
+ \
+ __p; \
+})
+
+/*
+ * Evaluates to the value of the per-cpu variable name.
+ */
+#define __PCPU_GET(name) ({ \
+ __pcpu_type(name) __result; \
+ \
+ if (sizeof(__result) == 1) { \
+ u_char __b; \
+ __asm __volatile("movb %%fs:%1,%0" \
+ : "=r" (__b) \
+ : "m" (*(u_char *)(__pcpu_offset(name)))); \
+ __result = *(__pcpu_type(name) *)&__b; \
+ } else if (sizeof(__result) == 2) { \
+ u_short __w; \
+ __asm __volatile("movw %%fs:%1,%0" \
+ : "=r" (__w) \
+ : "m" (*(u_short *)(__pcpu_offset(name)))); \
+ __result = *(__pcpu_type(name) *)&__w; \
+ } else if (sizeof(__result) == 4) { \
+ u_int __i; \
+ __asm __volatile("movl %%fs:%1,%0" \
+ : "=r" (__i) \
+ : "m" (*(u_int *)(__pcpu_offset(name)))); \
+ __result = *(__pcpu_type(name) *)&__i; \
+ } else { \
+ __result = *__PCPU_PTR(name); \
+ } \
+ \
+ __result; \
+})
+
+/*
+ * Sets the value of the per-cpu variable name to value val.
+ */
+#define __PCPU_SET(name, val) ({ \
+ __pcpu_type(name) __val = (val); \
+ \
+ if (sizeof(__val) == 1) { \
+ u_char __b; \
+ __b = *(u_char *)&__val; \
+ __asm __volatile("movb %1,%%fs:%0" \
+ : "=m" (*(u_char *)(__pcpu_offset(name))) \
+ : "r" (__b)); \
+ } else if (sizeof(__val) == 2) { \
+ u_short __w; \
+ __w = *(u_short *)&__val; \
+ __asm __volatile("movw %1,%%fs:%0" \
+ : "=m" (*(u_short *)(__pcpu_offset(name))) \
+ : "r" (__w)); \
+ } else if (sizeof(__val) == 4) { \
+ u_int __i; \
+ __i = *(u_int *)&__val; \
+ __asm __volatile("movl %1,%%fs:%0" \
+ : "=m" (*(u_int *)(__pcpu_offset(name))) \
+ : "r" (__i)); \
+ } else { \
+ *__PCPU_PTR(name) = __val; \
+ } \
+})
+
+#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
+#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
+#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)
#endif /* _KERNEL */
-#endif /* ! _MACHINE_GLOBALDATA_H_ */
+#endif /* ! _MACHINE_PCPU_H_ */
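
Usage note (not part of the commit): the accessors added above dispatch on
sizeof() so that 1-, 2- and 4-byte members become a single %fs-relative move,
while anything larger falls back to *__PCPU_PTR(name), which first loads the
pc_prvspace self-pointer and then adds the member offset.  Roughly, and only
as an illustration, PCPU_GET(cpuid) reduces to something like the sketch
below; pc_cpuid is assumed to be a 4-byte u_int MI field (inferred from the
ASSYM(PC_CPUID, ...) entry in genassym.c).

/*
 * Illustrative expansion of PCPU_GET(cpuid); the sizeof() == 4 branch
 * of __PCPU_GET() is taken.  Assumes <machine/pcpu.h> is included so
 * that __pcpu_offset() and struct pcpu are visible.
 */
static __inline u_int
example_pcpu_cpuid(void)
{
	u_int id;

	/* movl %fs:offsetof(struct pcpu, pc_cpuid), id */
	__asm __volatile("movl %%fs:%1,%0"
	    : "=r" (id)
	    : "m" (*(u_int *)(__pcpu_offset(pc_cpuid))));
	return (id);
}

Because %fs is loaded with the per-CPU GPRIV_SEL segment (set up in init386()
and init_secondary() above), the same offset reads a different CPU's copy of
the field on each processor.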
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
index 911f6b8..5b50824 100644
--- a/sys/amd64/include/proc.h
+++ b/sys/amd64/include/proc.h
@@ -37,7 +37,6 @@
#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_
-#include <machine/globals.h>
#include <machine/segments.h>
struct proc_ldt {
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index 1238d67..ff8c4cf 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -237,7 +237,7 @@ npx_intr(dummy)
#endif
/*
- * npxthread is normally non-null here. In that case, schedule an
+ * fpcurthread is normally non-null here. In that case, schedule an
* AST to finish the exception handling in the correct context
* (this interrupt may occur after the thread has entered the
* kernel via a syscall or an interrupt). Otherwise, the npx
@@ -248,7 +248,7 @@ npx_intr(dummy)
* that caused it and it will repeat. We will eventually (usually
* soon) win the race to handle the interrupt properly.
*/
- td = PCPU_GET(npxthread);
+ td = PCPU_GET(fpcurthread);
if (td != NULL) {
td->td_pcb->pcb_flags |= PCB_NPXTRAP;
mtx_lock_spin(&sched_lock);
@@ -513,7 +513,7 @@ npxinit(control)
/*
* fninit has the same h/w bugs as fnsave. Use the detoxified
* fnsave to throw away any junk in the fpu. npxsave() initializes
- * the fpu and sets npxthread = NULL as important side effects.
+ * the fpu and sets fpcurthread = NULL as important side effects.
*/
savecrit = critical_enter();
npxsave(&dummy);
@@ -540,7 +540,7 @@ npxexit(td)
critical_t savecrit;
savecrit = critical_enter();
- if (td == PCPU_GET(npxthread))
+ if (td == PCPU_GET(fpcurthread))
npxsave(&PCPU_GET(curpcb)->pcb_save);
critical_exit(savecrit);
#ifdef NPX_DEBUG
@@ -758,8 +758,8 @@ npxtrap()
u_long *exstat;
if (!npx_exists) {
- printf("npxtrap: npxthread = %p, curthread = %p, npx_exists = %d\n",
- PCPU_GET(npxthread), curthread, npx_exists);
+ printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n",
+ PCPU_GET(fpcurthread), curthread, npx_exists);
panic("npxtrap from nowhere");
}
savecrit = critical_enter();
@@ -769,7 +769,7 @@ npxtrap()
* state to memory. Fetch the relevant parts of the state from
* wherever they are.
*/
- if (PCPU_GET(npxthread) != curthread) {
+ if (PCPU_GET(fpcurthread) != curthread) {
control = GET_FPU_CW(curthread);
status = GET_FPU_SW(curthread);
} else {
@@ -779,7 +779,7 @@ npxtrap()
exstat = GET_FPU_EXSW_PTR(curthread->td_pcb);
*exstat = status;
- if (PCPU_GET(npxthread) != curthread)
+ if (PCPU_GET(fpcurthread) != curthread)
GET_FPU_SW(curthread) &= ~0x80bf;
else
fnclex();
@@ -790,7 +790,7 @@ npxtrap()
/*
* Implement device not available (DNA) exception
*
- * It would be better to switch FP context here (if curthread != npxthread)
+ * It would be better to switch FP context here (if curthread != fpcurthread)
* and not necessarily for every context switch, but it is too hard to
* access foreign pcb's.
*/
@@ -802,9 +802,9 @@ npxdna()
if (!npx_exists)
return (0);
- if (PCPU_GET(npxthread) != NULL) {
- printf("npxdna: npxthread = %p, curthread = %p\n",
- PCPU_GET(npxthread), curthread);
+ if (PCPU_GET(fpcurthread) != NULL) {
+ printf("npxdna: fpcurthread = %p, curthread = %p\n",
+ PCPU_GET(fpcurthread), curthread);
panic("npxdna");
}
s = critical_enter();
@@ -812,7 +812,7 @@ npxdna()
/*
* Record new context early in case frstor causes an IRQ13.
*/
- PCPU_SET(npxthread, curthread);
+ PCPU_SET(fpcurthread, curthread);
exstat = GET_FPU_EXSW_PTR(PCPU_GET(curpcb));
*exstat = 0;
@@ -844,13 +844,13 @@ npxdna()
* after the process has entered the kernel. It may even be delivered after
* the fnsave here completes. A spurious IRQ13 for the fnsave is handled in
* the same way as a very-late-arriving non-spurious IRQ13 from user mode:
- * it is normally ignored at first because we set npxthread to NULL; it is
+ * it is normally ignored at first because we set fpcurthread to NULL; it is
* normally retriggered in npxdna() after return to user mode.
*
* npxsave() must be called with interrupts disabled, so that it clears
- * npxthread atomically with saving the state. We require callers to do the
+ * fpcurthread atomically with saving the state. We require callers to do the
* disabling, since most callers need to disable interrupts anyway to call
- * npxsave() atomically with checking npxthread.
+ * npxsave() atomically with checking fpcurthread.
*
* A previous version of npxsave() went to great lengths to excecute fnsave
* with interrupts enabled in case executing it froze the CPU. This case
@@ -866,7 +866,7 @@ npxsave(addr)
fpusave(addr);
start_emulating();
- PCPU_SET(npxthread, NULL);
+ PCPU_SET(fpcurthread, NULL);
}
static void