author     kib <kib@FreeBSD.org>    2012-01-21 17:45:27 +0000
committer  kib <kib@FreeBSD.org>    2012-01-21 17:45:27 +0000
commit     361bfae5c2c758540993427b5fd4d32422b143d4 (patch)
tree       2514905a9aa4f850d72848118ca709f59f175020 /sys/amd64
parent     8fd18c5b0a668ecb5094c18e7210dc6b3c2ce7e4 (diff)
Add support for the extended FPU states on amd64, both for native
64bit and 32bit ABIs.  As a side-effect, it enables AVX on capable CPUs.

In particular:

- Query the CPU support for XSAVE, the list of supported extensions and
  the required size of the FPU save area.  The hw.use_xsave tunable is
  provided for disabling XSAVE, and hw.xsave_mask may be used to select
  the enabled extensions.

- Remove the FPU save area from the PCB and dynamically allocate the
  (run-time sized) user save area on top of the kernel stack, right
  above the PCB.  Reorganize the thread0 PCB initialization to postpone
  it until after the BSP is queried for the save area size.

- The dumppcb, stoppcbs and susppcbs no longer carry the FPU state
  either.  The FPU state is only useful for suspend, where it is saved
  in the dynamically allocated suspfpusave area.

- Use XSAVE and XRSTOR to save/restore the FPU state, if supported and
  enabled.

- Define a new mcontext_t flag, _MC_HASFPXSTATE, indicating that the
  mcontext_t has a valid pointer to out-of-struct extended FPU state.
  Signal handlers are supplied with stack-allocated FPU state.  The
  sigreturn(2) and setcontext(2) syscalls honour the flag, allowing
  signal handlers to inspect and manipulate the extended state in the
  interrupted context.

- getcontext(2) never returns extended state, since there is no room in
  the fixed-size mcontext_t for a variable-sized save area, and, because
  mcontext_t is embedded into ucontext_t, this cannot be fixed in a
  reasonable way.  Instead of extending the getcontext(2) syscall,
  provide a sysarch(2) facility to query the extended FPU state.

- Add ptrace(2) support for getting and setting the extended state;
  while there, implement the missing PT_I386_{GET,SET}XMMREGS requests
  for 32bit binaries.

- Change the fpu_kern KPI to not expose the struct fpu_kern_ctx layout
  to consumers, making it opaque.  Internally, struct fpu_kern_ctx now
  contains space for the extended state.  Convert the in-kernel
  consumers of the fpu_kern KPI on both i386 and amd64.

The first version of the AVX support was submitted by Tim Bird
<tim.bird am sony com> on behalf of Sony.  This version was written
from scratch.

Tested by:	pho (previous version), Yamagi Burmeister <lists yamagi org>
MFC after:	1 month
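As an illustration of the sysarch(2) facility described above, the sketch
below (not part of the patch) queries the current thread's extended FPU
state from userland.  The 320-byte buffer is an assumption that only holds
for an x87/SSE/AVX xsave_mask (the 64-byte xstate header plus 256 bytes of
YMM_Hi128 state, matching struct savefpu_ymm); real code would derive the
size from CPUID leaf 0xD, and the request returns EINVAL if the length
exceeds the area the kernel reserves beyond the legacy 512-byte savefpu.

#include <sys/types.h>
#include <machine/sysarch.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * XSAVE area beyond the legacy 512-byte savefpu block: 64-byte
	 * xstate header plus 256 bytes of YMM_Hi128 state.  This size is
	 * an assumption valid only for an x87/SSE/AVX xsave_mask.
	 */
	char xstate[320];
	struct amd64_get_xfpustate a;

	a.addr = xstate;
	a.len = sizeof(xstate);
	if (sysarch(AMD64_GET_XFPUSTATE, &a) == -1)
		err(1, "sysarch(AMD64_GET_XFPUSTATE)");
	printf("fetched %zu bytes of extended FPU state\n", sizeof(xstate));
	return (0);
}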
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/acpica/acpi_switch.S13
-rw-r--r--sys/amd64/acpica/acpi_wakecode.S2
-rw-r--r--sys/amd64/acpica/acpi_wakeup.c11
-rw-r--r--sys/amd64/amd64/cpu_switch.S30
-rw-r--r--sys/amd64/amd64/fpu.c275
-rw-r--r--sys/amd64/amd64/genassym.c2
-rw-r--r--sys/amd64/amd64/initcpu.c1
-rw-r--r--sys/amd64/amd64/machdep.c140
-rw-r--r--sys/amd64/amd64/mp_machdep.c4
-rw-r--r--sys/amd64/amd64/ptrace_machdep.c141
-rw-r--r--sys/amd64/amd64/sys_machdep.c26
-rw-r--r--sys/amd64/amd64/trap.c2
-rw-r--r--sys/amd64/amd64/vm_machdep.c72
-rw-r--r--sys/amd64/ia32/ia32_reg.c4
-rw-r--r--sys/amd64/ia32/ia32_signal.c117
-rw-r--r--sys/amd64/include/fpu.h22
-rw-r--r--sys/amd64/include/frame.h3
-rw-r--r--sys/amd64/include/md_var.h10
-rw-r--r--sys/amd64/include/pcb.h4
-rw-r--r--sys/amd64/include/ptrace.h5
-rw-r--r--sys/amd64/include/sysarch.h12
-rw-r--r--sys/amd64/include/ucontext.h8
22 files changed, 791 insertions, 113 deletions
diff --git a/sys/amd64/acpica/acpi_switch.S b/sys/amd64/acpica/acpi_switch.S
index 45bad1f..6091e2a 100644
--- a/sys/amd64/acpica/acpi_switch.S
+++ b/sys/amd64/acpica/acpi_switch.S
@@ -146,11 +146,22 @@ ENTRY(acpi_restorecpu)
/* Restore FPU state. */
fninit
- fxrstor PCB_USERFPU(%rdi)
+ movq WAKEUP_CTX(fpusave),%rdi
+ cmpl $0,use_xsave
+ jne 1f
+ fxrstor (%rdi)
+ jmp 2f
+1: movl xsave_mask,%eax
+ movl xsave_mask+4,%edx
+/* xrstor (%rdi) */
+ .byte 0x0f,0xae,0x2f
+2:
/* Reload CR0. */
movq %rcx, %cr0
+ movq WAKEUP_CTX(pcb),%rdi
+
/* Restore return address. */
movq PCB_RIP(%rdi), %rax
movq %rax, (%rsp)
diff --git a/sys/amd64/acpica/acpi_wakecode.S b/sys/amd64/acpica/acpi_wakecode.S
index 49d14f5..6e44e6c5 100644
--- a/sys/amd64/acpica/acpi_wakecode.S
+++ b/sys/amd64/acpica/acpi_wakecode.S
@@ -270,6 +270,8 @@ wakeup_pcb:
wakeup_gdt:
.word 0
.quad 0
+wakeup_fpusave:
+ .quad 0
ALIGN_DATA
wakeup_efer:
diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c
index 43aeec3..f862435 100644
--- a/sys/amd64/acpica/acpi_wakeup.c
+++ b/sys/amd64/acpica/acpi_wakeup.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
+#include <machine/md_var.h>
#ifdef SMP
#include <x86/apicreg.h>
@@ -67,8 +68,10 @@ extern int acpi_reset_video;
#ifdef SMP
extern struct pcb **susppcbs;
+extern void **suspfpusave;
#else
static struct pcb **susppcbs;
+static void **suspfpusave;
#endif
int acpi_restorecpu(vm_offset_t, struct pcb *);
@@ -105,6 +108,7 @@ acpi_wakeup_ap(struct acpi_softc *sc, int cpu)
int ms;
WAKECODE_FIXUP(wakeup_pcb, struct pcb *, susppcbs[cpu]);
+ WAKECODE_FIXUP(wakeup_fpusave, void *, suspfpusave[cpu]);
WAKECODE_FIXUP(wakeup_gdt, uint16_t, susppcbs[cpu]->pcb_gdt.rd_limit);
WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
susppcbs[cpu]->pcb_gdt.rd_base);
@@ -244,6 +248,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
load_cr3(KPML4phys);
if (savectx(susppcbs[0])) {
+ ctx_fpusave(suspfpusave[0]);
#ifdef SMP
if (!CPU_EMPTY(&wakeup_cpus) &&
suspend_cpus(wakeup_cpus) == 0) {
@@ -256,6 +261,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));
WAKECODE_FIXUP(wakeup_pcb, struct pcb *, susppcbs[0]);
+ WAKECODE_FIXUP(wakeup_fpusave, void *, suspfpusave[0]);
WAKECODE_FIXUP(wakeup_gdt, uint16_t,
susppcbs[0]->pcb_gdt.rd_limit);
WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
@@ -333,8 +339,11 @@ acpi_alloc_wakeup_handler(void)
return (NULL);
}
susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK);
- for (i = 0; i < mp_ncpus; i++)
+ suspfpusave = malloc(mp_ncpus * sizeof(void *), M_DEVBUF, M_WAITOK);
+ for (i = 0; i < mp_ncpus; i++) {
susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK);
+ suspfpusave[i] = alloc_fpusave(M_WAITOK);
+ }
return (wakeaddr);
}
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index efb01b1..bef4b75 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -112,16 +112,25 @@ done_store_dr:
/* have we used fp, and need a save? */
cmpq %rdi,PCPU(FPCURTHREAD)
- jne 1f
+ jne 3f
movq PCB_SAVEFPU(%r8),%r8
clts
+ cmpl $0,use_xsave
+ jne 1f
fxsave (%r8)
- smsw %ax
+ jmp 2f
+1: movq %rdx,%rcx
+ movl xsave_mask,%eax
+ movl xsave_mask+4,%edx
+/* xsave (%r8) */
+ .byte 0x41,0x0f,0xae,0x20
+ movq %rcx,%rdx
+2: smsw %ax
orb $CR0_TS,%al
lmsw %ax
xorl %eax,%eax
movq %rax,PCPU(FPCURTHREAD)
-1:
+3:
/* Save is done. Now fire up new thread. Leave old vmspace. */
movq TD_PCB(%rsi),%r8
@@ -354,10 +363,19 @@ ENTRY(savectx)
sldt PCB_LDT(%rdi)
str PCB_TR(%rdi)
- clts
- fxsave PCB_USERFPU(%rdi)
- movq %rsi,%cr0 /* The previous %cr0 is saved in %rsi. */
+2: movq %rsi,%cr0 /* The previous %cr0 is saved in %rsi. */
movl $1,%eax
ret
END(savectx)
+
+/*
+ * Wrapper around fpusave to care about TS0_CR.
+ */
+ENTRY(ctx_fpusave)
+ movq %cr0,%rsi
+ clts
+ call fpusave
+ movq %rsi,%cr0
+ ret
+END(ctx_fpusave)
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index 08e5e57..af1fc5e 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -96,19 +96,97 @@ void stop_emulating(void);
#define GET_FPU_CW(thread) ((thread)->td_pcb->pcb_save->sv_env.en_cw)
#define GET_FPU_SW(thread) ((thread)->td_pcb->pcb_save->sv_env.en_sw)
-typedef u_char bool_t;
+CTASSERT(sizeof(struct savefpu) == 512);
+CTASSERT(sizeof(struct xstate_hdr) == 64);
+CTASSERT(sizeof(struct savefpu_ymm) == 832);
+
+/*
+ * This requirement is to make it easier for asm code to calculate
+ * offset of the fpu save area from the pcb address. FPU save area
+ * must by 64-bytes aligned.
+ */
+CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
static void fpu_clean_state(void);
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
NULL, 1, "Floating point instructions executed in hardware");
-static struct savefpu fpu_initialstate;
+int use_xsave; /* non-static for cpu_switch.S */
+uint64_t xsave_mask; /* the same */
+static struct savefpu *fpu_initialstate;
+
+void
+fpusave(void *addr)
+{
+
+ if (use_xsave)
+ xsave((char *)addr, xsave_mask);
+ else
+ fxsave((char *)addr);
+}
+
+static void
+fpurestore(void *addr)
+{
+
+ if (use_xsave)
+ xrstor((char *)addr, xsave_mask);
+ else
+ fxrstor((char *)addr);
+}
+
+/*
+ * Enable XSAVE if supported and allowed by user.
+ * Calculate the xsave_mask.
+ */
+static void
+fpuinit_bsp1(void)
+{
+ u_int cp[4];
+ uint64_t xsave_mask_user;
+
+ if ((cpu_feature2 & CPUID2_XSAVE) != 0) {
+ use_xsave = 1;
+ TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
+ }
+ if (!use_xsave)
+ return;
+
+ cpuid_count(0xd, 0x0, cp);
+ xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
+ if ((cp[0] & xsave_mask) != xsave_mask)
+ panic("CPU0 does not support X87 or SSE: %x", cp[0]);
+ xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
+ xsave_mask_user = xsave_mask;
+ TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
+ xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
+ xsave_mask &= xsave_mask_user;
+}
/*
- * Initialize the floating point unit. On the boot CPU we generate a
- * clean state that is used to initialize the floating point unit when
- * it is first used by a process.
+ * Calculate the fpu save area size.
+ */
+static void
+fpuinit_bsp2(void)
+{
+ u_int cp[4];
+
+ if (use_xsave) {
+ cpuid_count(0xd, 0x0, cp);
+ cpu_max_ext_state_size = cp[1];
+
+ /*
+ * Reload the cpu_feature2, since we enabled OSXSAVE.
+ */
+ do_cpuid(1, cp);
+ cpu_feature2 = cp[2];
+ } else
+ cpu_max_ext_state_size = sizeof(struct savefpu);
+}
+
+/*
+ * Initialize the floating point unit.
*/
void
fpuinit(void)
@@ -117,6 +195,20 @@ fpuinit(void)
u_int mxcsr;
u_short control;
+ if (IS_BSP())
+ fpuinit_bsp1();
+
+ if (use_xsave) {
+ load_cr4(rcr4() | CR4_XSAVE);
+ xsetbv(XCR0, xsave_mask);
+ }
+
+ /*
+ * XCR0 shall be set up before CPU can report the save area size.
+ */
+ if (IS_BSP())
+ fpuinit_bsp2();
+
/*
* It is too early for critical_enter() to work on AP.
*/
@@ -127,20 +219,46 @@ fpuinit(void)
fldcw(control);
mxcsr = __INITIAL_MXCSR__;
ldmxcsr(mxcsr);
- if (PCPU_GET(cpuid) == 0) {
- fxsave(&fpu_initialstate);
- if (fpu_initialstate.sv_env.en_mxcsr_mask)
- cpu_mxcsr_mask = fpu_initialstate.sv_env.en_mxcsr_mask;
- else
- cpu_mxcsr_mask = 0xFFBF;
- bzero(fpu_initialstate.sv_fp, sizeof(fpu_initialstate.sv_fp));
- bzero(fpu_initialstate.sv_xmm, sizeof(fpu_initialstate.sv_xmm));
- }
start_emulating();
intr_restore(saveintr);
}
/*
+ * On the boot CPU we generate a clean state that is used to
+ * initialize the floating point unit when it is first used by a
+ * process.
+ */
+static void
+fpuinitstate(void *arg __unused)
+{
+ register_t saveintr;
+
+ fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
+ M_WAITOK | M_ZERO);
+ saveintr = intr_disable();
+ stop_emulating();
+
+ fpusave(fpu_initialstate);
+ if (fpu_initialstate->sv_env.en_mxcsr_mask)
+ cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
+ else
+ cpu_mxcsr_mask = 0xFFBF;
+
+ /*
+ * The fninit instruction does not modify XMM registers. The
+ * fpusave call dumped the garbage contained in the registers
+ * after reset to the initial state saved. Clear XMM
+ * registers file image to make the startup program state and
+ * signal handler XMM register content predictable.
+ */
+ bzero(&fpu_initialstate->sv_xmm[0], sizeof(struct xmmacc));
+
+ start_emulating();
+ intr_restore(saveintr);
+}
+SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, fpuinitstate, NULL);
+
+/*
* Free coprocessor (if we have it).
*/
void
@@ -150,7 +268,7 @@ fpuexit(struct thread *td)
critical_enter();
if (curthread == PCPU_GET(fpcurthread)) {
stop_emulating();
- fxsave(PCPU_GET(curpcb)->pcb_save);
+ fpusave(PCPU_GET(curpcb)->pcb_save);
start_emulating();
PCPU_SET(fpcurthread, 0);
}
@@ -423,7 +541,7 @@ fpudna(void)
* the PCB doesn't contain a clean FPU state. Explicitly
* load an initial state.
*/
- fxrstor(&fpu_initialstate);
+ fpurestore(fpu_initialstate);
if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
fldcw(pcb->pcb_initial_fpucw);
if (PCB_USER_FPU(pcb))
@@ -432,7 +550,7 @@ fpudna(void)
else
set_pcb_flags(pcb, PCB_FPUINITDONE);
} else
- fxrstor(pcb->pcb_save);
+ fpurestore(pcb->pcb_save);
critical_exit();
}
@@ -461,15 +579,16 @@ fpugetregs(struct thread *td)
pcb = td->td_pcb;
if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
- bcopy(&fpu_initialstate, &pcb->pcb_user_save,
- sizeof(fpu_initialstate));
- pcb->pcb_user_save.sv_env.en_cw = pcb->pcb_initial_fpucw;
+ bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
+ cpu_max_ext_state_size);
+ get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
+ pcb->pcb_initial_fpucw;
fpuuserinited(td);
return (_MC_FPOWNED_PCB);
}
critical_enter();
if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
- fxsave(&pcb->pcb_user_save);
+ fpusave(get_pcb_user_save_pcb(pcb));
critical_exit();
return (_MC_FPOWNED_FPU);
} else {
@@ -491,25 +610,78 @@ fpuuserinited(struct thread *td)
set_pcb_flags(pcb, PCB_FPUINITDONE);
}
+int
+fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
+{
+ struct xstate_hdr *hdr, *ehdr;
+ size_t len, max_len;
+ uint64_t bv;
+
+ /* XXXKIB should we clear all extended state in xstate_bv instead ? */
+ if (xfpustate == NULL)
+ return (0);
+ if (!use_xsave)
+ return (EOPNOTSUPP);
+
+ len = xfpustate_size;
+ if (len < sizeof(struct xstate_hdr))
+ return (EINVAL);
+ max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ if (len > max_len)
+ return (EINVAL);
+
+ ehdr = (struct xstate_hdr *)xfpustate;
+ bv = ehdr->xstate_bv;
+
+ /*
+ * Avoid #gp.
+ */
+ if (bv & ~xsave_mask)
+ return (EINVAL);
+ if ((bv & (XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE)) !=
+ (XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE))
+ return (EINVAL);
+
+ hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
+
+ hdr->xstate_bv = bv;
+ bcopy(xfpustate + sizeof(struct xstate_hdr),
+ (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
+
+ return (0);
+}
+
/*
* Set the state of the FPU.
*/
-void
-fpusetregs(struct thread *td, struct savefpu *addr)
+int
+fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
+ size_t xfpustate_size)
{
struct pcb *pcb;
+ int error;
pcb = td->td_pcb;
critical_enter();
if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
- fxrstor(addr);
+ error = fpusetxstate(td, xfpustate, xfpustate_size);
+ if (error != 0) {
+ critical_exit();
+ return (error);
+ }
+ bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
+ fpurestore(get_pcb_user_save_td(td));
critical_exit();
set_pcb_flags(pcb, PCB_FPUINITDONE | PCB_USERFPUINITDONE);
} else {
critical_exit();
- bcopy(addr, &td->td_pcb->pcb_user_save, sizeof(*addr));
+ error = fpusetxstate(td, xfpustate, xfpustate_size);
+ if (error != 0)
+ return (error);
+ bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
fpuuserinited(td);
}
+ return (0);
}
/*
@@ -599,20 +771,62 @@ static devclass_t fpupnp_devclass;
DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
#endif /* DEV_ISA */
+static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
+ "Kernel contexts for FPU state");
+
+#define FPU_KERN_CTX_FPUINITDONE 0x01
+
+struct fpu_kern_ctx {
+ struct savefpu *prev;
+ uint32_t flags;
+ char hwstate1[];
+};
+
+struct fpu_kern_ctx *
+fpu_kern_alloc_ctx(u_int flags)
+{
+ struct fpu_kern_ctx *res;
+ size_t sz;
+
+ sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
+ cpu_max_ext_state_size;
+ res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
+ M_NOWAIT : M_WAITOK) | M_ZERO);
+ return (res);
+}
+
+void
+fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
+{
+
+ /* XXXKIB clear the memory ? */
+ free(ctx, M_FPUKERN_CTX);
+}
+
+static struct savefpu *
+fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
+{
+ vm_offset_t p;
+
+ p = (vm_offset_t)&ctx->hwstate1;
+ p = roundup2(p, XSAVE_AREA_ALIGN);
+ return ((struct savefpu *)p);
+}
+
int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
struct pcb *pcb;
pcb = td->td_pcb;
- KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == &pcb->pcb_user_save,
- ("mangled pcb_save"));
+ KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
+ get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
ctx->flags = 0;
if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
fpuexit(td);
ctx->prev = pcb->pcb_save;
- pcb->pcb_save = &ctx->hwstate;
+ pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
set_pcb_flags(pcb, PCB_KERNFPU);
clear_pcb_flags(pcb, PCB_FPUINITDONE);
return (0);
@@ -629,7 +843,7 @@ fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
fpudrop();
critical_exit();
pcb->pcb_save = ctx->prev;
- if (pcb->pcb_save == &pcb->pcb_user_save) {
+ if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
set_pcb_flags(pcb, PCB_FPUINITDONE);
clear_pcb_flags(pcb, PCB_KERNFPU);
@@ -653,7 +867,8 @@ fpu_kern_thread(u_int flags)
pcb = PCPU_GET(curpcb);
KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
("Only kthread may use fpu_kern_thread"));
- KASSERT(pcb->pcb_save == &pcb->pcb_user_save, ("mangled pcb_save"));
+ KASSERT(pcb->pcb_save == get_pcb_user_save_pcb(pcb),
+ ("mangled pcb_save"));
KASSERT(PCB_USER_FPU(pcb), ("recursive call"));
set_pcb_flags(pcb, PCB_KERNFPU);
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index d133223..3796aa8 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -156,7 +156,7 @@ ASSYM(PCB_GS32SD, offsetof(struct pcb, pcb_gs32sd));
ASSYM(PCB_TSSP, offsetof(struct pcb, pcb_tssp));
ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_save));
ASSYM(PCB_SAVEFPU_SIZE, sizeof(struct savefpu));
-ASSYM(PCB_USERFPU, offsetof(struct pcb, pcb_user_save));
+ASSYM(PCB_USERFPU, sizeof(struct pcb));
ASSYM(PCB_SIZE, sizeof(struct pcb));
ASSYM(PCB_FULL_IRET, PCB_FULL_IRET);
ASSYM(PCB_DBREGS, PCB_DBREGS);
diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
index 5a832a6..02588af 100644
--- a/sys/amd64/amd64/initcpu.c
+++ b/sys/amd64/amd64/initcpu.c
@@ -72,6 +72,7 @@ u_int cpu_vendor_id; /* CPU vendor ID */
u_int cpu_fxsr; /* SSE enabled */
u_int cpu_mxcsr_mask; /* Valid bits in mxcsr */
u_int cpu_clflush_line_size = 32;
+u_int cpu_max_ext_state_size;
SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
&via_feature_rng, 0, "VIA RNG feature available in CPU");
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index e9d160f..bc14745 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -154,8 +154,10 @@ extern void panicifcpuunsupported(void);
#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
static void cpu_startup(void *);
-static void get_fpcontext(struct thread *td, mcontext_t *mcp);
-static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
+static void get_fpcontext(struct thread *td, mcontext_t *mcp,
+ char *xfpusave, size_t xfpusave_len);
+static int set_fpcontext(struct thread *td, const mcontext_t *mcp,
+ char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
/*
@@ -315,6 +317,8 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
struct sigacts *psp;
char *sp;
struct trapframe *regs;
+ char *xfpusave;
+ size_t xfpusave_len;
int sig;
int oonstack;
@@ -328,6 +332,14 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
regs = td->td_frame;
oonstack = sigonstack(regs->tf_rsp);
+ if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
+ xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ xfpusave = __builtin_alloca(xfpusave_len);
+ } else {
+ xfpusave_len = 0;
+ xfpusave = NULL;
+ }
+
/* Save user context. */
bzero(&sf, sizeof(sf));
sf.sf_uc.uc_sigmask = *mask;
@@ -337,7 +349,7 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
- get_fpcontext(td, &sf.sf_uc.uc_mcontext);
+ get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
fpstate_drop(td);
sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
@@ -348,13 +360,18 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
/* Allocate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
SIGISMEMBER(psp->ps_sigonstack, sig)) {
- sp = td->td_sigstk.ss_sp +
- td->td_sigstk.ss_size - sizeof(struct sigframe);
+ sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
} else
- sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
+ sp = (char *)regs->tf_rsp - 128;
+ if (xfpusave != NULL) {
+ sp -= xfpusave_len;
+ sp = (char *)((unsigned long)sp & ~0x3Ful);
+ sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
+ }
+ sp -= sizeof(struct sigframe);
/* Align to 16 bytes. */
sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
@@ -387,7 +404,10 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
/*
* Copy the sigframe out to the user's stack.
*/
- if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
+ if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
+ (xfpusave != NULL && copyout(xfpusave,
+ (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
+ != 0)) {
#ifdef DEBUG
printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
@@ -432,6 +452,8 @@ sys_sigreturn(td, uap)
struct proc *p;
struct trapframe *regs;
ucontext_t *ucp;
+ char *xfpustate;
+ size_t xfpustate_len;
long rflags;
int cs, error, ret;
ksiginfo_t ksi;
@@ -490,7 +512,28 @@ sys_sigreturn(td, uap)
return (EINVAL);
}
- ret = set_fpcontext(td, &ucp->uc_mcontext);
+ if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
+ xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
+ if (xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu)) {
+ uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
+ p->p_pid, td->td_name, xfpustate_len);
+ return (EINVAL);
+ }
+ xfpustate = __builtin_alloca(xfpustate_len);
+ error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
+ xfpustate, xfpustate_len);
+ if (error != 0) {
+ uprintf(
+ "pid %d (%s): sigreturn copying xfpustate failed\n",
+ p->p_pid, td->td_name);
+ return (error);
+ }
+ } else {
+ xfpustate = NULL;
+ xfpustate_len = 0;
+ }
+ ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
if (ret != 0) {
uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
p->p_pid, td->td_name, ret);
@@ -1592,6 +1635,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
int gsel_tss, x;
struct pcpu *pc;
struct nmi_pcpu *np;
+ struct xstate_hdr *xhdr;
u_int64_t msr;
char *env;
size_t kstack0_sz;
@@ -1601,7 +1645,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
bzero((void *)thread0.td_kstack, kstack0_sz);
physfree += kstack0_sz;
- thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
/*
* This may be done better later if it gets more high level
@@ -1650,7 +1693,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
physfree += DPCPU_SIZE;
PCPU_SET(prvspace, pc);
PCPU_SET(curthread, &thread0);
- PCPU_SET(curpcb, thread0.td_pcb);
PCPU_SET(tssp, &common_tss[0]);
PCPU_SET(commontssp, &common_tss[0]);
PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
@@ -1742,13 +1784,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
initializecpu(); /* Initialize CPU registers */
initializecpucache();
- /* make an initial tss so cpu can get interrupt stack on syscall! */
- common_tss[0].tss_rsp0 = thread0.td_kstack +
- kstack0_sz - sizeof(struct pcb);
- /* Ensure the stack is aligned to 16 bytes */
- common_tss[0].tss_rsp0 &= ~0xFul;
- PCPU_SET(rsp0, common_tss[0].tss_rsp0);
-
/* doublefault stack space, runs on ist1 */
common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
@@ -1785,6 +1820,25 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
msgbufinit(msgbufp, msgbufsize);
fpuinit();
+ /*
+ * Set up thread0 pcb after fpuinit calculated pcb + fpu save
+ * area size. Zero out the extended state header in fpu save
+ * area.
+ */
+ thread0.td_pcb = get_pcb_td(&thread0);
+ bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
+ if (use_xsave) {
+ xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
+ 1);
+ xhdr->xstate_bv = xsave_mask;
+ }
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+ common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb;
+ /* Ensure the stack is aligned to 16 bytes */
+ common_tss[0].tss_rsp0 &= ~0xFul;
+ PCPU_SET(rsp0, common_tss[0].tss_rsp0);
+ PCPU_SET(curpcb, thread0.td_pcb);
+
/* transfer to user mode */
_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
@@ -2054,7 +2108,7 @@ fill_fpregs(struct thread *td, struct fpreg *fpregs)
P_SHOULDSTOP(td->td_proc),
("not suspended thread %p", td));
fpugetregs(td);
- fill_fpregs_xmm(&td->td_pcb->pcb_user_save, fpregs);
+ fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
return (0);
}
@@ -2063,7 +2117,7 @@ int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
- set_fpregs_xmm(fpregs, &td->td_pcb->pcb_user_save);
+ set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
fpuuserinited(td);
return (0);
}
@@ -2114,9 +2168,11 @@ get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
mcp->mc_gs = tp->tf_gs;
mcp->mc_flags = tp->tf_flags;
mcp->mc_len = sizeof(*mcp);
- get_fpcontext(td, mcp);
+ get_fpcontext(td, mcp, NULL, 0);
mcp->mc_fsbase = pcb->pcb_fsbase;
mcp->mc_gsbase = pcb->pcb_gsbase;
+ mcp->mc_xfpustate = 0;
+ mcp->mc_xfpustate_len = 0;
bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
return (0);
}
@@ -2132,6 +2188,7 @@ set_mcontext(struct thread *td, const mcontext_t *mcp)
{
struct pcb *pcb;
struct trapframe *tp;
+ char *xfpustate;
long rflags;
int ret;
@@ -2142,7 +2199,18 @@ set_mcontext(struct thread *td, const mcontext_t *mcp)
return (EINVAL);
rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
(tp->tf_rflags & ~PSL_USERCHANGE);
- ret = set_fpcontext(td, mcp);
+ if (mcp->mc_flags & _MC_HASFPXSTATE) {
+ if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu))
+ return (EINVAL);
+ xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
+ ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
+ mcp->mc_xfpustate_len);
+ if (ret != 0)
+ return (ret);
+ } else
+ xfpustate = NULL;
+ ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
if (ret != 0)
return (ret);
tp->tf_r15 = mcp->mc_r15;
@@ -2180,35 +2248,51 @@ set_mcontext(struct thread *td, const mcontext_t *mcp)
}
static void
-get_fpcontext(struct thread *td, mcontext_t *mcp)
+get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
+ size_t xfpusave_len)
{
+ size_t max_len, len;
mcp->mc_ownedfp = fpugetregs(td);
- bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
+ bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate,
sizeof(mcp->mc_fpstate));
mcp->mc_fpformat = fpuformat();
+ if (!use_xsave || xfpusave_len == 0)
+ return;
+ max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ len = xfpusave_len;
+ if (len > max_len) {
+ len = max_len;
+ bzero(xfpusave + max_len, len - max_len);
+ }
+ mcp->mc_flags |= _MC_HASFPXSTATE;
+ mcp->mc_xfpustate_len = len;
+ bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}
static int
-set_fpcontext(struct thread *td, const mcontext_t *mcp)
+set_fpcontext(struct thread *td, const mcontext_t *mcp, char *xfpustate,
+ size_t xfpustate_len)
{
struct savefpu *fpstate;
+ int error;
if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
return (0);
else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
return (EINVAL);
- else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
+ else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
/* We don't care what state is left in the FPU or PCB. */
fpstate_drop(td);
- else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
+ error = 0;
+ } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
fpstate = (struct savefpu *)&mcp->mc_fpstate;
fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
- fpusetregs(td, fpstate);
+ error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);
} else
return (EINVAL);
- return (0);
+ return (error);
}
void
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 50b52c8..da1812d 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -99,7 +99,8 @@ char *nmi_stack;
void *dpcpu;
struct pcb stoppcbs[MAXCPU];
-struct pcb **susppcbs = NULL;
+struct pcb **susppcbs;
+void **suspfpusave;
/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
@@ -1422,6 +1423,7 @@ cpususpend_handler(void)
cr3 = rcr3();
if (savectx(susppcbs[cpu])) {
+ ctx_fpusave(suspfpusave[cpu]);
wbinvd();
CPU_SET_ATOMIC(cpu, &stopped_cpus);
} else {
diff --git a/sys/amd64/amd64/ptrace_machdep.c b/sys/amd64/amd64/ptrace_machdep.c
new file mode 100644
index 0000000..8eea132
--- /dev/null
+++ b/sys/amd64/amd64/ptrace_machdep.c
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 2011 Konstantin Belousov <kib@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/sysent.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+static int
+cpu_ptrace_xstate(struct thread *td, int req, void *addr, int data)
+{
+ char *savefpu;
+ int error;
+
+ if (!use_xsave)
+ return (EOPNOTSUPP);
+
+ switch (req) {
+ case PT_GETXSTATE:
+ savefpu = (char *)(get_pcb_user_save_td(td) + 1);
+ error = copyout(savefpu, addr,
+ cpu_max_ext_state_size - sizeof(struct savefpu));
+ break;
+
+ case PT_SETXSTATE:
+ if (data > cpu_max_ext_state_size - sizeof(struct savefpu)) {
+ error = EINVAL;
+ break;
+ }
+ savefpu = malloc(data, M_TEMP, M_WAITOK);
+ error = copyin(addr, savefpu, data);
+ if (error == 0)
+ error = fpusetxstate(td, savefpu, data);
+ free(savefpu, M_TEMP);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+#define PT_I386_GETXMMREGS (PT_FIRSTMACH + 0)
+#define PT_I386_SETXMMREGS (PT_FIRSTMACH + 1)
+#define PT_I386_GETXSTATE (PT_FIRSTMACH + 2)
+#define PT_I386_SETXSTATE (PT_FIRSTMACH + 3)
+
+static int
+cpu32_ptrace(struct thread *td, int req, void *addr, int data)
+{
+ struct savefpu *fpstate;
+ int error;
+
+ switch (req) {
+ case PT_I386_GETXMMREGS:
+ error = copyout(get_pcb_user_save_td(td), addr,
+ sizeof(*fpstate));
+ break;
+
+ case PT_I386_SETXMMREGS:
+ fpstate = get_pcb_user_save_td(td);
+ error = copyin(addr, fpstate, sizeof(*fpstate));
+ fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
+ break;
+
+ case PT_I386_GETXSTATE:
+ error = cpu_ptrace_xstate(td, PT_GETXSTATE, addr, data);
+ break;
+
+ case PT_I386_SETXSTATE:
+ error = cpu_ptrace_xstate(td, PT_SETXSTATE, addr, data);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+#endif
+
+int
+cpu_ptrace(struct thread *td, int req, void *addr, int data)
+{
+ int error;
+
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32))
+ return (cpu32_ptrace(td, req, addr, data));
+#endif
+
+ switch (req) {
+ case PT_GETXSTATE:
+ case PT_SETXSTATE:
+ error = cpu_ptrace_xstate(td, req, addr, data);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index bcd7fa3..2f136ab 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -179,6 +179,8 @@ sysarch(td, uap)
uint32_t i386base;
uint64_t a64base;
struct i386_ioperm_args iargs;
+ struct i386_get_xfpustate i386xfpu;
+ struct amd64_get_xfpustate a64xfpu;
#ifdef CAPABILITY_MODE
/*
@@ -195,10 +197,12 @@ sysarch(td, uap)
case I386_SET_FSBASE:
case I386_GET_GSBASE:
case I386_SET_GSBASE:
+ case I386_GET_XFPUSTATE:
case AMD64_GET_FSBASE:
case AMD64_SET_FSBASE:
case AMD64_GET_GSBASE:
case AMD64_SET_GSBASE:
+ case AMD64_GET_XFPUSTATE:
break;
case I386_SET_IOPERM:
@@ -226,6 +230,18 @@ sysarch(td, uap)
sizeof(struct i386_ioperm_args))) != 0)
return (error);
break;
+ case I386_GET_XFPUSTATE:
+ if ((error = copyin(uap->parms, &i386xfpu,
+ sizeof(struct i386_get_xfpustate))) != 0)
+ return (error);
+ a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
+ a64xfpu.len = i386xfpu.len;
+ break;
+ case AMD64_GET_XFPUSTATE:
+ if ((error = copyin(uap->parms, &a64xfpu,
+ sizeof(struct amd64_get_xfpustate))) != 0)
+ return (error);
+ break;
default:
break;
}
@@ -296,6 +312,16 @@ sysarch(td, uap)
}
break;
+ case I386_GET_XFPUSTATE:
+ case AMD64_GET_XFPUSTATE:
+ if (a64xfpu.len > cpu_max_ext_state_size -
+ sizeof(struct savefpu))
+ return (EINVAL);
+ fpugetregs(td);
+ error = copyout((char *)(get_pcb_user_save_td(td) + 1),
+ a64xfpu.addr, a64xfpu.len);
+ return (error);
+
default:
error = EINVAL;
break;
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 16b9c1a..a2363db 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -934,7 +934,7 @@ amd64_syscall(struct thread *td, int traced)
KASSERT(PCB_USER_FPU(td->td_pcb),
("System call %s returing with kernel FPU ctx leaked",
syscallname(td->td_proc, sa.code)));
- KASSERT(td->td_pcb->pcb_save == &td->td_pcb->pcb_user_save,
+ KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
("System call %s returning with mangled pcb_save",
syscallname(td->td_proc, sa.code)));
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index d05880f..acb2188 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -90,6 +90,51 @@ static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
#endif
+struct savefpu *
+get_pcb_user_save_td(struct thread *td)
+{
+ vm_offset_t p;
+
+ p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
+ cpu_max_ext_state_size;
+ KASSERT((p % 64) == 0, ("Unaligned pcb_user_save area"));
+ return ((struct savefpu *)p);
+}
+
+struct savefpu *
+get_pcb_user_save_pcb(struct pcb *pcb)
+{
+ vm_offset_t p;
+
+ p = (vm_offset_t)(pcb + 1);
+ return ((struct savefpu *)p);
+}
+
+struct pcb *
+get_pcb_td(struct thread *td)
+{
+ vm_offset_t p;
+
+ p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
+ cpu_max_ext_state_size - sizeof(struct pcb);
+ return ((struct pcb *)p);
+}
+
+void *
+alloc_fpusave(int flags)
+{
+ struct pcb *res;
+ struct savefpu_ymm *sf;
+
+ res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
+ if (use_xsave) {
+ sf = (struct savefpu_ymm *)res;
+ bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
+ sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
+ }
+ return (res);
+}
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb, set up the stack so that the child
@@ -127,15 +172,16 @@ cpu_fork(td1, p2, td2, flags)
fpuexit(td1);
/* Point the pcb to the top of the stack */
- pcb2 = (struct pcb *)(td2->td_kstack +
- td2->td_kstack_pages * PAGE_SIZE) - 1;
+ pcb2 = get_pcb_td(td2);
td2->td_pcb = pcb2;
/* Copy td1's pcb */
bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
/* Properly initialize pcb_save */
- pcb2->pcb_save = &pcb2->pcb_user_save;
+ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
+ bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
+ cpu_max_ext_state_size);
/* Point mdproc and then copy over td1's contents */
mdp2 = &p2->p_md;
@@ -310,11 +356,17 @@ cpu_thread_swapout(struct thread *td)
void
cpu_thread_alloc(struct thread *td)
{
-
- td->td_pcb = (struct pcb *)(td->td_kstack +
- td->td_kstack_pages * PAGE_SIZE) - 1;
- td->td_frame = (struct trapframe *)td->td_pcb - 1;
- td->td_pcb->pcb_save = &td->td_pcb->pcb_user_save;
+ struct pcb *pcb;
+ struct xstate_hdr *xhdr;
+
+ td->td_pcb = pcb = get_pcb_td(td);
+ td->td_frame = (struct trapframe *)pcb - 1;
+ pcb->pcb_save = get_pcb_user_save_pcb(pcb);
+ if (use_xsave) {
+ xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
+ bzero(xhdr, sizeof(*xhdr));
+ xhdr->xstate_bv = xsave_mask;
+ }
}
void
@@ -387,7 +439,9 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
*/
bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE);
- pcb2->pcb_save = &pcb2->pcb_user_save;
+ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
+ bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
+ cpu_max_ext_state_size);
set_pcb_flags(pcb2, PCB_FULL_IRET);
/*
diff --git a/sys/amd64/ia32/ia32_reg.c b/sys/amd64/ia32/ia32_reg.c
index 279df9a..71eed5e 100644
--- a/sys/amd64/ia32/ia32_reg.c
+++ b/sys/amd64/ia32/ia32_reg.c
@@ -155,7 +155,7 @@ fill_fpregs32(struct thread *td, struct fpreg32 *regs)
sv_87 = (struct save87 *)regs;
penv_87 = &sv_87->sv_env;
fpugetregs(td);
- sv_fpu = &td->td_pcb->pcb_user_save;
+ sv_fpu = get_pcb_user_save_td(td);
penv_xmm = &sv_fpu->sv_env;
/* FPU control/status */
@@ -187,7 +187,7 @@ set_fpregs32(struct thread *td, struct fpreg32 *regs)
{
struct save87 *sv_87 = (struct save87 *)regs;
struct env87 *penv_87 = &sv_87->sv_env;
- struct savefpu *sv_fpu = &td->td_pcb->pcb_user_save;
+ struct savefpu *sv_fpu = get_pcb_user_save_td(td);
struct envxmm *penv_xmm = &sv_fpu->sv_env;
int i;
diff --git a/sys/amd64/ia32/ia32_signal.c b/sys/amd64/ia32/ia32_signal.c
index 2f41870..09ec7ab 100644
--- a/sys/amd64/ia32/ia32_signal.c
+++ b/sys/amd64/ia32/ia32_signal.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <compat/freebsd32/freebsd32_signal.h>
#include <compat/freebsd32/freebsd32_util.h>
#include <compat/freebsd32/freebsd32_proto.h>
+#include <compat/freebsd32/freebsd32.h>
#include <compat/ia32/ia32_signal.h>
#include <machine/psl.h>
#include <machine/segments.h>
@@ -83,15 +84,15 @@ __FBSDID("$FreeBSD$");
#ifdef COMPAT_FREEBSD4
static void freebsd4_ia32_sendsig(sig_t, ksiginfo_t *, sigset_t *);
#endif
-static void ia32_get_fpcontext(struct thread *td, struct ia32_mcontext *mcp);
-static int ia32_set_fpcontext(struct thread *td, const struct ia32_mcontext *mcp);
#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
static void
-ia32_get_fpcontext(struct thread *td, struct ia32_mcontext *mcp)
+ia32_get_fpcontext(struct thread *td, struct ia32_mcontext *mcp,
+ char *xfpusave, size_t xfpusave_len)
{
+ size_t max_len, len;
/*
* XXX Format of 64bit and 32bit FXSAVE areas differs. FXSAVE
@@ -100,28 +101,43 @@ ia32_get_fpcontext(struct thread *td, struct ia32_mcontext *mcp)
* for now, it should be irrelevant for most applications.
*/
mcp->mc_ownedfp = fpugetregs(td);
- bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
+ bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate,
sizeof(mcp->mc_fpstate));
mcp->mc_fpformat = fpuformat();
+ if (!use_xsave || xfpusave_len == 0)
+ return;
+ max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ len = xfpusave_len;
+ if (len > max_len) {
+ len = max_len;
+ bzero(xfpusave + max_len, len - max_len);
+ }
+ mcp->mc_flags |= _MC_HASFPXSTATE;
+ mcp->mc_xfpustate_len = len;
+ bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}
static int
-ia32_set_fpcontext(struct thread *td, const struct ia32_mcontext *mcp)
+ia32_set_fpcontext(struct thread *td, const struct ia32_mcontext *mcp,
+ char *xfpustate, size_t xfpustate_len)
{
+ int error;
if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
return (0);
else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
return (EINVAL);
- else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
+ else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
/* We don't care what state is left in the FPU or PCB. */
fpstate_drop(td);
- else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
+ error = 0;
+ } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
- fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate);
+ error = fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate,
+ xfpustate, xfpustate_len);
} else
return (EINVAL);
- return (0);
+ return (error);
}
/*
@@ -164,10 +180,12 @@ ia32_get_mcontext(struct thread *td, struct ia32_mcontext *mcp, int flags)
mcp->mc_esp = tp->tf_rsp;
mcp->mc_ss = tp->tf_ss;
mcp->mc_len = sizeof(*mcp);
- ia32_get_fpcontext(td, mcp);
+ mcp->mc_flags = tp->tf_flags;
+ ia32_get_fpcontext(td, mcp, NULL, 0);
mcp->mc_fsbase = pcb->pcb_fsbase;
mcp->mc_gsbase = pcb->pcb_gsbase;
- bzero(mcp->mc_spare1, sizeof(mcp->mc_spare1));
+ mcp->mc_xfpustate = 0;
+ mcp->mc_xfpustate_len = 0;
bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
set_pcb_flags(pcb, PCB_FULL_IRET);
return (0);
@@ -183,6 +201,7 @@ static int
ia32_set_mcontext(struct thread *td, const struct ia32_mcontext *mcp)
{
struct trapframe *tp;
+ char *xfpustate;
long rflags;
int ret;
@@ -191,7 +210,18 @@ ia32_set_mcontext(struct thread *td, const struct ia32_mcontext *mcp)
return (EINVAL);
rflags = (mcp->mc_eflags & PSL_USERCHANGE) |
(tp->tf_rflags & ~PSL_USERCHANGE);
- ret = ia32_set_fpcontext(td, mcp);
+ if (mcp->mc_flags & _MC_IA32_HASFPXSTATE) {
+ if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu))
+ return (EINVAL);
+ xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
+ ret = copyin(PTRIN(mcp->mc_xfpustate), xfpustate,
+ mcp->mc_xfpustate_len);
+ if (ret != 0)
+ return (ret);
+ } else
+ xfpustate = NULL;
+ ret = ia32_set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
if (ret != 0)
return (ret);
tp->tf_gs = mcp->mc_gs;
@@ -529,6 +559,8 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
struct sigacts *psp;
char *sp;
struct trapframe *regs;
+ char *xfpusave;
+ size_t xfpusave_len;
int oonstack;
int sig;
@@ -554,6 +586,14 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
regs = td->td_frame;
oonstack = sigonstack(regs->tf_rsp);
+ if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
+ xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ xfpusave = __builtin_alloca(xfpusave_len);
+ } else {
+ xfpusave_len = 0;
+ xfpusave = NULL;
+ }
+
/* Save user context. */
bzero(&sf, sizeof(sf));
sf.sf_uc.uc_sigmask = *mask;
@@ -582,7 +622,7 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sf.sf_uc.uc_mcontext.mc_fs = regs->tf_fs;
sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs;
sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
- ia32_get_fpcontext(td, &sf.sf_uc.uc_mcontext);
+ ia32_get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
fpstate_drop(td);
sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;
@@ -590,11 +630,16 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
/* Allocate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
- SIGISMEMBER(psp->ps_sigonstack, sig)) {
- sp = td->td_sigstk.ss_sp +
- td->td_sigstk.ss_size - sizeof(sf);
- } else
- sp = (char *)regs->tf_rsp - sizeof(sf);
+ SIGISMEMBER(psp->ps_sigonstack, sig))
+ sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
+ else
+ sp = (char *)regs->tf_rsp;
+ if (xfpusave != NULL) {
+ sp -= xfpusave_len;
+ sp = (char *)((unsigned long)sp & ~0x3Ful);
+ sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
+ }
+ sp -= sizeof(sf);
/* Align to 16 bytes. */
sfp = (struct ia32_sigframe *)((uintptr_t)sp & ~0xF);
PROC_UNLOCK(p);
@@ -626,7 +671,10 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
/*
* Copy the sigframe out to the user's stack.
*/
- if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
+ if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
+ (xfpusave != NULL && copyout(xfpusave,
+ PTRIN(sf.sf_uc.uc_mcontext.mc_xfpustate), xfpusave_len)
+ != 0)) {
#ifdef DEBUG
printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
@@ -812,6 +860,8 @@ freebsd32_sigreturn(td, uap)
struct ia32_ucontext uc;
struct trapframe *regs;
struct ia32_ucontext *ucp;
+ char *xfpustate;
+ size_t xfpustate_len;
int cs, eflags, error, ret;
ksiginfo_t ksi;
@@ -858,9 +908,34 @@ freebsd32_sigreturn(td, uap)
return (EINVAL);
}
- ret = ia32_set_fpcontext(td, &ucp->uc_mcontext);
- if (ret != 0)
+ if ((ucp->uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
+ xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
+ if (xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu)) {
+ uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
+ td->td_proc->p_pid, td->td_name, xfpustate_len);
+ return (EINVAL);
+ }
+ xfpustate = __builtin_alloca(xfpustate_len);
+ error = copyin(PTRIN(ucp->uc_mcontext.mc_xfpustate),
+ xfpustate, xfpustate_len);
+ if (error != 0) {
+ uprintf(
+ "pid %d (%s): sigreturn copying xfpustate failed\n",
+ td->td_proc->p_pid, td->td_name);
+ return (error);
+ }
+ } else {
+ xfpustate = NULL;
+ xfpustate_len = 0;
+ }
+ ret = ia32_set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
+ xfpustate_len);
+ if (ret != 0) {
+ uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
+ td->td_proc->p_pid, td->td_name, ret);
return (ret);
+ }
regs->tf_rdi = ucp->uc_mcontext.mc_edi;
regs->tf_rsi = ucp->uc_mcontext.mc_esi;
diff --git a/sys/amd64/include/fpu.h b/sys/amd64/include/fpu.h
index 1e7103e..d208789 100644
--- a/sys/amd64/include/fpu.h
+++ b/sys/amd64/include/fpu.h
@@ -101,14 +101,13 @@ struct savefpu_ymm {
} __aligned(64);
#ifdef _KERNEL
-struct fpu_kern_ctx {
- struct savefpu hwstate;
- struct savefpu *prev;
- uint32_t flags;
-};
-#define FPU_KERN_CTX_FPUINITDONE 0x01
+
+struct fpu_kern_ctx;
#define PCB_USER_FPU(pcb) (((pcb)->pcb_flags & PCB_KERNFPU) == 0)
+
+#define XSAVE_AREA_ALIGN 64
+
#endif
/*
@@ -141,9 +140,15 @@ void fpuexit(struct thread *td);
int fpuformat(void);
int fpugetregs(struct thread *td);
void fpuinit(void);
-void fpusetregs(struct thread *td, struct savefpu *addr);
+void fpusave(void *addr);
+int fpusetregs(struct thread *td, struct savefpu *addr,
+ char *xfpustate, size_t xfpustate_size);
+int fpusetxstate(struct thread *td, char *xfpustate,
+ size_t xfpustate_size);
int fputrap(void);
void fpuuserinited(struct thread *td);
+struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int flags);
+void fpu_kern_free_ctx(struct fpu_kern_ctx *ctx);
int fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx,
u_int flags);
int fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx);
@@ -151,9 +156,10 @@ int fpu_kern_thread(u_int flags);
int is_fpu_kern_thread(u_int flags);
/*
- * Flags for fpu_kern_enter() and fpu_kern_thread().
+ * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread().
*/
#define FPU_KERN_NORMAL 0x0000
+#define FPU_KERN_NOWAIT 0x0001
#endif
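Aside (not part of the diff): a minimal sketch of how an in-kernel consumer
would use the now-opaque fpu_kern KPI declared in the fpu.h hunk above.  The
consumer names are hypothetical; fpu_kern_alloc_ctx(), fpu_kern_enter(),
fpu_kern_leave(), fpu_kern_free_ctx() and the FPU_KERN_* flags are the
interfaces introduced by this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <machine/fpu.h>

/* Hypothetical consumer; the context is allocated once, sized at run time. */
static struct fpu_kern_ctx *example_fpu_ctx;

static void
example_attach(void)
{

	/* May sleep; use FPU_KERN_NOWAIT from non-sleepable contexts. */
	example_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
}

static void
example_simd_work(void)
{

	fpu_kern_enter(curthread, example_fpu_ctx, FPU_KERN_NORMAL);
	/* SSE/AVX-using code runs here against a private FPU save area. */
	fpu_kern_leave(curthread, example_fpu_ctx);
}

static void
example_detach(void)
{

	fpu_kern_free_ctx(example_fpu_ctx);
}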
diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h
index 12722a4..e171407 100644
--- a/sys/amd64/include/frame.h
+++ b/sys/amd64/include/frame.h
@@ -81,6 +81,7 @@ struct trapframe {
};
#define TF_HASSEGS 0x1
-/* #define _MC_HASBASES 0x2 */
+#define TF_HASBASES 0x2
+#define TF_HASFPXSTATE 0x4
#endif /* _MACHINE_FRAME_H_ */
diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h
index 479c84e..ff11ea1 100644
--- a/sys/amd64/include/md_var.h
+++ b/sys/amd64/include/md_var.h
@@ -51,6 +51,7 @@ extern u_int cpu_clflush_line_size;
extern u_int cpu_fxsr;
extern u_int cpu_high;
extern u_int cpu_id;
+extern u_int cpu_max_ext_state_size;
extern u_int cpu_mxcsr_mask;
extern u_int cpu_procinfo;
extern u_int cpu_procinfo2;
@@ -67,17 +68,23 @@ extern int _ucodesel;
extern int _ucode32sel;
extern int _ufssel;
extern int _ugssel;
+extern int use_xsave;
+extern uint64_t xsave_mask;
typedef void alias_for_inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);
+struct pcb;
+struct savefpu;
struct thread;
struct reg;
struct fpreg;
struct dbreg;
struct dumperinfo;
+void *alloc_fpusave(int flags);
void amd64_syscall(struct thread *td, int traced);
void busdma_swi(void);
void cpu_setregs(void);
+void ctx_fpusave(void *);
void doreti_iret(void) __asm(__STRING(doreti_iret));
void doreti_iret_fault(void) __asm(__STRING(doreti_iret_fault));
void ld_ds(void) __asm(__STRING(ld_ds));
@@ -105,5 +112,8 @@ void pagezero(void *addr);
void setidt(int idx, alias_for_inthand_t *func, int typ, int dpl, int ist);
int user_dbreg_trap(void);
void minidumpsys(struct dumperinfo *);
+struct savefpu *get_pcb_user_save_td(struct thread *td);
+struct savefpu *get_pcb_user_save_pcb(struct pcb *pcb);
+struct pcb *get_pcb_td(struct thread *td);
#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h
index 1af8f6d..61f651bb 100644
--- a/sys/amd64/include/pcb.h
+++ b/sys/amd64/include/pcb.h
@@ -92,7 +92,8 @@ struct pcb {
struct amd64tss *pcb_tssp;
struct savefpu *pcb_save;
- struct savefpu pcb_user_save;
+
+ uint64_t pcb_pad[2];
};
#ifdef _KERNEL
@@ -130,6 +131,7 @@ clear_pcb_flags(struct pcb *pcb, const u_int flags)
void makectx(struct trapframe *, struct pcb *);
int savectx(struct pcb *);
+
#endif
#endif /* _AMD64_PCB_H_ */
diff --git a/sys/amd64/include/ptrace.h b/sys/amd64/include/ptrace.h
index eef24f8..28f94f7 100644
--- a/sys/amd64/include/ptrace.h
+++ b/sys/amd64/include/ptrace.h
@@ -33,4 +33,9 @@
#ifndef _MACHINE_PTRACE_H_
#define _MACHINE_PTRACE_H_
+#define __HAVE_PTRACE_MACHDEP
+
+#define PT_GETXSTATE (PT_FIRSTMACH + 0)
+#define PT_SETXSTATE (PT_FIRSTMACH + 1)
+
#endif
diff --git a/sys/amd64/include/sysarch.h b/sys/amd64/include/sysarch.h
index 7b95a8b..195d882 100644
--- a/sys/amd64/include/sysarch.h
+++ b/sys/amd64/include/sysarch.h
@@ -50,12 +50,14 @@
#define I386_SET_FSBASE 8
#define I386_GET_GSBASE 9
#define I386_SET_GSBASE 10
+#define I386_GET_XFPUSTATE 11
/* Leave space for 0-127 for to avoid translating syscalls */
#define AMD64_GET_FSBASE 128
#define AMD64_SET_FSBASE 129
#define AMD64_GET_GSBASE 130
#define AMD64_SET_GSBASE 131
+#define AMD64_GET_XFPUSTATE 132
struct i386_ldt_args {
unsigned int start;
@@ -69,6 +71,16 @@ struct i386_ioperm_args {
int enable;
};
+struct i386_get_xfpustate {
+ unsigned int addr;
+ int len;
+};
+
+struct amd64_get_xfpustate {
+ void *addr;
+ int len;
+};
+
#ifndef _KERNEL
__BEGIN_DECLS
int amd64_get_fsbase(void **);
diff --git a/sys/amd64/include/ucontext.h b/sys/amd64/include/ucontext.h
index 75b7bd2..5ab841e 100644
--- a/sys/amd64/include/ucontext.h
+++ b/sys/amd64/include/ucontext.h
@@ -37,7 +37,8 @@
*/
#define _MC_HASSEGS 0x1
#define _MC_HASBASES 0x2
-#define _MC_FLAG_MASK (_MC_HASSEGS | _MC_HASBASES)
+#define _MC_HASFPXSTATE 0x4
+#define _MC_FLAG_MASK (_MC_HASSEGS | _MC_HASBASES | _MC_HASFPXSTATE)
typedef struct __mcontext {
/*
@@ -93,7 +94,10 @@ typedef struct __mcontext {
__register_t mc_fsbase;
__register_t mc_gsbase;
- long mc_spare[6];
+ __register_t mc_xfpustate;
+ __register_t mc_xfpustate_len;
+
+ long mc_spare[4];
} mcontext_t;
#endif /* !_MACHINE_UCONTEXT_H_ */
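Closing aside (not part of the diff): a sketch of an SA_SIGINFO handler
checking the new _MC_HASFPXSTATE flag from the ucontext.h hunk above and
locating the out-of-struct extended FPU state that sendsig() places on the
signal stack.  The handler and main() are illustrative only; any change the
handler makes to the pointed-to area is loaded back by sigreturn(2), as the
commit message describes.

#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <ucontext.h>

static void
handler(int sig, siginfo_t *si, void *uap)
{
	ucontext_t *uc;
	mcontext_t *mc;
	char *xfpu;
	size_t len;

	(void)sig;
	(void)si;
	uc = uap;
	mc = &uc->uc_mcontext;
	if ((mc->mc_flags & _MC_HASFPXSTATE) != 0) {
		/* Stack-allocated XSAVE extension area copied out by sendsig(). */
		xfpu = (char *)(uintptr_t)mc->mc_xfpustate;
		len = (size_t)mc->mc_xfpustate_len;
		/*
		 * Inspect or modify the extended state here; sigreturn(2)
		 * loads it back into the interrupted thread.
		 */
		(void)xfpu;
		(void)len;
	}
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return (0);
}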