summaryrefslogtreecommitdiffstats
path: root/sys/crypto
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2012-01-21 17:45:27 +0000
committerkib <kib@FreeBSD.org>2012-01-21 17:45:27 +0000
commit361bfae5c2c758540993427b5fd4d32422b143d4 (patch)
tree2514905a9aa4f850d72848118ca709f59f175020 /sys/crypto
parent8fd18c5b0a668ecb5094c18e7210dc6b3c2ce7e4 (diff)
downloadFreeBSD-src-361bfae5c2c758540993427b5fd4d32422b143d4.zip
FreeBSD-src-361bfae5c2c758540993427b5fd4d32422b143d4.tar.gz
Add support for the extended FPU states on amd64, both for native
64bit and 32bit ABIs. As a side-effect, it enables AVX on capable CPUs. In particular: - Query the CPU support for XSAVE, list of the supported extensions and the required size of FPU save area. The hw.use_xsave tunable is provided for disabling XSAVE, and hw.xsave_mask may be used to select the enabled extensions. - Remove the FPU save area from PCB and dynamically allocate the (run-time sized) user save area on the top of the kernel stack, right above the PCB. Reorganize the thread0 PCB initialization to postpone it after BSP is queried for save area size. - The dumppcb, stoppcbs and susppcbs now do not carry the FPU state as well. FPU state is only useful for suspend, where it is saved in dynamically allocated suspfpusave area. - Use XSAVE and XRSTOR to save/restore FPU state, if supported and enabled. - Define new mcontext_t flag _MC_HASFPXSTATE, indicating that mcontext_t has a valid pointer to out-of-struct extended FPU state. Signal handlers are supplied with stack-allocated fpu state. The sigreturn(2) and setcontext(2) syscalls honour the flag, allowing the signal handlers to inspect and manipulate extended state in the interrupted context. - The getcontext(2) never returns extended state, since there is no place in the fixed-sized mcontext_t to place variable-sized save area. And, since mcontext_t is embedded into ucontext_t, this makes it impossible to fix in a reasonable way. Instead of extending getcontext(2) syscall, provide a sysarch(2) facility to query extended FPU state. - Add ptrace(2) support for getting and setting extended state; while there, implement the missing PT_I386_{GET,SET}XMMREGS for 32bit binaries. - Change fpu_kern KPI to not expose struct fpu_kern_ctx layout to consumers, making it opaque. Internally, struct fpu_kern_ctx now contains a space for the extended state. Convert in-kernel consumers of fpu_kern KPI both on i386 and amd64. First version of the support for AVX was submitted by Tim Bird <tim.bird am sony com> on behalf of Sony. 
This version was written from scratch. Tested by: pho (previous version), Yamagi Burmeister <lists yamagi org> MFC after: 1 month
Diffstat (limited to 'sys/crypto')
-rw-r--r--sys/crypto/aesni/aesni.c13
-rw-r--r--sys/crypto/aesni/aesni.h2
-rw-r--r--sys/crypto/aesni/aesni_wrap.c8
-rw-r--r--sys/crypto/via/padlock.c15
-rw-r--r--sys/crypto/via/padlock.h2
-rw-r--r--sys/crypto/via/padlock_cipher.c4
-rw-r--r--sys/crypto/via/padlock_hash.c4
7 files changed, 34 insertions, 14 deletions
diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c
index ed12f28..ca00a57 100644
--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c
@@ -116,6 +116,7 @@ aesni_detach(device_t dev)
}
while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
TAILQ_REMOVE(&sc->sessions, ses, next);
+ fpu_kern_free_ctx(ses->fpu_ctx);
free(ses, M_AESNI);
}
rw_wunlock(&sc->lock);
@@ -165,8 +166,13 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
rw_wunlock(&sc->lock);
return (ENOMEM);
}
- KASSERT(((uintptr_t)ses) % 0x10 == 0,
- ("malloc returned unaligned pointer"));
+ ses->fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
+ FPU_KERN_NOWAIT);
+ if (ses->fpu_ctx == NULL) {
+ free(ses, M_AESNI);
+ rw_wunlock(&sc->lock);
+ return (ENOMEM);
+ }
ses->id = sc->sid++;
} else {
TAILQ_REMOVE(&sc->sessions, ses, next);
@@ -191,12 +197,15 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
+ struct fpu_kern_ctx *ctx;
uint32_t sid;
sid = ses->id;
TAILQ_REMOVE(&sc->sessions, ses, next);
+ ctx = ses->fpu_ctx;
bzero(ses, sizeof(*ses));
ses->id = sid;
+ ses->fpu_ctx = ctx;
TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}
diff --git a/sys/crypto/aesni/aesni.h b/sys/crypto/aesni/aesni.h
index b07ec8b..78255b7 100644
--- a/sys/crypto/aesni/aesni.h
+++ b/sys/crypto/aesni/aesni.h
@@ -65,7 +65,7 @@ struct aesni_session {
int used;
uint32_t id;
TAILQ_ENTRY(aesni_session) next;
- struct fpu_kern_ctx fpu_ctx;
+ struct fpu_kern_ctx *fpu_ctx;
};
/*
diff --git a/sys/crypto/aesni/aesni_wrap.c b/sys/crypto/aesni/aesni_wrap.c
index 1492640..787c1a9 100644
--- a/sys/crypto/aesni/aesni_wrap.c
+++ b/sys/crypto/aesni/aesni_wrap.c
@@ -227,7 +227,7 @@ aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
td = curthread;
if (!is_fpu_kern_thread(0)) {
- error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
+ error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
saved_ctx = 1;
} else {
error = 0;
@@ -237,7 +237,7 @@ aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
error = aesni_cipher_setup_common(ses, encini->cri_key,
encini->cri_klen);
if (saved_ctx)
- fpu_kern_leave(td, &ses->fpu_ctx);
+ fpu_kern_leave(td, ses->fpu_ctx);
}
return (error);
}
@@ -256,7 +256,7 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
td = curthread;
if (!is_fpu_kern_thread(0)) {
- error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
+ error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
if (error != 0)
goto out;
saved_ctx = 1;
@@ -302,7 +302,7 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
}
}
if (saved_ctx)
- fpu_kern_leave(td, &ses->fpu_ctx);
+ fpu_kern_leave(td, ses->fpu_ctx);
if (allocated)
crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
enccrd->crd_len, buf);
diff --git a/sys/crypto/via/padlock.c b/sys/crypto/via/padlock.c
index ba63093..f601d2a 100644
--- a/sys/crypto/via/padlock.c
+++ b/sys/crypto/via/padlock.c
@@ -156,6 +156,7 @@ padlock_detach(device_t dev)
}
while ((ses = TAILQ_FIRST(&sc->sc_sessions)) != NULL) {
TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
+ fpu_kern_free_ctx(ses->ses_fpu_ctx);
free(ses, M_PADLOCK);
}
rw_destroy(&sc->sc_sessions_lock);
@@ -222,6 +223,13 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
rw_wunlock(&sc->sc_sessions_lock);
return (ENOMEM);
}
+ ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
+ FPU_KERN_NOWAIT);
+ if (ses->ses_fpu_ctx == NULL) {
+ free(ses, M_PADLOCK);
+ rw_wunlock(&sc->sc_sessions_lock);
+ return (ENOMEM);
+ }
ses->ses_id = sc->sc_sid++;
} else {
TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
@@ -239,7 +247,7 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
if (macini != NULL) {
td = curthread;
if (!is_fpu_kern_thread(0)) {
- error = fpu_kern_enter(td, &ses->ses_fpu_ctx,
+ error = fpu_kern_enter(td, ses->ses_fpu_ctx,
FPU_KERN_NORMAL);
saved_ctx = 1;
} else {
@@ -249,7 +257,7 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
if (error == 0) {
error = padlock_hash_setup(ses, macini);
if (saved_ctx)
- fpu_kern_leave(td, &ses->ses_fpu_ctx);
+ fpu_kern_leave(td, ses->ses_fpu_ctx);
}
if (error != 0) {
padlock_freesession_one(sc, ses, 0);
@@ -265,15 +273,18 @@ static void
padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses,
int locked)
{
+ struct fpu_kern_ctx *ctx;
uint32_t sid = ses->ses_id;
if (!locked)
rw_wlock(&sc->sc_sessions_lock);
TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
padlock_hash_free(ses);
+ ctx = ses->ses_fpu_ctx;
bzero(ses, sizeof(*ses));
ses->ses_used = 0;
ses->ses_id = sid;
+ ses->ses_fpu_ctx = ctx;
TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next);
if (!locked)
rw_wunlock(&sc->sc_sessions_lock);
diff --git a/sys/crypto/via/padlock.h b/sys/crypto/via/padlock.h
index c8ee9bd..77a4673 100644
--- a/sys/crypto/via/padlock.h
+++ b/sys/crypto/via/padlock.h
@@ -76,7 +76,7 @@ struct padlock_session {
int ses_used;
uint32_t ses_id;
TAILQ_ENTRY(padlock_session) ses_next;
- struct fpu_kern_ctx ses_fpu_ctx;
+ struct fpu_kern_ctx *ses_fpu_ctx;
};
#define PADLOCK_ALIGN(p) (void *)(roundup2((uintptr_t)(p), 16))
diff --git a/sys/crypto/via/padlock_cipher.c b/sys/crypto/via/padlock_cipher.c
index 1456ddf..7170211 100644
--- a/sys/crypto/via/padlock_cipher.c
+++ b/sys/crypto/via/padlock_cipher.c
@@ -251,7 +251,7 @@ padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
td = curthread;
if (!is_fpu_kern_thread(0)) {
- error = fpu_kern_enter(td, &ses->ses_fpu_ctx, FPU_KERN_NORMAL);
+ error = fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL);
saved_ctx = 1;
} else {
error = 0;
@@ -264,7 +264,7 @@ padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
ses->ses_iv);
if (saved_ctx)
- fpu_kern_leave(td, &ses->ses_fpu_ctx);
+ fpu_kern_leave(td, ses->ses_fpu_ctx);
if (allocated) {
crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
diff --git a/sys/crypto/via/padlock_hash.c b/sys/crypto/via/padlock_hash.c
index 0fe182b..924a9ec 100644
--- a/sys/crypto/via/padlock_hash.c
+++ b/sys/crypto/via/padlock_hash.c
@@ -370,7 +370,7 @@ padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
td = curthread;
if (!is_fpu_kern_thread(0)) {
- error = fpu_kern_enter(td, &ses->ses_fpu_ctx, FPU_KERN_NORMAL);
+ error = fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL);
saved_ctx = 1;
} else {
error = 0;
@@ -383,7 +383,7 @@ padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
error = padlock_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
if (saved_ctx)
- fpu_kern_leave(td, &ses->ses_fpu_ctx);
+ fpu_kern_leave(td, ses->ses_fpu_ctx);
return (error);
}
OpenPOWER on IntegriCloud