author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2007-07-18 09:37:10 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-07-20 09:39:57 +0100
commit		228adef16d6e7b7725ef6b9ba760810d5966afa5 (patch)
tree		f7473090e2284a7f3b2933d97e684e4b2445d79c /arch/arm/vfp/vfpmodule.c
parent		21d1ca04532005c50ed57c2b2948e465b2e90720 (diff)
[ARM] vfp: make fpexc bit names less verbose
Use the abbreviated fpexc bit names instead of the long verbose names.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
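
For reference, the renamed macros describe FPEXC control bits. A minimal sketch of what the abbreviated names stand for, assuming the standard ARM VFP FPEXC layout (EX in bit 31, EN in bit 30) rather than quoting the kernel header verbatim (the authoritative definitions live in arch/arm/vfp/vfp.h):

	/* Sketch of the abbreviated FPEXC bit names used by this patch.
	 * Bit positions assume the standard ARM VFP FPEXC register layout. */
	#define FPEXC_EX	(1 << 31)	/* exception flag (formerly FPEXC_EXCEPTION) */
	#define FPEXC_EN	(1 << 30)	/* VFP enable bit (formerly FPEXC_ENABLE) */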
Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
-rw-r--r--	arch/arm/vfp/vfpmodule.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 1106b5f..04ddab2 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -53,7 +53,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
-		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
@@ -70,7 +70,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
-		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

@@ -81,13 +81,13 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
		 */
		memset(vfp, 0, sizeof(union vfp_state));

-		vfp->hard.fpexc = FPEXC_ENABLE;
+		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* flush and release case: Per-thread VFP cleanup. */
@@ -229,7 +229,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
	/*
	 * Enable access to the VFP so we can handle the bounce.
	 */
-	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));

	orig_fpscr = fpscr = fmrx(FPSCR);

@@ -248,7 +248,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
	/*
	 * Modify fpscr to indicate the number of iterations remaining
	 */
-	if (fpexc & FPEXC_EXCEPTION) {
+	if (fpexc & FPEXC_EX) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);