author | Mark Rutland <mark.rutland@arm.com> | 2014-11-24 13:59:30 +0000
committer | Mark Rutland <mark.rutland@arm.com> | 2015-01-15 12:24:25 +0000
commit | c6d01a947a51193e839516165286bc8d14a0e409 (patch)
tree | da329869e3bd4187a6ebfc4f80ef0efd31629c07 /arch/arm64/kvm/hyp.S
parent | 60a1f02c9e91e0796b54e83b14fb8a07f7a568b6 (diff)
arm64: kvm: move to ESR_ELx macros
Now that we have common ESR_ELx macros, make use of them in the arm64
KVM code. The addition of <asm/esr.h> to the include path highlighted
badly ordered (i.e. not alphabetical) include lists; these are changed
to alphabetical order.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
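For reference, the ESR_ELx_* names this patch switches to are the common exception-syndrome definitions in arch/arm64/include/asm/esr.h. Below is a minimal sketch of the fields the diff relies on; the macro names come from the patch itself, while the numeric values are quoted from the ARMv8 architecture (exception class in ESR bits [31:26], fault status type in bits [5:2]) rather than copied verbatim from the header.

/* Illustrative sketch of the <asm/esr.h> encodings used below (values per the ARMv8 ARM). */
#define ESR_ELx_EC_SHIFT	(26)	/* exception class lives in ESR_ELx[31:26] */
#define ESR_ELx_EC_HVC64	(0x16)	/* HVC executed from AArch64 */
#define ESR_ELx_EC_IABT_LOW	(0x20)	/* instruction abort from a lower exception level */
#define ESR_ELx_EC_DABT_LOW	(0x24)	/* data abort from a lower exception level */
#define ESR_ELx_FSC_TYPE	(0x3c)	/* fault status code with the level bits masked off */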
Diffstat (limited to 'arch/arm64/kvm/hyp.S')
-rw-r--r-- | arch/arm64/kvm/hyp.S | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909f..c0d8202 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,15 +17,16 @@
 #include <linux/linkage.h>

-#include <asm/assembler.h>
-#include <asm/memory.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>

 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1140,9 +1141,9 @@ el1_sync:			// Guest trapped into EL2
 	push	x2, x3

 	mrs	x1, esr_el2
-	lsr	x2, x1, #ESR_EL2_EC_SHIFT
+	lsr	x2, x1, #ESR_ELx_EC_SHIFT

-	cmp	x2, #ESR_EL2_EC_HVC64
+	cmp	x2, #ESR_ELx_EC_HVC64
 	b.ne	el1_trap

 	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
@@ -1177,13 +1178,13 @@ el1_trap:
 	 * x1: ESR
 	 * x2: ESR_EC
 	 */
-	cmp	x2, #ESR_EL2_EC_DABT
-	mov	x0, #ESR_EL2_EC_IABT
+	cmp	x2, #ESR_ELx_EC_DABT_LOW
+	mov	x0, #ESR_ELx_EC_IABT_LOW
 	ccmp	x2, x0, #4, ne
 	b.ne	1f		// Not an abort we care about

 	/* This is an abort. Check for permission fault */
-	and	x2, x1, #ESR_EL2_FSC_TYPE
+	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
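To make the assembly-level change easier to follow, here is a rough C restatement of the trap classification that el1_sync/el1_trap perform with the new macros. The helper name is hypothetical, and the FSC_PERM value (defined in <asm/kvm_arm.h>) is an assumption marked as such; this is a sketch of the logic, not kernel code.

#include <stdbool.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT    26
#define ESR_ELx_EC_HVC64    0x16
#define ESR_ELx_EC_IABT_LOW 0x20
#define ESR_ELx_EC_DABT_LOW 0x24
#define ESR_ELx_FSC_TYPE    0x3c
#define FSC_PERM            0x0c	/* assumption: permission fault, level bits stripped */

/* Hypothetical helper mirroring the checks in el1_sync/el1_trap above. */
bool is_guest_permission_fault(uint32_t esr)
{
	uint32_t ec = esr >> ESR_ELx_EC_SHIFT;	/* lsr x2, x1, #ESR_ELx_EC_SHIFT */

	/* el1_sync: 64-bit hypercalls are handled separately, not as traps. */
	if (ec == ESR_ELx_EC_HVC64)
		return false;

	/* el1_trap: only lower-EL instruction/data aborts are of interest here. */
	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return false;

	/* Mask the fault status code down to its type and test for a permission fault. */
	return (esr & ESR_ELx_FSC_TYPE) == FSC_PERM;
}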