path: root/virt/kvm
author     Christoffer Dall <christoffer.dall@linaro.org>   2014-09-28 16:04:26 +0200
committer  Christoffer Dall <christoffer.dall@linaro.org>   2014-10-16 10:57:41 +0200
commit     2df36a5dd6792870bef48f63bfca42055ea5b79c (patch)
tree       ef822d51fb8cd4adb030eab48c7dab131b632461 /virt/kvm
parent     3d08c629244257473450a8ba17cb8184b91e68f8 (diff)
download   op-kernel-dev-2df36a5dd6792870bef48f63bfca42055ea5b79c.zip
           op-kernel-dev-2df36a5dd6792870bef48f63bfca42055ea5b79c.tar.gz
arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
The EISR and ELRSR registers are 32-bit registers on GICv2, and we store these as an array of two such registers on the vgic vcpu struct. However, we access them as a single 64-bit value or as a bitmap pointer in the generic vgic code, which breaks BE support.

Instead, store them as u64 values on the vgic structure and do the word-swapping in the assembly code, which already handles the byte order for BE systems.

Tested-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
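As a minimal user-space sketch of the underlying problem (not kernel code; the helper name u64_to_bitmask_sketch and the test value are illustrative only): casting the address of a u64 to unsigned long * only yields a usable bitmap on 64-bit builds or little-endian 32-bit builds. On a big-endian 32-bit build the high word of the u64 sits at the lower address, so bits 0..31 land in the second word of the bitmap. Swapping the two words first, as the new u64_to_bitmask() helper in the diff below does, restores the layout the generic bitmap walkers expect.

#include <stdint.h>
#include <stdio.h>

static unsigned long *u64_to_bitmask_sketch(uint64_t *val)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && \
    __SIZEOF_LONG__ == 4
	/*
	 * 32-bit big-endian: the high word of *val is stored at the lower
	 * address, so a bit in positions 0..31 would end up in word 1 of
	 * the bitmap.  Swap the words so the low 32 bits come first, which
	 * is what for_each_set_bit() and friends expect.
	 */
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}

int main(void)
{
	uint64_t elrsr = 1ULL << 3;	/* pretend list register 3 is empty */
	unsigned long *map = u64_to_bitmask_sketch(&elrsr);

	/* On every layout handled above, bit 3 must now be in word 0. */
	printf("bit 3 set in word 0: %s\n",
	       (map[0] & (1UL << 3)) ? "yes" : "no");
	return 0;
}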
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic-v2.c  24
-rw-r--r--  virt/kvm/arm/vgic.c     18
2 files changed, 19 insertions, 23 deletions
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 01124ef..2935405 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr lr_desc)
{
if (!(lr_desc.state & LR_STATE_MASK))
- set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
}
static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
{
- u64 val;
-
-#if BITS_PER_LONG == 64
- val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
- val <<= 32;
- val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
- val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
- return val;
+ return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
}
static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
{
- u64 val;
-
-#if BITS_PER_LONG == 64
- val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
- val <<= 32;
- val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
- val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
- return val;
+ return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
}
static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 382fb5a..3aaca49 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
b->shared = NULL;
}
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+ *val = (*val >> 32) | (*val << 32);
+#endif
+ return (unsigned long *)val;
+}
+
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
int cpuid, u32 offset)
{
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
* active bit.
*/
u64 eisr = vgic_get_eisr(vcpu);
- unsigned long *eisr_ptr = (unsigned long *)&eisr;
+ unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
int lr;
for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
level_pending = vgic_process_maintenance(vcpu);
elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = (unsigned long *)&elrsr;
+ elrsr_ptr = u64_to_bitmask(&elrsr);
/* Clear mappings for empty LRs */
for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {