author     David Gibson <david@gibson.dropbear.id.au>    2011-09-20 12:05:20 +1000
committer  Anthony Liguori <aliguori@us.ibm.com>          2011-09-23 11:51:05 -0500
commit     b90d2f35125490b8f62484c5ea7e6bbecbe43b6f (patch)
tree       c084ffd8b99ea4afb249cabdb95ce9e101f3ff9f  /hw/virtio.c
parent     87751797c707092108700159b570e917bae346b9 (diff)
virtio: Use global memory barrier macros
The virtio code uses wmb() macros in several places, as required by the SMP-aware virtio protocol. However, the wmb() macro is locally defined to be a compiler barrier only. This is probably sufficient on x86 due to its strong storage ordering model, but it certainly isn't on other platforms, such as ppc.

In any case, qemu already has some globally defined memory barrier macros in qemu-barrier.h. This patch therefore converts virtio.c to use those barrier macros. The macros in qemu-barrier.h are also wrong (or at least, safe for x86 only), but this way at least there's only one place to fix them.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
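For context, the distinction the patch rests on is between a pure compiler barrier and a barrier that also orders stores at the CPU level. The sketch below is illustrative only; it does not reproduce the actual qemu-barrier.h definitions, and the per-architecture variants are assumptions about how such macros are commonly implemented.

/* Compiler-only barrier: stops the compiler from reordering memory
 * accesses across this point, but emits no CPU instruction, so a
 * weakly ordered CPU (e.g. ppc) may still reorder the stores. */
#define barrier()   __asm__ __volatile__("" : : : "memory")

/* Illustrative SMP write barrier: also orders stores in hardware.
 * x86's strong store-ordering model makes a compiler barrier enough
 * for store->store ordering; ppc needs a real barrier instruction. */
#if defined(__i386__) || defined(__x86_64__)
#define smp_wmb()   barrier()
#elif defined(__powerpc__)
#define smp_wmb()   __asm__ __volatile__("lwsync" : : : "memory")
#else
#define smp_wmb()   __sync_synchronize()   /* full barrier as a safe fallback */
#endif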
Diffstat (limited to 'hw/virtio.c')
-rw-r--r--  hw/virtio.c | 14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/hw/virtio.c b/hw/virtio.c
index d9bf266..7011b5b 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -16,20 +16,12 @@
#include "trace.h"
#include "qemu-error.h"
#include "virtio.h"
+#include "qemu-barrier.h"
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096
-/* QEMU doesn't strictly need write barriers since everything runs in
- * lock-step. We'll leave the calls to wmb() in though to make it obvious for
- * KVM or if kqemu gets SMP support.
- * In any case, we must prevent the compiler from reordering the code.
- * TODO: we likely need some rmb()/mb() as well.
- */
-
-#define wmb() __asm__ __volatile__("": : :"memory")
-
typedef struct VRingDesc
{
uint64_t addr;
@@ -264,7 +256,7 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
uint16_t old, new;
/* Make sure buffer is written before we update index. */
- wmb();
+ smp_wmb();
trace_virtqueue_flush(vq, count);
old = vring_used_idx(vq);
new = old + count;
@@ -324,7 +316,7 @@ static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
/* Check they're not leading us off end of descriptors. */
next = vring_desc_next(desc_pa, i);
/* Make sure compiler knows to grab that: we don't want it changing! */
- wmb();
+ smp_wmb();
if (next >= max) {
error_report("Desc next is %u", next);
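The virtqueue_flush() hunk above is the pattern the write barrier exists for: the device (producer) must make the used-ring element visible before it publishes the new index, otherwise the guest could observe the updated index and then read a stale element. A simplified producer-side sketch of that ordering follows; the struct layout and publish_used() helper use hypothetical names rather than QEMU's actual vring accessors, and smp_wmb() is assumed to come from qemu-barrier.h as in the patch.

#include <stdint.h>

struct vring_used_elem { uint32_t id; uint32_t len; };
struct vring_used {
    uint16_t flags;
    uint16_t idx;                     /* guest polls this index... */
    struct vring_used_elem ring[];    /* ...and then reads this slot */
};

static void publish_used(struct vring_used *used, uint16_t slot,
                         uint32_t id, uint32_t len)
{
    used->ring[slot].id  = id;
    used->ring[slot].len = len;
    smp_wmb();          /* element must be visible before the index moves */
    used->idx = slot + 1;
}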