path: root/include/qemu/seqlock.h
/*
 * Seqlock implementation for QEMU
 *
 * Copyright Red Hat, Inc. 2013
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#ifndef QEMU_SEQLOCK_H
#define QEMU_SEQLOCK_H 1

#include <qemu/atomic.h>
#include <qemu/thread.h>

typedef struct QemuSeqLock QemuSeqLock;

struct QemuSeqLock {
    /* Optional mutex serializing writers; may be NULL if the caller
     * already serializes writers by other means.  */
    QemuMutex *mutex;
    /* Even when no write is in progress, odd while a writer is active.  */
    unsigned sequence;
};

static inline void seqlock_init(QemuSeqLock *sl, QemuMutex *mutex)
{
    sl->mutex = mutex;
    sl->sequence = 0;
}
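
/*
 * Initialization sketch (illustrative only; 'lock' and 'lock_mutex' are
 * hypothetical names, not part of this header):
 *
 *   static QemuSeqLock lock;
 *   static QemuMutex lock_mutex;
 *
 *   qemu_mutex_init(&lock_mutex);
 *   seqlock_init(&lock, &lock_mutex);
 *
 * The mutex may be NULL if writers are serialized by other means; see the
 * NULL checks in seqlock_write_lock() and seqlock_write_unlock() below.
 */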

/* Lock out other writers and update the count.  */
static inline void seqlock_write_lock(QemuSeqLock *sl)
{
    if (sl->mutex) {
        qemu_mutex_lock(sl->mutex);
    }
    ++sl->sequence;

    /* Write sequence before updating other fields.  */
    smp_wmb();
}

static inline void seqlock_write_unlock(QemuSeqLock *sl)
{
    /* Write other fields before finalizing sequence.  */
    smp_wmb();

    ++sl->sequence;
    if (sl->mutex) {
        qemu_mutex_unlock(sl->mutex);
    }
}
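
/*
 * Writer-side usage sketch (illustrative; 'lock', 'shared_value' and
 * 'new_value' are hypothetical, as above):
 *
 *   seqlock_write_lock(&lock);
 *   shared_value = new_value;
 *   seqlock_write_unlock(&lock);
 *
 * Writers that passed a NULL mutex to seqlock_init() must be serialized
 * externally, since concurrent writers would corrupt the sequence count.
 */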

static inline unsigned seqlock_read_begin(QemuSeqLock *sl)
{
    /* Clear the low bit: if a write is in progress the sequence is odd, so
     * the value returned here can never match the final sequence and the
     * read is guaranteed to retry.  */
    unsigned ret = sl->sequence & ~1;

    /* Read sequence before reading other fields.  */
    smp_rmb();
    return ret;
}

static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
{
    /* Read other fields before reading final sequence.  */
    smp_rmb();
    return unlikely(sl->sequence != start);
}
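
/*
 * Reader-side usage sketch (illustrative; 'lock', 'shared_value' and 'v'
 * are hypothetical, as above).  Readers loop until they observe a stable,
 * write-free sequence:
 *
 *   unsigned start;
 *   int64_t v;
 *
 *   do {
 *       start = seqlock_read_begin(&lock);
 *       v = shared_value;
 *   } while (seqlock_read_retry(&lock, start));
 */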

#endif /* QEMU_SEQLOCK_H */