author     mav <mav@FreeBSD.org>  2015-03-27 08:52:57 +0000
committer  mav <mav@FreeBSD.org>  2015-03-27 08:52:57 +0000
commit     aaa4bfa29497aab9c1f33620881bea8ca70e7865 (patch)
tree       25c45b5ac48ea187687cd2a073513f80ecb99080 /usr.sbin
parent     3cab0e383150be183450000ede57f24643d45155 (diff)
MFC r280026, r280041:
Modify the virtqueue helpers added in r253440 to allow queuing. The original virtqueue design permits queued and out-of-order processing, but the helpers added in r253440 assume only direct, blocking, in-order completion. That may be acceptable for network and similar devices, but it is a serious limitation for storage devices.
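In terms of the helper interface in usr.sbin/bhyve/virtio.h (full hunks below), the change amounts to the following; the old prototypes are shown only for contrast and no longer exist after this commit:

	/* Old interface (pre-r280026): chains are handed out and must be
	 * released strictly in FIFO order; vq_startchains() snapshots the
	 * used index before processing begins. */
	int  vq_getchain(struct vqueue_info *vq,
		    struct iovec *iov, int n_iov, uint16_t *flags);
	void vq_relchain(struct vqueue_info *vq, uint32_t iolen);

	/* New interface: vq_getchain() reports the head descriptor index of
	 * the returned chain in *pidx, vq_relchain() completes exactly the
	 * chain named by idx (so completions may be queued and reordered),
	 * and vq_retchain() puts an unconsumed chain back on the available
	 * ring.  vq_startchains() is removed; vq_endchains() now maintains
	 * vq_save_used itself. */
	int  vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
		    struct iovec *iov, int n_iov, uint16_t *flags);
	void vq_retchain(struct vqueue_info *vq);
	void vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);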
Diffstat (limited to 'usr.sbin')
-rw-r--r--  usr.sbin/bhyve/pci_virtio_block.c   7
-rw-r--r--  usr.sbin/bhyve/pci_virtio_net.c    13
-rw-r--r--  usr.sbin/bhyve/pci_virtio_rnd.c     7
-rw-r--r--  usr.sbin/bhyve/virtio.c            36
-rw-r--r--  usr.sbin/bhyve/virtio.h            19
5 files changed, 41 insertions, 41 deletions
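As a reading aid for the hunks that follow, here is a minimal sketch of a device notify handler using the reworked helpers. It is illustrative only: my_vdev_notify(), MY_MAXSEGS, process_chain() and can_handle_now() are hypothetical stand-ins, not bhyve APIs; the call pattern itself mirrors pci_vtblk_notify(), pci_vtrnd_notify() and the vq_retchain() use in pci_vtnet_tap_rx() below.

	#include <sys/uio.h>	/* struct iovec */
	#include <stdint.h>
	#include <assert.h>

	#include "virtio.h"	/* bhyve virtqueue helpers (usr.sbin/bhyve) */

	#define	MY_MAXSEGS	8	/* hypothetical per-request segment limit */

	/* Hypothetical stand-ins, not bhyve APIs. */
	static int
	can_handle_now(void *vsc)
	{

		(void)vsc;
		return (1);		/* pretend the request can be served at once */
	}

	static uint32_t
	process_chain(void *vsc, struct iovec *iov, int n)
	{

		(void)vsc; (void)iov; (void)n;
		return (0);		/* bytes written back to the guest */
	}

	static void
	my_vdev_notify(void *vsc, struct vqueue_info *vq)
	{
		struct iovec iov[MY_MAXSEGS];
		uint16_t idx, flags[MY_MAXSEGS];
		int n;

		while (vq_has_descs(vq)) {
			/* The chain's head descriptor index comes back in idx. */
			n = vq_getchain(vq, &idx, iov, MY_MAXSEGS, flags);
			assert(n >= 1 && n <= MY_MAXSEGS);

			if (!can_handle_now(vsc)) {
				/* Put the chain back on the available ring and
				 * retry on a later notify (cf. pci_vtnet_tap_rx). */
				vq_retchain(vq);
				vq_endchains(vq, 0);
				return;
			}

			/* idx travels with the chain, so this completion could
			 * equally well happen later and out of order. */
			vq_relchain(vq, idx, process_chain(vsc, iov, n));
		}
		vq_endchains(vq, 1);	/* interrupt if appropriate */
	}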
diff --git a/usr.sbin/bhyve/pci_virtio_block.c b/usr.sbin/bhyve/pci_virtio_block.c
index fef7fec..7248b1f 100644
--- a/usr.sbin/bhyve/pci_virtio_block.c
+++ b/usr.sbin/bhyve/pci_virtio_block.c
@@ -170,9 +170,9 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
int writeop, type;
off_t offset;
struct iovec iov[VTBLK_MAXSEGS + 2];
- uint16_t flags[VTBLK_MAXSEGS + 2];
+ uint16_t idx, flags[VTBLK_MAXSEGS + 2];
- n = vq_getchain(vq, iov, VTBLK_MAXSEGS + 2, flags);
+ n = vq_getchain(vq, &idx, iov, VTBLK_MAXSEGS + 2, flags);
/*
* The first descriptor will be the read-only fixed header,
@@ -258,7 +258,7 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
* Return the descriptor back to the host.
* We wrote 1 byte (our status) to host.
*/
- vq_relchain(vq, 1);
+ vq_relchain(vq, idx, 1);
}
static void
@@ -266,7 +266,6 @@ pci_vtblk_notify(void *vsc, struct vqueue_info *vq)
{
struct pci_vtblk_softc *sc = vsc;
- vq_startchains(vq);
while (vq_has_descs(vq))
pci_vtblk_proc(sc, vq);
vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
diff --git a/usr.sbin/bhyve/pci_virtio_net.c b/usr.sbin/bhyve/pci_virtio_net.c
index 5ac9ecd..f3656b7 100644
--- a/usr.sbin/bhyve/pci_virtio_net.c
+++ b/usr.sbin/bhyve/pci_virtio_net.c
@@ -288,6 +288,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
struct vqueue_info *vq;
void *vrx;
int len, n;
+ uint16_t idx;
/*
* Should never be called without a valid tap fd
@@ -310,7 +311,6 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
* Check for available rx buffers
*/
vq = &sc->vsc_queues[VTNET_RXQ];
- vq_startchains(vq);
if (!vq_has_descs(vq)) {
/*
* Drop the packet and try later. Interrupt on
@@ -325,7 +325,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
/*
* Get descriptor chain.
*/
- n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
+ n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
assert(n >= 1 && n <= VTNET_MAXSEGS);
/*
@@ -342,6 +342,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
* No more packets, but still some avail ring
* entries. Interrupt if needed/appropriate.
*/
+ vq_retchain(vq);
vq_endchains(vq, 0);
return;
}
@@ -362,7 +363,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
/*
* Release this chain and handle more chains.
*/
- vq_relchain(vq, len + sc->rx_vhdrlen);
+ vq_relchain(vq, idx, len + sc->rx_vhdrlen);
} while (vq_has_descs(vq));
/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
@@ -401,13 +402,14 @@ pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
struct iovec iov[VTNET_MAXSEGS + 1];
int i, n;
int plen, tlen;
+ uint16_t idx;
/*
* Obtain chain of descriptors. The first one is
* really the header descriptor, so we need to sum
* up two lengths: packet length and transfer length.
*/
- n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
+ n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
assert(n >= 1 && n <= VTNET_MAXSEGS);
plen = 0;
tlen = iov[0].iov_len;
@@ -420,7 +422,7 @@ pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
pci_vtnet_tap_tx(sc, &iov[1], n - 1, plen);
/* chain is processed, release it and set tlen */
- vq_relchain(vq, tlen);
+ vq_relchain(vq, idx, tlen);
}
static void
@@ -479,7 +481,6 @@ pci_vtnet_tx_thread(void *param)
sc->tx_in_progress = 1;
pthread_mutex_unlock(&sc->tx_mtx);
- vq_startchains(vq);
do {
/*
* Run through entries, placing them into
diff --git a/usr.sbin/bhyve/pci_virtio_rnd.c b/usr.sbin/bhyve/pci_virtio_rnd.c
index 0a31080..92d1d6f 100644
--- a/usr.sbin/bhyve/pci_virtio_rnd.c
+++ b/usr.sbin/bhyve/pci_virtio_rnd.c
@@ -103,18 +103,17 @@ pci_vtrnd_notify(void *vsc, struct vqueue_info *vq)
struct iovec iov;
struct pci_vtrnd_softc *sc;
int len;
+ uint16_t idx;
sc = vsc;
- vq_startchains(vq);
-
if (sc->vrsc_fd < 0) {
vq_endchains(vq, 0);
return;
}
while (vq_has_descs(vq)) {
- vq_getchain(vq, &iov, 1, NULL);
+ vq_getchain(vq, &idx, &iov, 1, NULL);
len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len);
@@ -126,7 +125,7 @@ pci_vtrnd_notify(void *vsc, struct vqueue_info *vq)
/*
* Release this chain and handle more
*/
- vq_relchain(vq, len);
+ vq_relchain(vq, idx, len);
}
vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
}
diff --git a/usr.sbin/bhyve/virtio.c b/usr.sbin/bhyve/virtio.c
index 19c0d47..e985805 100644
--- a/usr.sbin/bhyve/virtio.c
+++ b/usr.sbin/bhyve/virtio.c
@@ -97,6 +97,7 @@ vi_reset_dev(struct virtio_softc *vs)
for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
vq->vq_flags = 0;
vq->vq_last_avail = 0;
+ vq->vq_save_used = 0;
vq->vq_pfn = 0;
vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
}
@@ -188,6 +189,7 @@ vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
/* Mark queue as allocated, and start at 0 when we use it. */
vq->vq_flags = VQ_ALLOC;
vq->vq_last_avail = 0;
+ vq->vq_save_used = 0;
}
/*
@@ -247,12 +249,12 @@ _vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
* that vq_has_descs() does one).
*/
int
-vq_getchain(struct vqueue_info *vq,
+vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags)
{
int i;
u_int ndesc, n_indir;
- u_int idx, head, next;
+ u_int idx, next;
volatile struct virtio_desc *vdir, *vindir, *vp;
struct vmctx *ctx;
struct virtio_softc *vs;
@@ -295,8 +297,8 @@ vq_getchain(struct vqueue_info *vq,
* index, but we just abort if the count gets excessive.
*/
ctx = vs->vs_pi->pi_vmctx;
- head = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
- next = head;
+ *pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
+ vq->vq_last_avail++;
for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
if (next >= vq->vq_qsize) {
fprintf(stderr,
@@ -370,16 +372,29 @@ loopy:
}
/*
- * Return the currently-first request chain to the guest, setting
- * its I/O length to the provided value.
+ * Return the currently-first request chain back to the available queue.
*
* (This chain is the one you handled when you called vq_getchain()
* and used its positive return value.)
*/
void
-vq_relchain(struct vqueue_info *vq, uint32_t iolen)
+vq_retchain(struct vqueue_info *vq)
{
- uint16_t head, uidx, mask;
+
+ vq->vq_last_avail--;
+}
+
+/*
+ * Return specified request chain to the guest, setting its I/O length
+ * to the provided value.
+ *
+ * (This chain is the one you handled when you called vq_getchain()
+ * and used its positive return value.)
+ */
+void
+vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
+{
+ uint16_t uidx, mask;
volatile struct vring_used *vuh;
volatile struct virtio_used *vue;
@@ -395,11 +410,10 @@ vq_relchain(struct vqueue_info *vq, uint32_t iolen)
*/
mask = vq->vq_qsize - 1;
vuh = vq->vq_used;
- head = vq->vq_avail->va_ring[vq->vq_last_avail++ & mask];
uidx = vuh->vu_idx;
vue = &vuh->vu_ring[uidx++ & mask];
- vue->vu_idx = head; /* ie, vue->id = head */
+ vue->vu_idx = idx;
vue->vu_tlen = iolen;
vuh->vu_idx = uidx;
}
@@ -436,8 +450,8 @@ vq_endchains(struct vqueue_info *vq, int used_all_avail)
* entire avail was processed, we need to interrupt always.
*/
vs = vq->vq_vs;
- new_idx = vq->vq_used->vu_idx;
old_idx = vq->vq_save_used;
+ vq->vq_save_used = new_idx = vq->vq_used->vu_idx;
if (used_all_avail &&
(vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
intr = 1;
diff --git a/usr.sbin/bhyve/virtio.h b/usr.sbin/bhyve/virtio.h
index 6f655f3..0e96a1d 100644
--- a/usr.sbin/bhyve/virtio.h
+++ b/usr.sbin/bhyve/virtio.h
@@ -425,20 +425,6 @@ vq_has_descs(struct vqueue_info *vq)
}
/*
- * Called by virtio driver as it starts processing chains. Each
- * completed chain (obtained from vq_getchain()) is released by
- * calling vq_relchain(), then when all are done, vq_endchains()
- * can tell if / how-many chains were processed and know whether
- * and how to generate an interrupt.
- */
-static inline void
-vq_startchains(struct vqueue_info *vq)
-{
-
- vq->vq_save_used = vq->vq_used->vu_idx;
-}
-
-/*
* Deliver an interrupt to guest on the given virtual queue
* (if possible, or a generic MSI interrupt if not using MSI-X).
*/
@@ -465,9 +451,10 @@ int vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
void vi_reset_dev(struct virtio_softc *);
void vi_set_io_bar(struct virtio_softc *, int);
-int vq_getchain(struct vqueue_info *vq,
+int vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags);
-void vq_relchain(struct vqueue_info *vq, uint32_t iolen);
+void vq_retchain(struct vqueue_info *vq);
+void vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
void vq_endchains(struct vqueue_info *vq, int used_all_avail);
uint64_t vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,