summary refs log tree commit diff stats
path: root/sys
diff options
context:
space:
mode:
authorwpaul <wpaul@FreeBSD.org>2004-09-28 18:39:04 +0000
committerwpaul <wpaul@FreeBSD.org>2004-09-28 18:39:04 +0000
commit2f2f7359cc2aba3a3344e8b341e920fdcb52a1e1 (patch)
tree101d056c296a3c0fa3a14c3ac8930e695e573338 /sys
parent49fe0299db39762cc4cbcb6af27ed3a1aad69514 (diff)
downloadFreeBSD-src-2f2f7359cc2aba3a3344e8b341e920fdcb52a1e1.zip
FreeBSD-src-2f2f7359cc2aba3a3344e8b341e920fdcb52a1e1.tar.gz
Arrgh. Recently I tried using ugen(4) in an application that uses
select(2), and discovered to my horror that ugen(4)'s bulk in/out support is horribly lobotomized. Bulk transfers are done using the synchronous API instead of the asynchronous one. This causes the following broken behavior to occur:

- You open the bulk in/out ugen device and get a descriptor
- You create some other descriptor (socket, other device, etc...)
- You select on both the descriptors waiting until either one has data ready to read
- Because of ugen's brokenness, you block in usb_bulk_transfer() inside ugen_do_read() instead of blocking in select()
- The non-USB descriptor becomes ready for reading, but you remain blocked on select()
- The USB descriptor becomes ready for reading
- Only now are you woken up so that you can read data from either descriptor.

The result is select() can only wake up when there's USB data pending. If any other descriptor becomes ready, you lose: until the USB descriptor becomes ready, you stay asleep. The correct approach is to use async bulk transfers, so I changed the read code to use the async bulk transfer API. I left the write side alone for now since it's less of an issue. Note that the uscanner driver has the same brokenness in it.
Diffstat (limited to 'sys')
-rw-r--r--sys/dev/usb/ugen.c148
1 file changed, 122 insertions, 26 deletions
diff --git a/sys/dev/usb/ugen.c b/sys/dev/usb/ugen.c
index 4ff006f..b195da7 100644
--- a/sys/dev/usb/ugen.c
+++ b/sys/dev/usb/ugen.c
@@ -122,6 +122,13 @@ struct ugen_endpoint {
void *dmabuf;
u_int16_t sizes[UGEN_NISORFRMS];
} isoreqs[UGEN_NISOREQS];
+ struct {
+ usbd_xfer_handle xfer;
+ int err;
+ int len;
+ void *buf;
+ int datardy;
+ } bulkreq;
};
struct ugen_softc {
@@ -171,6 +178,7 @@ Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
usbd_status status);
+Static void ugen_rdcb(usbd_xfer_handle, usbd_private_handle, usbd_status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
@@ -463,6 +471,32 @@ ugenopen(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
edesc->bEndpointAddress, 0, &sce->pipeh);
if (err)
return (EIO);
+
+ if (dir == OUT)
+ break;
+
+ /* If this is the read pipe, set up an async xfer. */
+ isize = UGETW(edesc->wMaxPacketSize);
+ if (isize == 0) /* shouldn't happen */
+ return (EINVAL);
+ sce->bulkreq.buf = malloc(isize, M_USBDEV, M_WAITOK);
+ if (sce->bulkreq.buf == 0)
+ return (ENOMEM);
+ sce->bulkreq.xfer = usbd_alloc_xfer(sc->sc_udev);
+ if (sce->bulkreq.xfer == 0) {
+ free(sce->bulkreq.buf, M_USBDEV);
+ return (ENOMEM);
+ }
+ sce->bulkreq.len = isize;
+ sce->bulkreq.err = 0;
+ sce->bulkreq.datardy = 0;
+ usbd_setup_xfer(sce->bulkreq.xfer, sce->pipeh, sce,
+ sce->bulkreq.buf, sce->bulkreq.len,
+ sce->state & UGEN_SHORT_OK ?
+ USBD_SHORT_XFER_OK : 0, sce->timeout,
+ ugen_rdcb);
+ usbd_transfer(sce->bulkreq.xfer);
+
break;
case UE_ISOCHRONOUS:
if (dir == OUT)
@@ -579,6 +613,15 @@ ugenclose(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
sce->ibuf = NULL;
clfree(&sce->q);
}
+
+ if (sce->bulkreq.buf != NULL)
+ free(sce->bulkreq.buf, M_USBDEV);
+ if (sce->bulkreq.xfer != NULL) {
+ ugen_rdcb(sce->bulkreq.xfer, sce, USBD_INTERRUPTED);
+ usbd_free_xfer(sce->bulkreq.xfer);
+ sce->bulkreq.xfer = NULL;
+ }
+
}
sc->sc_is_open[endpt] = 0;
if (--sc->sc_refcnt == 0)
@@ -587,13 +630,43 @@ ugenclose(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
return (0);
}
+Static void
+ugen_rdcb(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
+{
+ struct ugen_endpoint *sce;
+
+ sce = priv;
+
+ if (status != USBD_NORMAL_COMPLETION) {
+ if (status == USBD_INTERRUPTED)
+ sce->bulkreq.err = EINTR;
+ else if (status == USBD_TIMEOUT)
+ sce->bulkreq.err = ETIMEDOUT;
+ else
+ sce->bulkreq.err = EIO;
+ } else {
+ sce->bulkreq.err = 0;
+ sce->bulkreq.datardy = 1;
+ usbd_get_xfer_status(xfer, NULL, NULL,
+ &sce->bulkreq.len, NULL);
+ }
+
+ if (sce->state & UGEN_ASLP) {
+ sce->state &= ~UGEN_ASLP;
+ wakeup(sce);
+ }
+
+ selwakeuppri(&sce->rsel, PZERO);
+
+ return;
+}
+
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
u_int32_t n, tn;
- char buf[UGEN_BBSIZE];
- usbd_xfer_handle xfer;
+ int isize;
usbd_status err;
int s;
int error = 0;
@@ -658,32 +731,43 @@ ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
}
break;
case UE_BULK:
- xfer = usbd_alloc_xfer(sc->sc_udev);
- if (xfer == 0)
- return (ENOMEM);
- while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
+ isize = UGETW(sce->edesc->wMaxPacketSize);
+ while ((n = min(isize, uio->uio_resid)) != 0) {
DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
tn = n;
- err = usbd_bulk_transfer(
- xfer, sce->pipeh,
- sce->state & UGEN_SHORT_OK ?
- USBD_SHORT_XFER_OK : 0,
- sce->timeout, buf, &tn, "ugenrb");
- if (err) {
- if (err == USBD_INTERRUPTED)
- error = EINTR;
- else if (err == USBD_TIMEOUT)
- error = ETIMEDOUT;
- else
- error = EIO;
- break;
+
+ /* Wait for data to be ready. */
+
+ while (sce->bulkreq.datardy == 0) {
+ sce->state |= UGEN_ASLP;
+ error = tsleep(sce, PCATCH | PZERO,
+ "ugenrd", 0);
+ if (error) {
+ sce->state &= ~UGEN_ASLP;
+ break;
+ }
}
+
+ tn = sce->bulkreq.len;
+ err = sce->bulkreq.err;
+ error = uiomove(sce->bulkreq.buf, tn, uio);
+
+ /* Set up a new transfer. */
+
+ sce->bulkreq.datardy = 0;
+ usbd_setup_xfer(sce->bulkreq.xfer, sce->pipeh, sce,
+ sce->bulkreq.buf, sce->bulkreq.len,
+ sce->state & UGEN_SHORT_OK ?
+ USBD_SHORT_XFER_OK : 0, sce->timeout, ugen_rdcb);
+ usbd_transfer(sce->bulkreq.xfer);
+
+ if (err)
+ break;
+
DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
- error = uiomove(buf, tn, uio);
if (error || tn < n)
break;
}
- usbd_free_xfer(xfer);
break;
case UE_ISOCHRONOUS:
s = splusb();
@@ -881,6 +965,10 @@ USB_DETACH(ugen)
sce = &sc->sc_endpoints[i][dir];
if (sce && sce->pipeh)
usbd_abort_pipe(sce->pipeh);
+ /* cancel async bulk transfer */
+ if (sce->bulkreq.xfer != NULL)
+ ugen_rdcb(sce->bulkreq.xfer,
+ sce, USBD_INTERRUPTED);
}
}
@@ -1452,12 +1540,20 @@ ugenpoll(struct cdev *dev, int events, usb_proc_ptr p)
break;
case UE_BULK:
/*
- * We have no easy way of determining if a read will
- * yield any data or a write will happen.
- * Pretend they will.
+ * We have async transfers for reads now, so we can
+ * select on those. Writes tend to complete immediately
+ * so we can get away without async code for those,
+ * though we should probably do async bulk out transfers
+ * too at some point.
*/
- revents |= events &
- (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (sce->bulkreq.datardy)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &sce->rsel);
+ }
+ if (events & (POLLOUT | POLLWRNORM))
+ revents |= events & (POLLOUT | POLLWRNORM);
break;
default:
break;
OpenPOWER on IntegriCloud