Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/xhci-hcd.c  | 370
-rw-r--r--  drivers/usb/host/xhci-mem.c  | 174
-rw-r--r--  drivers/usb/host/xhci-pci.c  |   4
-rw-r--r--  drivers/usb/host/xhci-ring.c |  11
-rw-r--r--  drivers/usb/host/xhci.h      |  18
5 files changed, 572 insertions(+), 5 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5d94b4f..50ab525 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -530,6 +530,26 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
return index;
}
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
+/* Compute the last valid endpoint context index. Basically, this is the
+ * endpoint index plus one. For slot contexts with more than one valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+ return fls(added_ctxs) - 1;
+}
+
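As a worked example of the bitmask math in the two helpers above, here is a small standalone userspace sketch; the index formula from xhci_get_endpoint_index() is mirrored by hand and covers non-control endpoints only:

#include <stdio.h>

/* Hand-mirrored xhci_get_endpoint_index() for non-control endpoints:
 * index = (endpoint number * 2) + direction - 1, where IN = 1.
 */
static unsigned int ep_index(unsigned int bEndpointAddress)
{
	unsigned int num = bEndpointAddress & 0x0f;
	unsigned int in = (bEndpointAddress & 0x80) ? 1 : 0;

	return num * 2 + in - 1;
}

int main(void)
{
	unsigned int idx = ep_index(0x81);	/* ep 1 IN */
	unsigned int flag = 1 << (idx + 1);	/* slot context is bit 0 */

	/* prints "index 2, flag 0x8"; fls(0x8) - 1 = 3 = last valid ctx */
	printf("index %u, flag %#x\n", idx, flag);
	return 0;
}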
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
@@ -602,6 +622,349 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
return -ENOSYS;
}
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int last_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 drop_flag;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+ drop_flag = xhci_get_endpoint_flag(&ep->desc);
+ if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+ xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+ __func__, drop_flag);
+ return 0;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HC already knows the endpoint is disabled,
+ * or the HCD has noted it is disabled, ignore this request
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+ (in_ctx->drop_flags & drop_flag)) {
+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ __func__, ep);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+ }
+
+ in_ctx->drop_flags |= drop_flag;
+ new_drop_flags = in_ctx->drop_flags;
+
+ in_ctx->add_flags &= ~drop_flag;
+ new_add_flags = in_ctx->add_flags;
+
+ last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+ /* Update the last valid endpoint context, if we deleted the last one */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+
+ xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
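To make the staged flag state concrete, here is what the input control context holds after a drop of ep 1 IN (context flag 0x8) when nothing else is pending; an illustration worked by hand, not code from the patch:

/* Staged state after xhci_drop_endpoint() for ep 1 IN:
 *
 *	in_ctx->drop_flags == 0x8	D3 set: drop endpoint context 3
 *	in_ctx->add_flags  == 0x0	A3 cleared: nothing re-added yet
 *
 * The next Configure Endpoint command disables that one endpoint and
 * leaves every other context untouched.
 */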
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 added_ctxs;
+ unsigned int last_ctx;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret = 0;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+ last_ctx = xhci_last_valid_endpoint(added_ctxs);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ /* FIXME when we have to issue an Evaluate Context command to
+ * deal with ep0 max packet size changing once we get the
+ * descriptors
+ */
+ xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+ __func__, added_ctxs);
+ return 0;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HCD has already noted the endpoint is enabled,
+ * ignore this request.
+ */
+ if (in_ctx->add_flags & added_ctxs) {
+ xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ __func__, ep);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+ }
+
+ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], udev, ep) < 0) {
+ dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+ __func__, ep->desc.bEndpointAddress);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ENOMEM;
+ }
+
+ in_ctx->add_flags |= added_ctxs;
+ new_add_flags = in_ctx->add_flags;
+
+ /* If xhci_drop_endpoint() was called for this endpoint, but the
+ * xHC hasn't been notified yet through the check_bandwidth() call,
+ * this re-adds a new state for the endpoint from the new endpoint
+ * descriptors. We must drop and re-add this endpoint, so we leave the
+ * drop flags alone.
+ */
+ new_drop_flags = in_ctx->drop_flags;
+
+ /* Update the last valid endpoint context, if we just added one past it */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
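The ordering contract spelled out in the comments above reduces to the call sequence below, which the USB core (not a device driver) is expected to drive; a hedged sketch with error handling elided:

/* Sketch of the intended call order when an interface's endpoints change */
ret = xhci_drop_endpoint(hcd, udev, ep);	/* stage removal of old state */
if (!ret)
	ret = xhci_add_endpoint(hcd, udev, ep);	/* stage the new descriptor */
if (!ret)
	ret = xhci_check_bandwidth(hcd, udev);	/* one Configure Endpoint cmd */
if (ret)
	xhci_reset_bandwidth(hcd, udev);	/* back out anything staged */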
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ int i;
+
+ /* When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ virt_dev->in_ctx->drop_flags = 0;
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ /* Endpoint 0 is always valid */
+ virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+ for (i = 1; i < 31; ++i) {
+ ep_ctx = &virt_dev->in_ctx->ep[i];
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int i;
+ int ret = 0;
+ int timeleft;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+ xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+ virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+ virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+ xhci_dbg(xhci, "New Input Control Context:\n");
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+ if (ret < 0) {
+ xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ENOMEM;
+ }
+ ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &virt_dev->cmd_completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the configure endpoint command */
+ return -ETIME;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ switch (virt_dev->cmd_status) {
+ case COMP_ENOMEM:
+ dev_warn(&udev->dev, "Not enough host controller resources "
+ "for new device state.\n");
+ ret = -ENOMEM;
+ /* FIXME: can we allocate more resources for the HC? */
+ break;
+ case COMP_BW_ERR:
+ dev_warn(&udev->dev, "Not enough bandwidth "
+ "for new device state.\n");
+ ret = -ENOSPC;
+ /* FIXME: can we go back to the old state? */
+ break;
+ case COMP_TRB_ERR:
+ /* the HCD set up something wrong */
+ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+ "and endpoint is not disabled.\n");
+ ret = -EINVAL;
+ break;
+ case COMP_SUCCESS:
+ dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", virt_dev->cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ /* Caller should call reset_bandwidth() */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+
+ xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ xhci_zero_in_ctx(virt_dev);
+ /* Free any old rings */
+ for (i = 1; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+ virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return ret;
+}
+
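The wait in xhci_check_bandwidth() pairs with the completion path added to xhci-ring.c later in this patch; reduced to its essentials, the handshake looks like this (names are the driver's, the sequencing is a sketch):

/* Submitter (process context; xhci->lock is dropped before sleeping): */
queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
ring_cmd_db(xhci);
wait_for_completion_interruptible_timeout(&virt_dev->cmd_completion,
		USB_CTRL_SET_TIMEOUT);

/* Event handler (interrupt context, see handle_cmd_completion() below): */
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);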
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ int i, ret;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return;
+ xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+ xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+ virt_dev = xhci->devs[udev->slot_id];
+ /* Free any rings allocated for added endpoints */
+ for (i = 0; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+ xhci_zero_in_ctx(virt_dev);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
/*
* At this point, the struct usb_device is about to go away, the device has
* disconnected, and all traffic has been stopped and the endpoints have been
@@ -783,7 +1146,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
* address given back to us by the HC.
*/
udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
- /* FIXME: Zero the input context control for later use? */
+ /* Zero the input context control for later use */
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->drop_flags = 0;
+ /* Mirror flags in the output context for future ep enable/disable */
+ virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+ virt_dev->out_ctx->drop_flags = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6ff2e29..8cd55f0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -103,7 +103,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
}
/* XXX: Do we need the hcd structure in all these functions? */
-static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_segment *seg;
struct xhci_segment *first_seg;
@@ -257,6 +257,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
if (!dev->ep_rings[0])
goto fail;
+ init_completion(&dev->cmd_completion);
+
/*
* Point to output device context in dcbaa; skip the output control
* context, which is eight 32 bit fields (or 32 bytes long)
@@ -366,6 +368,176 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return 0;
}
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes". If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^N * 125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval = 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ /* Max NAK rate */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ interval = ep->desc.bInterval;
+ /* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (ep->desc.bInterval == 0)
+ interval = 0;
+ else
+ interval = ep->desc.bInterval - 1;
+ if (interval > 15)
+ interval = 15;
+ if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ /* Convert bInterval (in 1-255 frames) to microframes and round down to
+ * nearest power of 2.
+ */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = fls(8*ep->desc.bInterval) - 1;
+ if (interval > 10)
+ interval = 10;
+ if (interval < 3)
+ interval = 3;
+ if ((1 << interval) != 8*ep->desc.bInterval)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ default:
+ BUG();
+ }
+ return EP_INTERVAL(interval);
+}
+
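A few values worked through the decoding above, computed by hand for illustration:

/* High-speed interrupt ep, bInterval = 5:
 *	interval = 5 - 1 = 4		-> 2^4 * 125us = 2ms, exact
 * Full-speed interrupt ep, bInterval = 4 frames:
 *	interval = fls(8 * 4) - 1 = 5	-> 2^5 * 125us = 4ms, exact
 * Full-speed interrupt ep, bInterval = 7 frames:
 *	interval = fls(8 * 7) - 1 = 5	-> 4ms, rounded down from 7ms,
 *					   so the rounding warning fires
 */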
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int in;
+ u32 type;
+
+ in = usb_endpoint_dir_in(&ep->desc);
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ type = EP_TYPE(CTRL_EP);
+ } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(BULK_IN_EP);
+ else
+ type = EP_TYPE(BULK_OUT_EP);
+ } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(ISOC_IN_EP);
+ else
+ type = EP_TYPE(ISOC_OUT_EP);
+ } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(INT_IN_EP);
+ else
+ type = EP_TYPE(INT_OUT_EP);
+ } else {
+ BUG();
+ }
+ return type;
+}
+
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_ring *ep_ring;
+ unsigned int max_packet;
+ unsigned int max_burst;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ /* Set up the endpoint ring; GFP_ATOMIC because the caller holds xhci->lock */
+ virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, GFP_ATOMIC);
+ if (!virt_dev->new_ep_rings[ep_index])
+ return -ENOMEM;
+ ep_ring = virt_dev->new_ep_rings[ep_index];
+ ep_ctx->deq[1] = 0;
+ ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+
+ ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+ /* FIXME dig Mult and streams info out of ep companion desc */
+
+ /* Allow 3 retries for everything but isoc */
+ if (!usb_endpoint_xfer_isoc(&ep->desc))
+ ep_ctx->ep_info2 = ERROR_COUNT(3);
+ else
+ ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+ ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+ /* Set the max packet size and max burst */
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = ep->desc.wMaxPacketSize;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ /* FIXME dig out burst from ep companion desc */
+ break;
+ case USB_SPEED_HIGH:
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+ }
+ /* Fall through */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ /* bits 10:0 of wMaxPacketSize hold the packet size itself */
+ max_packet = ep->desc.wMaxPacketSize & 0x7ff;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ break;
+ default:
+ BUG();
+ }
+ /* FIXME Debug endpoint context */
+ return 0;
+}
+
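For a concrete high-speed case, a periodic endpoint advertising wMaxPacketSize 0x1200 decodes under the masks above as follows (hand-worked illustration):

/* wMaxPacketSize 0x1200 on a high-speed isoc or interrupt endpoint: */
max_burst = (0x1200 & 0x1800) >> 11;	/* = 2 extra transactions/uframe */
max_packet = 0x1200 & 0x7ff;		/* = 0x200 = 512 bytes per packet */
/* i.e. up to 3 x 512 bytes may move every 125us microframe */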
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->tx_info = 0;
+ /* Don't free the endpoint ring until the set interface or configuration
+ * request succeeds.
+ */
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ff9a4ef..1462709 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -115,6 +115,10 @@ static const struct hc_driver xhci_pci_hc_driver = {
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
+ .add_endpoint = xhci_add_endpoint,
+ .drop_endpoint = xhci_drop_endpoint,
+ .check_bandwidth = xhci_check_bandwidth,
+ .reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
/*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f04162a..b4ccf0d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -281,6 +281,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
if (xhci->devs[slot_id])
xhci_free_virt_device(xhci, slot_id);
break;
+ case TRB_TYPE(TRB_CONFIG_EP):
+ xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
case TRB_TYPE(TRB_ADDR_DEV):
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
complete(&xhci->addr_dev);
@@ -809,3 +813,10 @@ int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_
return queue_command(xhci, in_ctx_ptr, 0, 0,
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}
+
+/* Queue a configure endpoint command TRB */
+int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+{
+ return queue_command(xhci, in_ctx_ptr, 0, 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index fc8dcd2..1a6fd99 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -486,8 +486,6 @@ struct xhci_slot_ctx {
#define LAST_CTX_MASK (0x1f << 27)
#define LAST_CTX(p) ((p) << 27)
#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
-/* Plus one for the slot context flag */
-#define EPI_TO_FLAG(p) (1 << ((p) + 1))
#define SLOT_FLAG (1 << 0)
#define EP0_FLAG (1 << 1)
@@ -566,7 +564,7 @@ struct xhci_ep_ctx {
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL (0xff << 16)
+#define EP_INTERVAL(p) (((p) & 0xff) << 16)
/* ep_info2 bitmasks */
/*
@@ -626,6 +624,11 @@ struct xhci_virt_device {
dma_addr_t in_ctx_dma;
/* FIXME when stream support is added */
struct xhci_ring *ep_rings[31];
+ /* Rings for newly added endpoints, staged here until the Configure
+ * Endpoint command succeeds; freed again if it fails, so the previous
+ * device state is preserved
+ */
+ struct xhci_ring *new_ep_rings[31];
+ struct completion cmd_completion;
/* Status of the last command issued for this device */
u32 cmd_status;
};
@@ -1075,6 +1078,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
#ifdef CONFIG_PCI
/* xHCI PCI glue */
@@ -1096,6 +1103,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1106,6 +1117,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci);
int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
+int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,