author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-08 11:31:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-08 11:31:16 -0700
commit    3f17ea6dea8ba5668873afa54628a91aaa3fb1c0
tree      afbeb2accd4c2199ddd705ae943995b143a0af02 /drivers/usb/host
parent    1860e379875dfe7271c649058aeddffe5afd9d0d
parent    1a5700bc2d10cd379a795fd2bb377a190af5acd4
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig         |   34
-rw-r--r--  drivers/usb/host/Makefile        |    4
-rw-r--r--  drivers/usb/host/ehci-exynos.c   |  141
-rw-r--r--  drivers/usb/host/ehci-hub.c      |   12
-rw-r--r--  drivers/usb/host/ehci-msm.c      |    7
-rw-r--r--  drivers/usb/host/ehci-mv.c       |   16
-rw-r--r--  drivers/usb/host/ehci-orion.c    |   92
-rw-r--r--  drivers/usb/host/ehci-platform.c |   26
-rw-r--r--  drivers/usb/host/ehci-spear.c    |   13
-rw-r--r--  drivers/usb/host/ehci-tegra.c    |   15
-rw-r--r--  drivers/usb/host/ehci-tilegx.c   |    8
-rw-r--r--  drivers/usb/host/ehci.h          |    3
-rw-r--r--  drivers/usb/host/max3421-hcd.c   | 1957
-rw-r--r--  drivers/usb/host/ohci-at91.c     |   11
-rw-r--r--  drivers/usb/host/ohci-exynos.c   |  145
-rw-r--r--  drivers/usb/host/ohci-hcd.c      |    2
-rw-r--r--  drivers/usb/host/ohci-hub.c      |    8
-rw-r--r--  drivers/usb/host/ohci-platform.c |   27
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c   |   68
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c  |   13
-rw-r--r--  drivers/usb/host/ohci-tilegx.c   |    8
-rw-r--r--  drivers/usb/host/ohci.h          |    3
-rw-r--r--  drivers/usb/host/pci-quirks.h    |    1
-rw-r--r--  drivers/usb/host/xhci-hub.c      |   43
-rw-r--r--  drivers/usb/host/xhci-mem.c      |   17
-rw-r--r--  drivers/usb/host/xhci-mvebu.c    |   72
-rw-r--r--  drivers/usb/host/xhci-mvebu.h    |   21
-rw-r--r--  drivers/usb/host/xhci-pci.c      |   10
-rw-r--r--  drivers/usb/host/xhci-plat.c     |   51
-rw-r--r--  drivers/usb/host/xhci-ring.c     |  587
-rw-r--r--  drivers/usb/host/xhci.c          |  271
-rw-r--r--  drivers/usb/host/xhci.h          |   49
32 files changed, 3008 insertions, 727 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 3d9e540..61b7817 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -29,6 +29,14 @@ if USB_XHCI_HCD
config USB_XHCI_PLATFORM
tristate
+config USB_XHCI_MVEBU
+ tristate "xHCI support for Marvell Armada 375/38x"
+ select USB_XHCI_PLATFORM
+ depends on ARCH_MVEBU || COMPILE_TEST
+ ---help---
+ Say 'Y' to enable support for the xHCI host controller
+ found in Marvell Armada 375/38x ARM SoCs.
+
endif # USB_XHCI_HCD
config USB_EHCI_HCD
@@ -170,7 +178,6 @@ config USB_EHCI_MSM
tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
depends on ARCH_MSM
select USB_EHCI_ROOT_HUB_TT
- select USB_MSM_OTG
---help---
Enables support for the USB Host controller present on the
Qualcomm chipsets. Root Hub has inbuilt TT.
@@ -343,10 +350,19 @@ config USB_FOTG210_HCD
To compile this driver as a module, choose M here: the
module will be called fotg210-hcd.
+config USB_MAX3421_HCD
+ tristate "MAX3421 HCD (USB-over-SPI) support"
+ depends on USB && SPI
+ ---help---
+ The Maxim MAX3421E chip supports standard USB 2.0-compliant
+ full-speed devices either in host or peripheral mode. This
+ driver supports the host-mode of the MAX3421E only.
+
+ To compile this driver as a module, choose M here: the module will
+ be called max3421-hcd.
+
config USB_OHCI_HCD
tristate "OHCI HCD (USB 1.1) support"
- select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
- depends on USB_ISP1301 || !ARCH_LPC32XX
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing
USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -365,6 +381,7 @@ if USB_OHCI_HCD
config USB_OHCI_HCD_OMAP1
tristate "OHCI support for OMAP1/2 chips"
depends on ARCH_OMAP1
+ depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
default y
---help---
Enables support for the OHCI controller on OMAP1/2 chips.
@@ -388,6 +405,7 @@ config USB_OHCI_HCD_S3C2410
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
depends on USB_OHCI_HCD && ARCH_LPC32XX
+ depends on USB_ISP1301
default y
---help---
Enables support for the on-chip OHCI controller on
@@ -417,6 +435,16 @@ config USB_OHCI_HCD_OMAP3
Enables support for the on-chip OHCI controller on
OMAP3 and later chips.
+config USB_OHCI_HCD_DAVINCI
+ bool "OHCI support for TI DaVinci DA8xx"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on USB_OHCI_HCD=y
+ default y
+ help
+ Enables support for the DaVinci DA8xx integrated OHCI
+ controller. This driver cannot currently be a loadable
+ module because it lacks a proper PHY abstraction.
+
config USB_OHCI_ATH79
bool "USB OHCI support for the Atheros AR71XX/AR7240 SoCs (DEPRECATED)"
depends on (SOC_AR71XX || SOC_AR724X)
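The new USB_MAX3421_HCD entry above is unusual for this directory: the controller sits on SPI rather than on a memory-mapped bus, so a board has to describe the chip as an SPI slave before the driver can bind. The fragment below is a minimal sketch of such a board-file registration, not part of this commit; the "max3421-hcd" modalias and the contents of the platform data are assumptions for illustration and should be checked against the driver and <linux/platform_data/max3421-hcd.h>.

    #include <linux/init.h>
    #include <linux/spi/spi.h>
    #include <linux/platform_data/max3421-hcd.h>

    /* Board-specific VBUS details would go here; fields intentionally not spelled out. */
    static struct max3421_hcd_platform_data max3421_pdata = { };

    static struct spi_board_info max3421_board_info[] __initdata = {
        {
            .modalias      = "max3421-hcd",   /* assumed to match the driver's SPI name */
            .max_speed_hz  = 8 * 1000 * 1000, /* whatever the board wiring supports */
            .bus_num       = 1,               /* board specific */
            .chip_select   = 0,
            .platform_data = &max3421_pdata,
            /* .irq should be wired to the chip's INT pin, e.g. via gpio_to_irq() */
        },
    };

    static void __init board_register_max3421(void)
    {
        spi_register_board_info(max3421_board_info,
                                ARRAY_SIZE(max3421_board_info));
    }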
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 7530468..af89a90 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -19,6 +19,9 @@ xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
xhci-hcd-y += xhci-plat.o
+ifneq ($(CONFIG_USB_XHCI_MVEBU), )
+ xhci-hcd-y += xhci-mvebu.o
+endif
endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
@@ -70,3 +73,4 @@ obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
+obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 7f425ac..d1c7621 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/usb/samsung_usb_phy.h>
@@ -42,17 +43,106 @@
static const char hcd_name[] = "ehci-exynos";
static struct hc_driver __read_mostly exynos_ehci_hc_driver;
+#define PHY_NUMBER 3
+
struct exynos_ehci_hcd {
struct clk *clk;
struct usb_phy *phy;
struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
};
#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
-static void exynos_setup_vbus_gpio(struct platform_device *pdev)
+static int exynos_ehci_get_phy(struct device *dev,
+ struct exynos_ehci_hcd *exynos_ehci)
+{
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ehci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ehci->phy)) {
+ ret = PTR_ERR(exynos_ehci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ehci->otg = exynos_ehci->phy->otg;
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ehci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ehci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ehci->phy))
+ return usb_phy_init(exynos_ehci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ ret = phy_power_on(exynos_ehci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+
+ return ret;
+}
+
+static void exynos_ehci_phy_disable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+
+ if (!IS_ERR(exynos_ehci->phy)) {
+ usb_phy_shutdown(exynos_ehci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+}
+
+static void exynos_setup_vbus_gpio(struct device *dev)
{
- struct device *dev = &pdev->dev;
int err;
int gpio;
@@ -75,7 +165,6 @@ static int exynos_ehci_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
- struct usb_phy *phy;
int irq;
int err;
@@ -88,7 +177,7 @@ static int exynos_ehci_probe(struct platform_device *pdev)
if (err)
return err;
- exynos_setup_vbus_gpio(pdev);
+ exynos_setup_vbus_gpio(&pdev->dev);
hcd = usb_create_hcd(&exynos_ehci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
@@ -102,15 +191,9 @@ static int exynos_ehci_probe(struct platform_device *pdev)
"samsung,exynos5440-ehci"))
goto skip_phy;
- phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(phy)) {
- usb_put_hcd(hcd);
- dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- } else {
- exynos_ehci->phy = phy;
- exynos_ehci->otg = phy->otg;
- }
+ err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
+ if (err)
+ goto fail_clk;
skip_phy:
@@ -135,10 +218,9 @@ skip_phy:
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to remap I/O memory\n");
- err = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
goto fail_io;
}
@@ -152,8 +234,11 @@ skip_phy:
if (exynos_ehci->otg)
exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (exynos_ehci->phy)
- usb_phy_init(exynos_ehci->phy);
+ err = exynos_ehci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
@@ -173,8 +258,7 @@ skip_phy:
return 0;
fail_add_hcd:
- if (exynos_ehci->phy)
- usb_phy_shutdown(exynos_ehci->phy);
+ exynos_ehci_phy_disable(&pdev->dev);
fail_io:
clk_disable_unprepare(exynos_ehci->clk);
fail_clk:
@@ -192,8 +276,7 @@ static int exynos_ehci_remove(struct platform_device *pdev)
if (exynos_ehci->otg)
exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (exynos_ehci->phy)
- usb_phy_shutdown(exynos_ehci->phy);
+ exynos_ehci_phy_disable(&pdev->dev);
clk_disable_unprepare(exynos_ehci->clk);
@@ -218,8 +301,7 @@ static int exynos_ehci_suspend(struct device *dev)
if (exynos_ehci->otg)
exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (exynos_ehci->phy)
- usb_phy_shutdown(exynos_ehci->phy);
+ exynos_ehci_phy_disable(dev);
clk_disable_unprepare(exynos_ehci->clk);
@@ -230,14 +312,19 @@ static int exynos_ehci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int ret;
clk_prepare_enable(exynos_ehci->clk);
if (exynos_ehci->otg)
exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (exynos_ehci->phy)
- usb_phy_init(exynos_ehci->phy);
+ ret = exynos_ehci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ehci->clk);
+ return ret;
+ }
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
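Several glue drivers in this series (ehci-exynos above, and ehci-msm, ehci-mv, ehci-spear and ehci-tegra below) replace open-coded region claiming and devm_ioremap() with devm_ioremap_resource(), which requests and maps the region in one call, prints its own diagnostics, and reports failure through ERR_PTR(). A condensed sketch of the resulting probe idiom, with purely illustrative names:

    /* needs <linux/platform_device.h>, <linux/io.h>, <linux/err.h> */
    static int foo_hcd_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* also handles res == NULL; no separate dev_err() needed */
        regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(regs))
            return PTR_ERR(regs);   /* -EINVAL, -EBUSY or -ENOMEM */

        /* hcd->regs = regs; hcd->rsrc_start = res->start; ... */
        return 0;
    }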
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 7ae0c4d..cc305c7 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -33,15 +33,6 @@
#ifdef CONFIG_PM
-static int ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-);
-
static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
{
return !udev->maxchild && udev->persist_enabled &&
@@ -865,7 +856,7 @@ cleanup:
#endif /* CONFIG_USB_HCD_TEST_MODE */
/*-------------------------------------------------------------------------*/
-static int ehci_hub_control (
+int ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -1285,6 +1276,7 @@ error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(ehci_hub_control);
static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
{
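Making ehci_hub_control() non-static and exporting it lets platform glue intercept selected root-hub requests and then fall through to the generic handler, instead of caching a function pointer at init time; the ehci-tegra change further down in this diff is the first user. A minimal sketch of such a wrapper (driver name illustrative):

    /* Sketch: filter one root-hub request, delegate the rest. */
    static int foo_ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                    u16 wIndex, char *buf, u16 wLength)
    {
        if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
            /* platform-specific quirk handling would go here */
        }
        return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
    }

    /* installed once at module init, as ehci-tegra does below:
     *      foo_ehci_hc_driver.hub_control = foo_ehci_hub_control;
     */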
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index f341651..982c09b 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -96,10 +96,9 @@ static int ehci_msm_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index bd61612..08147c3 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -176,11 +176,9 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- ehci_mv->phy_regs = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
- if (!ehci_mv->phy_regs) {
- dev_err(&pdev->dev, "failed to map phy I/O memory\n");
- retval = -EFAULT;
+ ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->phy_regs)) {
+ retval = PTR_ERR(ehci_mv->phy_regs);
goto err_put_hcd;
}
@@ -191,11 +189,9 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- ehci_mv->cap_regs = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
- if (ehci_mv->cap_regs == NULL) {
- dev_err(&pdev->dev, "failed to map I/O memory\n");
- retval = -EFAULT;
+ ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->cap_regs)) {
+ retval = PTR_ERR(ehci_mv->cap_regs);
goto err_put_hcd;
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 30d35e5..22e15ca 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <linux/of.h>
+#include <linux/phy/phy.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/usb.h>
@@ -42,6 +43,13 @@
#define DRIVER_DESC "EHCI orion driver"
+#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
+
+struct orion_ehci_hcd {
+ struct clk *clk;
+ struct phy *phy;
+};
+
static const char hcd_name[] = "ehci-orion";
static struct hc_driver __read_mostly ehci_orion_hc_driver;
@@ -137,6 +145,10 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
+static const struct ehci_driver_overrides orion_overrides __initconst = {
+ .extra_priv_size = sizeof(struct orion_ehci_hcd),
+};
+
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
@@ -144,26 +156,23 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
- struct clk *clk;
void __iomem *regs;
int irq, err;
enum orion_ehci_phy_ver phy_version;
+ struct orion_ehci_hcd *priv;
if (usb_disabled())
return -ENODEV;
pr_debug("Initializing Orion-SoC USB Host Controller\n");
- if (pdev->dev.of_node)
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- else
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev,
"Found HC with no IRQ. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -172,7 +181,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
"Found HC with no register addr. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
/*
@@ -182,25 +191,19 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
*/
err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err1;
+ goto err;
regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(regs)) {
err = PTR_ERR(regs);
- goto err1;
+ goto err;
}
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
-
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
- goto err2;
+ goto err;
}
hcd->rsrc_start = res->start;
@@ -211,6 +214,29 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
ehci->caps = hcd->regs + 0x100;
hcd->has_tt = 1;
+ priv = hcd_to_orion_priv(hcd);
+ /*
+ * Not all platforms can gate the clock, so it is not an error if
+ * the clock does not exist.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ goto err_phy_get;
+ } else {
+ err = phy_init(priv->phy);
+ if (err)
+ goto err_phy_init;
+
+ err = phy_power_on(priv->phy);
+ if (err)
+ goto err_phy_power_on;
+ }
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -240,17 +266,23 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err3;
+ goto err_add_hcd;
device_wakeup_enable(hcd->self.controller);
return 0;
-err3:
+err_add_hcd:
+ if (!IS_ERR(priv->phy))
+ phy_power_off(priv->phy);
+err_phy_power_on:
+ if (!IS_ERR(priv->phy))
+ phy_exit(priv->phy);
+err_phy_init:
+err_phy_get:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
-err2:
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
-err1:
+err:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
@@ -260,14 +292,20 @@ err1:
static int ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct clk *clk;
+ struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
usb_remove_hcd(hcd);
+
+ if (!IS_ERR(priv->phy)) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
usb_put_hcd(hcd);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
return 0;
}
@@ -295,7 +333,7 @@ static int __init ehci_orion_init(void)
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ehci_init_driver(&ehci_orion_hc_driver, NULL);
+ ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
return platform_driver_register(&ehci_orion_driver);
}
module_init(ehci_orion_init);
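Besides the PHY support, ehci-orion above moves its per-device state (clk, phy) into the HCD itself via the ehci_driver_overrides mechanism: declaring .extra_priv_size makes usb_create_hcd() allocate extra room at the end of struct ehci_hcd, so remove() no longer has to look the clock up a second time. Reduced to a sketch with illustrative names:

    struct foo_priv {
        struct clk *clk;
    };

    #define hcd_to_foo_priv(h) ((struct foo_priv *)hcd_to_ehci(h)->priv)

    static struct hc_driver __read_mostly foo_hc_driver;

    static const struct ehci_driver_overrides foo_overrides __initconst = {
        .extra_priv_size = sizeof(struct foo_priv),
    };

    static int __init foo_init(void)
    {
        /* copies the generic EHCI hc_driver, then applies the overrides */
        ehci_init_driver(&foo_hc_driver, &foo_overrides);
        /* ... platform_driver_register(...) as usual ... */
        return 0;
    }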
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index c7dd93a..2f5b9ce 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
@@ -41,6 +42,7 @@
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
+ struct reset_control *rst;
struct phy *phy;
};
@@ -208,6 +210,18 @@ static int ehci_platform_probe(struct platform_device *dev)
}
}
+ priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->rst = NULL;
+ } else {
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ goto err_put_clks;
+ }
+
if (pdata->big_endian_desc)
ehci->big_endian_desc = 1;
if (pdata->big_endian_mmio)
@@ -218,7 +232,7 @@ static int ehci_platform_probe(struct platform_device *dev)
dev_err(&dev->dev,
"Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
err = -EINVAL;
- goto err_put_clks;
+ goto err_reset;
}
#endif
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
@@ -226,14 +240,14 @@ static int ehci_platform_probe(struct platform_device *dev)
dev_err(&dev->dev,
"Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
err = -EINVAL;
- goto err_put_clks;
+ goto err_reset;
}
#endif
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- goto err_put_clks;
+ goto err_reset;
}
hcd->rsrc_start = res_mem->start;
@@ -256,6 +270,9 @@ static int ehci_platform_probe(struct platform_device *dev)
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_reset:
+ if (priv->rst)
+ reset_control_assert(priv->rst);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -280,6 +297,9 @@ static int ehci_platform_remove(struct platform_device *dev)
if (pdata->power_off)
pdata->power_off(dev);
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 8bd915b..1d59958 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -106,16 +106,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- retval = -EBUSY;
- goto err_put_hcd;
- }
-
- hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 7ef00ec..6fdcb8a 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -51,10 +51,6 @@ struct tegra_ehci_soc_config {
bool has_hostpc;
};
-static int (*orig_hub_control)(struct usb_hcd *hcd,
- u16 typeReq, u16 wValue, u16 wIndex,
- char *buf, u16 wLength);
-
struct tegra_ehci_hcd {
struct tegra_usb_phy *phy;
struct clk *clk;
@@ -236,7 +232,7 @@ static int tegra_ehci_hub_control(
spin_unlock_irqrestore(&ehci->lock, flags);
/* Handle the hub control events here */
- return orig_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
done:
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -415,10 +411,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to remap I/O memory\n");
- err = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
goto cleanup_clk_en;
}
ehci->caps = hcd->regs + 0x100;
@@ -554,8 +549,6 @@ static int __init ehci_tegra_init(void)
* too easy.
*/
- orig_hub_control = tegra_ehci_hc_driver.hub_control;
-
tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index f3713d3..0d24767 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -142,8 +142,8 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
ehci->hcs_params = readl(&ehci->caps->hcs_params);
/* Create our IRQs and register them. */
- pdata->irq = create_irq();
- if (pdata->irq < 0) {
+ pdata->irq = irq_alloc_hwirq(-1);
+ if (!pdata->irq) {
ret = -ENXIO;
goto err_no_irq;
}
@@ -175,7 +175,7 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
}
err_have_irq:
- destroy_irq(pdata->irq);
+ irq_free_hwirq(pdata->irq);
err_no_irq:
tilegx_stop_ehc();
usb_put_hcd(hcd);
@@ -193,7 +193,7 @@ static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
tilegx_stop_ehc();
gxio_usb_host_destroy(&pdata->usb_ctx);
- destroy_irq(pdata->irq);
+ irq_free_hwirq(pdata->irq);
return 0;
}
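The ehci-tilegx change above (ohci-tilegx gets the same treatment per the diffstat) swaps create_irq()/destroy_irq() for the generic irq_alloc_hwirq()/irq_free_hwirq() helpers. The main behavioural difference is the failure convention, which is why the error test changes from "< 0" to "!pdata->irq". Roughly, assuming the 3.16-era helpers from <linux/irq.h>:

    unsigned int irq = irq_alloc_hwirq(-1);   /* -1 == NUMA_NO_NODE */
    if (!irq)                                 /* returns 0 on failure, not a negative errno */
        return -ENXIO;
    /* ... set up handler data, request_irq(irq, ...), etc. ... */
    irq_free_hwirq(irq);                      /* on teardown */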
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 9dfc6c1..eee228a 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -872,4 +872,7 @@ extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
extern int ehci_resume(struct usb_hcd *hcd, bool hibernated);
#endif /* CONFIG_PM */
+extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+
#endif /* __LINUX_EHCI_HCD_H */
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
new file mode 100644
index 0000000..858efcf
--- /dev/null
+++ b/drivers/usb/host/max3421-hcd.c
@@ -0,0 +1,1957 @@
+/*
+ * MAX3421 Host Controller driver for USB.
+ *
+ * Author: David Mosberger-Tang <davidm@egauge.net>
+ *
+ * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
+ *
+ * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
+ * controller on a SPI bus.
+ *
+ * Based on:
+ * o MAX3421E datasheet
+ * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
+ * o MAX3421E Programming Guide
+ * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
+ * o gadget/dummy_hcd.c
+ * For USB HCD implementation.
+ * o Arduino MAX3421 driver
+ * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
+ *
+ * This file is licenced under the GPL v2.
+ *
+ * Important note on worst-case (full-speed) packet size constraints
+ * (See USB 2.0 Section 5.6.3 and following):
+ *
+ * - control: 64 bytes
+ * - isochronous: 1023 bytes
+ * - interrupt: 64 bytes
+ * - bulk: 64 bytes
+ *
+ * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
+ * multi-FIFO writes/reads for a single USB packet *except* for isochronous
+ * transfers. We don't support isochronous transfers at this time, so we
+ * just assume that a USB packet always fits into a single FIFO buffer.
+ *
+ * NOTE: The June 2006 version of "MAX3421E Programming Guide"
+ * (AN3785) has conflicting info for the RCVDAVIRQ bit:
+ *
+ * The description of RCVDAVIRQ says "The CPU *must* clear
+ * this IRQ bit (by writing a 1 to it) before reading the
+ * RCVFIFO data.
+ *
+ * However, the earlier section on "Programming BULK-IN
+ * Transfers" says * that:
+ *
+ * After the CPU retrieves the data, it clears the
+ * RCVDAVIRQ bit.
+ *
+ * The December 2006 version has been corrected and it consistently
+ * states the second behavior is the correct one.
+ *
+ * Synchronous SPI transactions sleep so we can't perform any such
+ * transactions while holding a spin-lock (and/or while interrupts are
+ * masked). To achieve this, all SPI transactions are issued from a
+ * single thread (max3421_spi_thread).
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include <linux/platform_data/max3421-hcd.h>
+
+#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
+#define DRIVER_VERSION "1.0"
+
+/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
+#define USB_MAX_FRAME_NUMBER 0x7ff
+#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
+
+/*
+ * Max. # of times we're willing to retransmit a request immediately in
+ * response to a NAK. Afterwards, we fall back on trying once a frame.
+ */
+#define NAK_MAX_FAST_RETRANSMITS 2
+
+#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+
+/* Port-change mask: */
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
+ USB_PORT_STAT_C_ENABLE | \
+ USB_PORT_STAT_C_SUSPEND | \
+ USB_PORT_STAT_C_OVERCURRENT | \
+ USB_PORT_STAT_C_RESET) << 16)
+
+enum max3421_rh_state {
+ MAX3421_RH_RESET,
+ MAX3421_RH_SUSPENDED,
+ MAX3421_RH_RUNNING
+};
+
+enum pkt_state {
+ PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
+ PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
+ PKT_STATE_TERMINATE /* waiting to terminate control transfer */
+};
+
+enum scheduling_pass {
+ SCHED_PASS_PERIODIC,
+ SCHED_PASS_NON_PERIODIC,
+ SCHED_PASS_DONE
+};
+
+struct max3421_dma_buf {
+ u8 data[2];
+};
+
+struct max3421_hcd {
+ spinlock_t lock;
+
+ struct task_struct *spi_thread;
+
+ struct max3421_hcd *next;
+
+ enum max3421_rh_state rh_state;
+ /* lower 16 bits contain port status, upper 16 bits the change mask: */
+ u32 port_status;
+
+ unsigned active:1;
+
+ struct list_head ep_list; /* list of EP's with work */
+
+ /*
+ * The following are owned by spi_thread (may be accessed by
+ * SPI-thread without acquiring the HCD lock):
+ */
+ u8 rev; /* chip revision */
+ u16 frame_number;
+ /*
+ * kmalloc'd buffers guaranteed to be in separate (DMA)
+ * cache-lines:
+ */
+ struct max3421_dma_buf *tx;
+ struct max3421_dma_buf *rx;
+ /*
+ * URB we're currently processing. Must not be reset to NULL
+ * unless MAX3421E chip is idle:
+ */
+ struct urb *curr_urb;
+ enum scheduling_pass sched_pass;
+ struct usb_device *loaded_dev; /* dev that's loaded into the chip */
+ int loaded_epnum; /* epnum whose toggles are loaded */
+ int urb_done; /* > 0 -> no errors, < 0: errno */
+ size_t curr_len;
+ u8 hien;
+ u8 mode;
+ u8 iopins[2];
+ unsigned int do_enable_irq:1;
+ unsigned int do_reset_hcd:1;
+ unsigned int do_reset_port:1;
+ unsigned int do_check_unlink:1;
+ unsigned int do_iopin_update:1;
+#ifdef DEBUG
+ unsigned long err_stat[16];
+#endif
+};
+
+struct max3421_ep {
+ struct usb_host_endpoint *ep;
+ struct list_head ep_list;
+ u32 naks;
+ u16 last_active; /* frame # this ep was last active */
+ enum pkt_state pkt_state;
+ u8 retries;
+ u8 retransmit; /* packet needs retransmission */
+};
+
+static struct max3421_hcd *max3421_hcd_list;
+
+#define MAX3421_FIFO_SIZE 64
+
+#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
+#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
+
+/* SPI commands: */
+#define MAX3421_SPI_DIR_SHIFT 1
+#define MAX3421_SPI_REG_SHIFT 3
+
+#define MAX3421_REG_RCVFIFO 1
+#define MAX3421_REG_SNDFIFO 2
+#define MAX3421_REG_SUDFIFO 4
+#define MAX3421_REG_RCVBC 6
+#define MAX3421_REG_SNDBC 7
+#define MAX3421_REG_USBIRQ 13
+#define MAX3421_REG_USBIEN 14
+#define MAX3421_REG_USBCTL 15
+#define MAX3421_REG_CPUCTL 16
+#define MAX3421_REG_PINCTL 17
+#define MAX3421_REG_REVISION 18
+#define MAX3421_REG_IOPINS1 20
+#define MAX3421_REG_IOPINS2 21
+#define MAX3421_REG_GPINIRQ 22
+#define MAX3421_REG_GPINIEN 23
+#define MAX3421_REG_GPINPOL 24
+#define MAX3421_REG_HIRQ 25
+#define MAX3421_REG_HIEN 26
+#define MAX3421_REG_MODE 27
+#define MAX3421_REG_PERADDR 28
+#define MAX3421_REG_HCTL 29
+#define MAX3421_REG_HXFR 30
+#define MAX3421_REG_HRSL 31
+
+enum {
+ MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
+ MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
+ MAX3421_USBIRQ_VBUSIRQ_BIT
+};
+
+enum {
+ MAX3421_CPUCTL_IE_BIT = 0,
+ MAX3421_CPUCTL_PULSEWID0_BIT = 6,
+ MAX3421_CPUCTL_PULSEWID1_BIT
+};
+
+enum {
+ MAX3421_USBCTL_PWRDOWN_BIT = 4,
+ MAX3421_USBCTL_CHIPRES_BIT
+};
+
+enum {
+ MAX3421_PINCTL_GPXA_BIT = 0,
+ MAX3421_PINCTL_GPXB_BIT,
+ MAX3421_PINCTL_POSINT_BIT,
+ MAX3421_PINCTL_INTLEVEL_BIT,
+ MAX3421_PINCTL_FDUPSPI_BIT,
+ MAX3421_PINCTL_EP0INAK_BIT,
+ MAX3421_PINCTL_EP2INAK_BIT,
+ MAX3421_PINCTL_EP3INAK_BIT,
+};
+
+enum {
+ MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
+ MAX3421_HI_RWU_BIT, /* remote wakeup */
+ MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
+ MAX3421_HI_SNDBAV_BIT, /* send buffer available */
+ MAX3421_HI_SUSDN_BIT, /* suspend operation done */
+ MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
+ MAX3421_HI_FRAME_BIT, /* frame generator */
+ MAX3421_HI_HXFRDN_BIT, /* host transfer done */
+};
+
+enum {
+ MAX3421_HCTL_BUSRST_BIT = 0,
+ MAX3421_HCTL_FRMRST_BIT,
+ MAX3421_HCTL_SAMPLEBUS_BIT,
+ MAX3421_HCTL_SIGRSM_BIT,
+ MAX3421_HCTL_RCVTOG0_BIT,
+ MAX3421_HCTL_RCVTOG1_BIT,
+ MAX3421_HCTL_SNDTOG0_BIT,
+ MAX3421_HCTL_SNDTOG1_BIT
+};
+
+enum {
+ MAX3421_MODE_HOST_BIT = 0,
+ MAX3421_MODE_LOWSPEED_BIT,
+ MAX3421_MODE_HUBPRE_BIT,
+ MAX3421_MODE_SOFKAENAB_BIT,
+ MAX3421_MODE_SEPIRQ_BIT,
+ MAX3421_MODE_DELAYISO_BIT,
+ MAX3421_MODE_DMPULLDN_BIT,
+ MAX3421_MODE_DPPULLDN_BIT
+};
+
+enum {
+ MAX3421_HRSL_OK = 0,
+ MAX3421_HRSL_BUSY,
+ MAX3421_HRSL_BADREQ,
+ MAX3421_HRSL_UNDEF,
+ MAX3421_HRSL_NAK,
+ MAX3421_HRSL_STALL,
+ MAX3421_HRSL_TOGERR,
+ MAX3421_HRSL_WRONGPID,
+ MAX3421_HRSL_BADBC,
+ MAX3421_HRSL_PIDERR,
+ MAX3421_HRSL_PKTERR,
+ MAX3421_HRSL_CRCERR,
+ MAX3421_HRSL_KERR,
+ MAX3421_HRSL_JERR,
+ MAX3421_HRSL_TIMEOUT,
+ MAX3421_HRSL_BABBLE,
+ MAX3421_HRSL_RESULT_MASK = 0xf,
+ MAX3421_HRSL_RCVTOGRD_BIT = 4,
+ MAX3421_HRSL_SNDTOGRD_BIT,
+ MAX3421_HRSL_KSTATUS_BIT,
+ MAX3421_HRSL_JSTATUS_BIT
+};
+
+/* Return same error-codes as ohci.h:cc_to_error: */
+static const int hrsl_to_error[] = {
+ [MAX3421_HRSL_OK] = 0,
+ [MAX3421_HRSL_BUSY] = -EINVAL,
+ [MAX3421_HRSL_BADREQ] = -EINVAL,
+ [MAX3421_HRSL_UNDEF] = -EINVAL,
+ [MAX3421_HRSL_NAK] = -EAGAIN,
+ [MAX3421_HRSL_STALL] = -EPIPE,
+ [MAX3421_HRSL_TOGERR] = -EILSEQ,
+ [MAX3421_HRSL_WRONGPID] = -EPROTO,
+ [MAX3421_HRSL_BADBC] = -EREMOTEIO,
+ [MAX3421_HRSL_PIDERR] = -EPROTO,
+ [MAX3421_HRSL_PKTERR] = -EPROTO,
+ [MAX3421_HRSL_CRCERR] = -EILSEQ,
+ [MAX3421_HRSL_KERR] = -EIO,
+ [MAX3421_HRSL_JERR] = -EIO,
+ [MAX3421_HRSL_TIMEOUT] = -ETIME,
+ [MAX3421_HRSL_BABBLE] = -EOVERFLOW
+};
+
+/*
+ * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
+ * reasonable overview of how control transfers use the IN/OUT
+ * tokens.
+ */
+#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_SETUP 0x10
+#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
+#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
+#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
+#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
+
+#define field(val, bit) ((val) << (bit))
+
+static inline s16
+frame_diff(u16 left, u16 right)
+{
+ return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
+}
+
+static inline struct max3421_hcd *
+hcd_to_max3421(struct usb_hcd *hcd)
+{
+ return (struct max3421_hcd *) hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *
+max3421_to_hcd(struct max3421_hcd *max3421_hcd)
+{
+ return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
+}
+
+static u8
+spi_rd8(struct usb_hcd *hcd, unsigned int reg)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.rx_buf = max3421_hcd->rx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ return max3421_hcd->rx->data[1];
+}
+
+static void
+spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+ max3421_hcd->tx->data[1] = val;
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].tx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
+
+/*
+ * Figure out the correct setting for the LOWSPEED and HUBPRE mode
+ * bits. The HUBPRE bit needs to be set when MAX3421E operates at
+ * full speed, but it's talking to a low-speed device (i.e., through a
+ * hub). Setting that bit ensures that every low-speed packet is
+ * preceded by a full-speed PRE PID. Possible configurations:
+ *
+ * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
+ * FULL FULL => 0 0
+ * FULL LOW => 1 1
+ * LOW LOW => 1 0
+ * LOW FULL => 1 0
+ */
+static void
+max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
+
+ mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
+ mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
+ if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
+ mode |= mode_lowspeed;
+ mode &= ~mode_hubpre;
+ } else if (dev->speed == USB_SPEED_LOW) {
+ mode |= mode_lowspeed | mode_hubpre;
+ } else {
+ mode &= ~(mode_lowspeed | mode_hubpre);
+ }
+ if (mode != max3421_hcd->mode) {
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+ }
+
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+ int force_toggles)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int old_epnum, same_ep, rcvtog, sndtog;
+ struct usb_device *old_dev;
+ u8 hctl;
+
+ old_dev = max3421_hcd->loaded_dev;
+ old_epnum = max3421_hcd->loaded_epnum;
+
+ same_ep = (dev == old_dev && epnum == old_epnum);
+ if (same_ep && !force_toggles)
+ return;
+
+ if (old_dev && !same_ep) {
+ /* save the old end-point's toggles: */
+ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+ rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+ sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ /* no locking: HCD (i.e., we) own toggles, don't we? */
+ usb_settoggle(old_dev, old_epnum, 0, rcvtog);
+ usb_settoggle(old_dev, old_epnum, 1, sndtog);
+ }
+ /* setup new endpoint's toggle bits: */
+ rcvtog = usb_gettoggle(dev, epnum, 0);
+ sndtog = usb_gettoggle(dev, epnum, 1);
+ hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+
+ max3421_hcd->loaded_epnum = epnum;
+ spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
+
+ /*
+ * Note: devnum for one and the same device can change during
+ * address-assignment so it's best to just always load the
+ * address whenever the end-point changed/was forced.
+ */
+ max3421_hcd->loaded_dev = dev;
+ spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
+}
+
+static int
+max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
+{
+ spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
+ return MAX3421_HXFR_SETUP;
+}
+
+static int
+max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+
+ max3421_hcd->curr_len = 0;
+ max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
+ return MAX3421_HXFR_BULK_IN(epnum);
+}
+
+static int
+max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+ u32 max_packet;
+ void *src;
+
+ src = urb->transfer_buffer + urb->actual_length;
+
+ if (fast_retransmit) {
+ if (max3421_hcd->rev == 0x12) {
+ /* work around rev 0x12 bug: */
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+ spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ }
+ return MAX3421_HXFR_BULK_OUT(epnum);
+ }
+
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time.
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ max3421_hcd->urb_done = -EMSGSIZE;
+ return -EMSGSIZE;
+ }
+ max3421_hcd->curr_len = min((urb->transfer_buffer_length -
+ urb->actual_length), max_packet);
+
+ spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ return MAX3421_HXFR_BULK_OUT(epnum);
+}
+
+/*
+ * Issue the next host-transfer command.
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ int cmd = -EINVAL;
+
+ if (!urb)
+ return; /* nothing to do */
+
+ max3421_ep = urb->ep->hcpriv;
+
+ switch (max3421_ep->pkt_state) {
+ case PKT_STATE_SETUP:
+ cmd = max3421_ctrl_setup(hcd, urb);
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ cmd = max3421_transfer_in(hcd, urb);
+ else
+ cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
+ break;
+
+ case PKT_STATE_TERMINATE:
+ /*
+ * IN transfers are terminated with HS_OUT token,
+ * OUT transfers with HS_IN:
+ */
+ if (usb_urb_dir_in(urb))
+ cmd = MAX3421_HXFR_HS_OUT;
+ else
+ cmd = MAX3421_HXFR_HS_IN;
+ break;
+ }
+
+ if (cmd < 0)
+ return;
+
+ /* issue the command and wait for host-xfer-done interrupt: */
+
+ spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
+ max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
+}
+
+/*
+ * Find the next URB to process and start its execution.
+ *
+ * At this time, we do not anticipate ever connecting a USB hub to the
+ * MAX3421 chip, so at most one USB device can be connected and we can use
+ * a simplistic scheduler: at the start of a frame, schedule all
+ * periodic transfers. Once that is done, use the remainder of the
+ * frame to process non-periodic (bulk & control) transfers.
+ *
+ * Preconditions:
+ * o Caller must NOT hold HCD spinlock.
+ * o max3421_hcd->curr_urb MUST BE NULL.
+ * o MAX3421E chip must be idle.
+ */
+static int
+max3421_select_and_start_urb(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb, *curr_urb = NULL;
+ struct max3421_ep *max3421_ep;
+ int epnum, force_toggles = 0;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ for (;
+ max3421_hcd->sched_pass < SCHED_PASS_DONE;
+ ++max3421_hcd->sched_pass)
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ urb = NULL;
+ max3421_ep = container_of(pos, struct max3421_ep,
+ ep_list);
+ ep = max3421_ep->ep;
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_PERIODIC)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_NON_PERIODIC)
+ continue;
+ break;
+ }
+
+ if (list_empty(&ep->urb_list))
+ continue; /* nothing to do */
+ urb = list_first_entry(&ep->urb_list, struct urb,
+ urb_list);
+ if (urb->unlinked) {
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ max3421_hcd->curr_urb = urb;
+ max3421_hcd->urb_done = 1;
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ return 1;
+ }
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /*
+ * Allow one control transaction per
+ * frame per endpoint:
+ */
+ if (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number) == 0)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_ep->retransmit
+ && (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number)
+ == 0))
+ /*
+ * We already tried this EP
+ * during this frame and got a
+ * NAK or error; wait for next frame
+ */
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (frame_diff(max3421_hcd->frame_number,
+ max3421_ep->last_active)
+ < urb->interval)
+ /*
+ * We already processed this
+ * end-point in the current
+ * frame
+ */
+ continue;
+ break;
+ }
+
+ /* move current ep to tail: */
+ list_move_tail(pos, &max3421_hcd->ep_list);
+ curr_urb = urb;
+ goto done;
+ }
+done:
+ if (!curr_urb) {
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return 0;
+ }
+
+ urb = max3421_hcd->curr_urb = curr_urb;
+ epnum = usb_endpoint_num(&urb->ep->desc);
+ if (max3421_ep->retransmit)
+ /* restart (part of) a USB transaction: */
+ max3421_ep->retransmit = 0;
+ else {
+ /* start USB transaction: */
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ /*
+ * See USB 2.0 spec section 8.6.1
+ * Initialization via SETUP Token:
+ */
+ usb_settoggle(urb->dev, epnum, 0, 1);
+ usb_settoggle(urb->dev, epnum, 1, 1);
+ max3421_ep->pkt_state = PKT_STATE_SETUP;
+ force_toggles = 1;
+ } else
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+ max3421_set_speed(hcd, urb->dev);
+ max3421_next_transfer(hcd, 0);
+ return 1;
+}
+
+/*
+ * Check all endpoints for URBs that got unlinked.
+ *
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_check_unlink(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct list_head *pos, *upos, *next_upos;
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+ list_for_each_safe(upos, next_upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ if (urb->unlinked) {
+ retval = 1;
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ usb_hcd_giveback_urb(hcd, urb, 0);
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_slow_retransmit(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+
+ max3421_ep = urb->ep->hcpriv;
+ max3421_ep->retransmit = 1;
+ max3421_hcd->curr_urb = NULL;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_recv_data_available(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ size_t remaining, transfer_size;
+ u8 rcvbc;
+
+ rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
+
+ if (rcvbc > MAX3421_FIFO_SIZE)
+ rcvbc = MAX3421_FIFO_SIZE;
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ remaining = 0;
+ else
+ remaining = urb->transfer_buffer_length - urb->actual_length;
+ transfer_size = rcvbc;
+ if (transfer_size > remaining)
+ transfer_size = remaining;
+ if (transfer_size > 0) {
+ void *dst = urb->transfer_buffer + urb->actual_length;
+
+ spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
+ urb->actual_length += transfer_size;
+ max3421_hcd->curr_len = transfer_size;
+ }
+
+ /* ack the RCVDAV irq now that the FIFO has been read: */
+ spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
+}
+
+static void
+max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep = urb->ep->hcpriv;
+ int switch_sndfifo;
+
+ /*
+ * If an OUT command results in any response other than OK
+ * (i.e., error or NAK), we have to perform a dummy-write to
+ * SNDBC so the FIFO gets switched back to us. Otherwise, we
+ * get out of sync with the SNDFIFO double buffer.
+ */
+ switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
+ usb_urb_dir_out(urb));
+
+ switch (result_code) {
+ case MAX3421_HRSL_OK:
+ return; /* this shouldn't happen */
+
+ case MAX3421_HRSL_WRONGPID: /* received wrong PID */
+ case MAX3421_HRSL_BUSY: /* SIE busy */
+ case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
+ case MAX3421_HRSL_UNDEF: /* reserved */
+ case MAX3421_HRSL_KERR: /* K-state instead of response */
+ case MAX3421_HRSL_JERR: /* J-state instead of response */
+ /*
+ * packet experienced an error that we cannot recover
+ * from; report error
+ */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ break;
+
+ case MAX3421_HRSL_TOGERR:
+ if (usb_urb_dir_in(urb))
+ ; /* don't do anything (device will switch toggle) */
+ else {
+ /* flip the send toggle bit: */
+ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ sndtog ^= 1;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+ }
+ /* FALL THROUGH */
+ case MAX3421_HRSL_BADBC: /* bad byte count */
+ case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
+ case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
+ case MAX3421_HRSL_CRCERR: /* CRC error */
+ case MAX3421_HRSL_BABBLE: /* device talked too long */
+ case MAX3421_HRSL_TIMEOUT:
+ if (max3421_ep->retries++ < USB_MAX_RETRIES)
+ /* retry the packet again in the next frame */
+ max3421_slow_retransmit(hcd);
+ else {
+ /* Based on ohci.h cc_to_err[]: */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ }
+ break;
+
+ case MAX3421_HRSL_STALL:
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ break;
+
+ case MAX3421_HRSL_NAK:
+ /*
+ * Device wasn't ready for data or has no data
+ * available: retry the packet again.
+ */
+ if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
+ max3421_next_transfer(hcd, 1);
+ switch_sndfifo = 0;
+ } else
+ max3421_slow_retransmit(hcd);
+ break;
+ }
+ if (switch_sndfifo)
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 max_packet;
+
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ return 1; /* read is complete, so we're done */
+
+ /*
+ * USB 2.0 Section 5.3.2 Pipes: packets must be full size
+ * except for last one.
+ */
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time...
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ return -EINVAL;
+ }
+
+ if (max3421_hcd->curr_len < max_packet) {
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ /*
+ * remaining > 0 and received an
+ * unexpected partial packet ->
+ * error
+ */
+ return -EREMOTEIO;
+ } else
+ /* short read, but it's OK */
+ return 1;
+ }
+ return 0; /* not done */
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ urb->actual_length += max3421_hcd->curr_len;
+ if (urb->actual_length < urb->transfer_buffer_length)
+ return 0;
+ if (urb->transfer_flags & URB_ZERO_PACKET) {
+ /*
+ * Some hardware needs a zero-size packet at the end
+ * of a bulk-out transfer if the last transfer was a
+ * full-sized packet (i.e., such hardware uses <
+ * max_packet as an indicator that the end of the
+ * packet has been reached).
+ */
+ u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max3421_hcd->curr_len == max_packet)
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_host_transfer_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ u8 result_code, hrsl;
+ int urb_done = 0;
+
+ max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT));
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+ result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+
+#ifdef DEBUG
+ ++max3421_hcd->err_stat[result_code];
+#endif
+
+ max3421_ep = urb->ep->hcpriv;
+
+ if (unlikely(result_code != MAX3421_HRSL_OK)) {
+ max3421_handle_error(hcd, hrsl);
+ return;
+ }
+
+ max3421_ep->naks = 0;
+ max3421_ep->retries = 0;
+ switch (max3421_ep->pkt_state) {
+
+ case PKT_STATE_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ else
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ urb_done = max3421_transfer_in_done(hcd, urb);
+ else
+ urb_done = max3421_transfer_out_done(hcd, urb);
+ if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
+ /*
+ * We aren't really done - we still need to
+ * terminate the control transfer:
+ */
+ max3421_hcd->urb_done = urb_done = 0;
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ }
+ break;
+
+ case PKT_STATE_TERMINATE:
+ urb_done = 1;
+ break;
+ }
+
+ if (urb_done)
+ max3421_hcd->urb_done = urb_done;
+ else
+ max3421_next_transfer(hcd, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_detect_conn(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned int jk, have_conn = 0;
+ u32 old_port_status, chg;
+ unsigned long flags;
+ u8 hrsl, mode;
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+ jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
+ (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
+
+ mode = max3421_hcd->mode;
+
+ switch (jk) {
+ case 0x0: /* SE0: disconnect */
+ /*
+ * Turn off SOFKAENAB bit to avoid getting interrupt
+ * every milli-second:
+ */
+ mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ break;
+
+ case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
+ case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
+ if (jk == 0x2)
+ /* need to switch to the other speed: */
+ mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
+ /* turn on SOFKAENAB bit: */
+ mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ have_conn = 1;
+ break;
+
+ case 0x3: /* illegal */
+ break;
+ }
+
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ old_port_status = max3421_hcd->port_status;
+ if (have_conn)
+ max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
+ if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
+ max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
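
The J/K decoding above relies on the usual full-/low-speed idle-state convention: which data line idles high depends on the attached device's speed, so a bus sample means either "same speed as currently configured", "other speed", SE0 (disconnect), or the illegal SE1 state. A rough standalone sketch of the same classification (enum and helper names are made up):

    #include <stdint.h>

    enum bus_state {
            BUS_DISCONNECT,          /* SE0: both lines low, nothing attached */
            BUS_CONNECT_SAME_SPEED,  /* idle state matches configured speed   */
            BUS_CONNECT_OTHER_SPEED, /* need to toggle the LOWSPEED setting   */
            BUS_ILLEGAL,             /* SE1: both lines high, invalid         */
    };

    /* jk has bit 0 = J-state sample and bit 1 = K-state sample. */
    static enum bus_state classify_jk(uint8_t jk)
    {
            switch (jk & 0x3) {
            case 0x0: return BUS_DISCONNECT;
            case 0x1: return BUS_CONNECT_SAME_SPEED;
            case 0x2: return BUS_CONNECT_OTHER_SPEED;
            default:  return BUS_ILLEGAL;
            }
    }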
+
+static irqreturn_t
+max3421_irq_handler(int irq, void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ if (max3421_hcd->spi_thread &&
+ max3421_hcd->spi_thread->state != TASK_RUNNING)
+ wake_up_process(max3421_hcd->spi_thread);
+ if (!max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 1;
+ disable_irq_nosync(spi->irq);
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef DEBUG
+
+static void
+dump_eps(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos, *upos;
+ char ubuf[512], *dp, *end;
+ unsigned long flags;
+ struct urb *urb;
+ int epnum, ret;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+
+ dp = ubuf;
+ end = dp + sizeof(ubuf);
+ *dp = '\0';
+ list_for_each(upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+ usb_pipetype(urb->pipe),
+ usb_urb_dir_in(urb) ? "IN" : "OUT",
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+
+ epnum = usb_endpoint_num(&ep->desc);
+ pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
+ epnum, max3421_ep->pkt_state, max3421_ep->last_active,
+ max3421_ep->retries, max3421_ep->naks,
+ max3421_ep->retransmit, ubuf);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+#endif /* DEBUG */
+
+/* Return zero if no work was performed, 1 otherwise. */
+static int
+max3421_handle_irqs(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 chg, old_port_status;
+ unsigned long flags;
+ u8 hirq;
+
+ /*
+ * Read and ack pending interrupts (CPU must never
+ * clear SNDBAV directly and RCVDAV must be cleared by
+ * max3421_recv_data_available()!):
+ */
+ hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
+ hirq &= max3421_hcd->hien;
+ if (!hirq)
+ return 0;
+
+ spi_wr8(hcd, MAX3421_REG_HIRQ,
+ hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT)));
+
+ if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
+ max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
+ & USB_MAX_FRAME_NUMBER);
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ }
+
+ if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
+ max3421_recv_data_available(hcd);
+
+ if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
+ max3421_host_transfer_done(hcd);
+
+ if (hirq & BIT(MAX3421_HI_CONDET_BIT))
+ max3421_detect_conn(hcd);
+
+ /*
+ * Now process interrupts that may affect HCD state
+ * other than the end-points:
+ */
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ old_port_status = max3421_hcd->port_status;
+ if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
+ if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
+ /* BUSEVENT due to completion of Bus Reset */
+ max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
+ max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
+ } else {
+ /* BUSEVENT due to completion of Bus Resume */
+ pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
+ }
+ }
+ if (hirq & BIT(MAX3421_HI_RWU_BIT))
+ pr_info("%s: RWU\n", __func__);
+ if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
+ pr_info("%s: SUSDN\n", __func__);
+
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+#ifdef DEBUG
+ {
+ static unsigned long last_time;
+ char sbuf[16 * 16], *dp, *end;
+ int i;
+
+ if (jiffies - last_time > 5*HZ) {
+ dp = sbuf;
+ end = sbuf + sizeof(sbuf);
+ *dp = '\0';
+ for (i = 0; i < 16; ++i) {
+ int ret = snprintf(dp, end - dp, " %lu",
+ max3421_hcd->err_stat[i]);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+ pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
+ memset(max3421_hcd->err_stat, 0,
+ sizeof(max3421_hcd->err_stat));
+ last_time = jiffies;
+
+ dump_eps(hcd);
+ }
+ }
+#endif
+ return 1;
+}
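
The "chg << 16" bookkeeping above (also used in max3421_detect_conn()) keeps port_status in the same layout a hub reports over the wire: the low 16 bits are wPortStatus and the high 16 bits accumulate wPortChange, which is exactly how GetPortStatus later returns the value. A small self-contained sketch of that update, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a new status word into a combined status(15:0)/change(31:16) value. */
    static uint32_t update_port_status(uint32_t cur, uint16_t new_status)
    {
            uint16_t old_status = cur & 0xffff;
            uint16_t chg = old_status ^ new_status; /* bits that flipped */

            return (cur & 0xffff0000) | ((uint32_t)chg << 16) | new_status;
    }

    int main(void)
    {
            uint32_t ps = 0x0100;                   /* powered, no device   */

            ps = update_port_status(ps, 0x0101);    /* connection detected  */
            printf("status=%04x change=%04x\n",
                   (unsigned)(ps & 0xffff), (unsigned)(ps >> 16));
            return 0;
    }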
+
+static int
+max3421_reset_hcd(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int timeout;
+
+ /* perform a chip reset and wait for OSCIRQ signal to appear: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
+ /* clear reset: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
+ timeout = 1000;
+ while (1) {
+ if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
+ & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
+ break;
+ if (--timeout < 0) {
+ dev_err(&spi->dev,
+ "timed out waiting for oscillator OK signal");
+ return 1;
+ }
+ cond_resched();
+ }
+
+ /*
+ * Turn on host mode, automatic generation of SOF packets, and
+ * enable pull-down resistors on DM/DP:
+ */
+ max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
+ BIT(MAX3421_MODE_SOFKAENAB_BIT) |
+ BIT(MAX3421_MODE_DMPULLDN_BIT) |
+ BIT(MAX3421_MODE_DPPULLDN_BIT));
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ /* reset frame-number: */
+ max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
+
+ /* sample the state of the D+ and D- lines */
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
+ max3421_detect_conn(hcd);
+
+ /* enable frame, connection-detected, and bus-event interrupts: */
+ max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
+ BIT(MAX3421_HI_CONDET_BIT) |
+ BIT(MAX3421_HI_BUSEVENT_BIT));
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ /* enable interrupts: */
+ spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
+ return 1;
+}
+
+static int
+max3421_urb_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ struct urb *urb;
+ int status;
+
+ status = max3421_hcd->urb_done;
+ max3421_hcd->urb_done = 0;
+ if (status > 0)
+ status = 0;
+ urb = max3421_hcd->curr_urb;
+ if (urb) {
+ max3421_hcd->curr_urb = NULL;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ /* must be called without the HCD spinlock: */
+ usb_hcd_giveback_urb(hcd, urb, status);
+ }
+ return 1;
+}
+
+static int
+max3421_spi_thread(void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int i, i_worked = 1;
+
+ /* set full-duplex SPI mode, low-active interrupt pin: */
+ spi_wr8(hcd, MAX3421_REG_PINCTL,
+ (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
+ BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
+
+ while (!kthread_should_stop()) {
+ max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
+ if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
+ break;
+ dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
+ msleep(10000);
+ }
+ dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
+ max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
+ spi->irq);
+
+ while (!kthread_should_stop()) {
+ if (!i_worked) {
+ /*
+ * We'll be waiting for wakeups from the hard
+ * interrupt handler, so now is a good time to
+ * sync our hien with the chip:
+ */
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 0;
+ enable_irq(spi->irq);
+ }
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ i_worked = 0;
+
+ if (max3421_hcd->urb_done)
+ i_worked |= max3421_urb_done(hcd);
+ else if (max3421_handle_irqs(hcd))
+ i_worked = 1;
+ else if (!max3421_hcd->curr_urb)
+ i_worked |= max3421_select_and_start_urb(hcd);
+
+ if (max3421_hcd->do_reset_hcd) {
+ /* reset the HCD: */
+ max3421_hcd->do_reset_hcd = 0;
+ i_worked |= max3421_reset_hcd(hcd);
+ }
+ if (max3421_hcd->do_reset_port) {
+ /* perform a USB bus reset: */
+ max3421_hcd->do_reset_port = 0;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(MAX3421_HCTL_BUSRST_BIT));
+ i_worked = 1;
+ }
+ if (max3421_hcd->do_check_unlink) {
+ max3421_hcd->do_check_unlink = 0;
+ i_worked |= max3421_check_unlink(hcd);
+ }
+ if (max3421_hcd->do_iopin_update) {
+ /*
+ * IOPINS1/IOPINS2 do not auto-increment, so we can't
+ * use spi_wr_buf().
+ */
+ for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+ u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
+
+ val = ((val & 0xf0) |
+ (max3421_hcd->iopins[i] & 0x0f));
+ spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
+ max3421_hcd->iopins[i] = val;
+ }
+ max3421_hcd->do_iopin_update = 0;
+ i_worked = 1;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ dev_info(&spi->dev, "SPI thread exiting");
+ return 0;
+}
+
+static int
+max3421_reset_port(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED);
+ max3421_hcd->do_reset_port = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_reset(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ hcd->self.sg_tablesize = 0;
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_FULL;
+ max3421_hcd->do_reset_hcd = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_start(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ spin_lock_init(&max3421_hcd->lock);
+ max3421_hcd->rh_state = MAX3421_RH_RUNNING;
+
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ hcd->power_budget = POWER_BUDGET;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+ return 0;
+}
+
+static void
+max3421_stop(struct usb_hcd *hcd)
+{
+}
+
+static int
+max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ unsigned long flags;
+ int retval;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ case PIPE_ISOCHRONOUS:
+ if (urb->interval < 0) {
+ dev_err(&spi->dev,
+ "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
+ __func__, urb->interval);
+ return -EINVAL;
+ }
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ max3421_ep = urb->ep->hcpriv;
+ if (!max3421_ep) {
+ /* gets freed in max3421_endpoint_disable: */
+ max3421_ep = kzalloc(sizeof(struct max3421_ep), mem_flags);
+ if (!max3421_ep) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ max3421_ep->ep = urb->ep;
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ urb->ep->hcpriv = max3421_ep;
+
+ list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
+ }
+
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ /* Since we added to the queue, restart scheduling: */
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+
+out:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ /*
+ * This will set urb->unlinked which in turn causes the entry
+ * to be dropped at the next opportunity.
+ */
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval == 0) {
+ max3421_hcd->do_check_unlink = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static void
+max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ if (ep->hcpriv) {
+ struct max3421_ep *max3421_ep = ep->hcpriv;
+
+ /* remove myself from the ep_list: */
+ if (!list_empty(&max3421_ep->ep_list))
+ list_del(&max3421_ep->ep_list);
+ kfree(max3421_ep);
+ ep->hcpriv = NULL;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static int
+max3421_get_frame_number(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ return max3421_hcd->frame_number;
+}
+
+/*
+ * Should return a non-zero value when any port is undergoing a resume
+ * transition while the root hub is suspended.
+ */
+static int
+max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto done;
+
+ *buf = 0;
+ if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
+ *buf = (1 << 1); /* port 1 has a status change */
+ dev_dbg(hcd->self.controller,
+ "port status 0x%08x has changes\n",
+ max3421_hcd->port_status);
+ retval = 1;
+ if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+ }
+done:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static inline void
+hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /*
+ * See Table 11-13: Hub Descriptor in USB 2.0 spec.
+ */
+ desc->bDescriptorType = 0x29; /* hub descriptor */
+ desc->bDescLength = 9;
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->bNbrPorts = 1;
+}
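
For reference, the descriptor built above corresponds to the following nine bytes from Table 11-13; wHubCharacteristics = 0x0001 selects per-port power switching, and the remaining fields stay at the zero the memset() provides:

    #include <stdint.h>

    /* One-port hub descriptor, byte for byte, as hub_descriptor() fills it in. */
    static const uint8_t one_port_hub_desc[9] = {
            9,          /* bDescLength */
            0x29,       /* bDescriptorType: hub */
            1,          /* bNbrPorts */
            0x01, 0x00, /* wHubCharacteristics, little-endian */
            0,          /* bPwrOn2PwrGood (left zero) */
            0,          /* bHubContrCurrent (left zero) */
            0,          /* DeviceRemovable (left zero) */
            0,          /* PortPwrCtrlMask (left zero) */
    };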
+
+/*
+ * Set the MAX3421E general-purpose output with number PIN_NUMBER to
+ * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For
+ * any other value, this function acts as a no-op.
+ */
+static void
+max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mask, idx;
+
+ --pin_number;
+ if (pin_number > 7)
+ return;
+
+ mask = 1u << pin_number;
+ idx = pin_number / 4;
+
+ if (value)
+ max3421_hcd->iopins[idx] |= mask;
+ else
+ max3421_hcd->iopins[idx] &= ~mask;
+ max3421_hcd->do_iopin_update = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+}
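
As a quick worked example of the arithmetic above: GPOUT numbers are 1-based, so after the decrement pins 1-4 select iopins[0] (IOPINS1) and pins 5-8 select iopins[1] (IOPINS2), with a single-bit mask derived from the decremented pin number. A purely illustrative userspace version (range checking omitted; the driver does it):

    #include <stdint.h>
    #include <stdio.h>

    /* Same index/mask computation as max3421_gpout_set_value() above. */
    static void gpout_to_reg(uint8_t pin_number, uint8_t *idx, uint8_t *mask)
    {
            --pin_number;              /* GPOUT numbers are 1-based */
            *idx  = pin_number / 4;    /* 0 -> IOPINS1, 1 -> IOPINS2 */
            *mask = 1u << pin_number;
    }

    int main(void)
    {
            uint8_t idx, mask;

            gpout_to_reg(3, &idx, &mask);
            printf("GPOUT3 -> iopins[%u], mask 0x%02x\n", idx, mask);
            return 0;
    }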
+
+static int
+max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+ char *buf, u16 length)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_hcd_platform_data *pdata;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ pdata = spi->dev.platform_data;
+
+ switch (type_req) {
+ case ClearHubFeature:
+ break;
+ case ClearPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_SUSPEND:
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-off\n");
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ !pdata->vbus_active_level);
+ /* FALLS THROUGH */
+ default:
+ max3421_hcd->port_status &= ~(1 << value);
+ }
+ break;
+ case GetHubDescriptor:
+ hub_descriptor((struct usb_hub_descriptor *) buf);
+ break;
+
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ case GetPortErrorCount:
+ case SetHubDepth:
+ /* USB3 only */
+ goto error;
+
+ case GetHubStatus:
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+
+ case GetPortStatus:
+ if (index != 1) {
+ retval = -EPIPE;
+ goto error;
+ }
+ ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
+ ((__le16 *) buf)[1] =
+ cpu_to_le16(max3421_hcd->port_status >> 16);
+ break;
+
+ case SetHubFeature:
+ retval = -EPIPE;
+ break;
+
+ case SetPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_LINK_STATE:
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ goto error;
+ case USB_PORT_FEAT_SUSPEND:
+ if (max3421_hcd->active)
+ max3421_hcd->port_status |=
+ USB_PORT_STAT_SUSPEND;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-on\n");
+ max3421_hcd->port_status |= USB_PORT_STAT_POWER;
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ pdata->vbus_active_level);
+ break;
+ case USB_PORT_FEAT_RESET:
+ max3421_reset_port(hcd);
+ /* FALLS THROUGH */
+ default:
+ if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
+ != 0)
+ max3421_hcd->port_status |= (1 << value);
+ }
+ break;
+
+ default:
+ dev_dbg(hcd->self.controller,
+ "hub control req%04x v%04x i%04x l%d\n",
+ type_req, value, index, length);
+error: /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_bus_suspend(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+static int
+max3421_bus_resume(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+/*
+ * The SPI driver already takes care of DMA-mapping/unmapping, so no
+ * reason to do it twice.
+ */
+static int
+max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ return 0;
+}
+
+static void
+max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+}
+
+static struct hc_driver max3421_hcd_desc = {
+ .description = "max3421",
+ .product_desc = DRIVER_DESC,
+ .hcd_priv_size = sizeof(struct max3421_hcd),
+ .flags = HCD_USB11,
+ .reset = max3421_reset,
+ .start = max3421_start,
+ .stop = max3421_stop,
+ .get_frame_number = max3421_get_frame_number,
+ .urb_enqueue = max3421_urb_enqueue,
+ .urb_dequeue = max3421_urb_dequeue,
+ .map_urb_for_dma = max3421_map_urb_for_dma,
+ .unmap_urb_for_dma = max3421_unmap_urb_for_dma,
+ .endpoint_disable = max3421_endpoint_disable,
+ .hub_status_data = max3421_hub_status_data,
+ .hub_control = max3421_hub_control,
+ .bus_suspend = max3421_bus_suspend,
+ .bus_resume = max3421_bus_resume,
+};
+
+static int
+max3421_probe(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd;
+ struct usb_hcd *hcd = NULL;
+ int retval = -ENOMEM;
+
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI bus");
+ return -EFAULT;
+ }
+
+ hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
+ dev_name(&spi->dev));
+ if (!hcd) {
+ dev_err(&spi->dev, "failed to create HCD structure\n");
+ goto error;
+ }
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ max3421_hcd = hcd_to_max3421(hcd);
+ max3421_hcd->next = max3421_hcd_list;
+ max3421_hcd_list = max3421_hcd;
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
+ if (!max3421_hcd->tx) {
+ dev_err(&spi->dev, "failed to kmalloc tx buffer\n");
+ goto error;
+ }
+ max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
+ if (!max3421_hcd->rx) {
+ dev_err(&spi->dev, "failed to kmalloc rx buffer\n");
+ goto error;
+ }
+
+ max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
+ "max3421_spi_thread");
+ if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) {
+ dev_err(&spi->dev,
+ "failed to create SPI thread (out of memory)\n");
+ goto error;
+ }
+
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval) {
+ dev_err(&spi->dev, "failed to add HCD\n");
+ goto error;
+ }
+
+ retval = request_irq(spi->irq, max3421_irq_handler,
+ IRQF_TRIGGER_LOW, "max3421", hcd);
+ if (retval < 0) {
+ dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
+ goto error;
+ }
+ return 0;
+
+error:
+ if (hcd) {
+ kfree(max3421_hcd->tx);
+ kfree(max3421_hcd->rx);
+ if (max3421_hcd->spi_thread)
+ kthread_stop(max3421_hcd->spi_thread);
+ usb_put_hcd(hcd);
+ }
+ return retval;
+}
+
+static int
+max3421_remove(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd = NULL, **prev;
+ struct usb_hcd *hcd = NULL;
+ unsigned long flags;
+
+ for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
+ max3421_hcd = *prev;
+ hcd = max3421_to_hcd(max3421_hcd);
+ if (hcd->self.controller == &spi->dev)
+ break;
+ }
+ if (!max3421_hcd) {
+ dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
+ spi);
+ return -ENODEV;
+ }
+
+ usb_remove_hcd(hcd);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ kthread_stop(max3421_hcd->spi_thread);
+ *prev = max3421_hcd->next;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ free_irq(spi->irq, hcd);
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static struct spi_driver max3421_driver = {
+ .probe = max3421_probe,
+ .remove = max3421_remove,
+ .driver = {
+ .name = "max3421-hcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(max3421_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 091ae49..e49eb4f 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -46,9 +46,6 @@ static const char hcd_name[] = "ohci-atmel";
static struct hc_driver __read_mostly ohci_at91_hc_driver;
static int clocked;
-static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
- u16 wValue, u16 wIndex, char *buf, u16 wLength);
-static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
extern int usb_disabled(void);
@@ -262,7 +259,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
- int length = orig_ohci_hub_status_data(hcd, buf);
+ int length = ohci_hub_status_data(hcd, buf);
int port;
at91_for_each_port(port) {
@@ -340,8 +337,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
}
- ret = orig_ohci_hub_control(hcd, typeReq, wValue, wIndex + 1,
- buf, wLength);
+ ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
if (ret)
goto out;
@@ -690,9 +686,6 @@ static int __init ohci_at91_init(void)
* too easy.
*/
- orig_ohci_hub_control = ohci_at91_hc_driver.hub_control;
- orig_ohci_hub_status_data = ohci_at91_hc_driver.hub_status_data;
-
ohci_at91_hc_driver.hub_status_data = ohci_at91_hub_status_data;
ohci_at91_hc_driver.hub_control = ohci_at91_hub_control;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 68588d8..060a6a4 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb/phy.h>
#include <linux/usb/samsung_usb_phy.h>
#include <linux/usb.h>
@@ -33,28 +34,110 @@ static struct hc_driver __read_mostly exynos_ohci_hc_driver;
#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
+#define PHY_NUMBER 3
+
struct exynos_ohci_hcd {
struct clk *clk;
struct usb_phy *phy;
struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
};
-static void exynos_ohci_phy_enable(struct platform_device *pdev)
+static int exynos_ohci_get_phy(struct device *dev,
+ struct exynos_ohci_hcd *exynos_ohci)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ohci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ohci->phy)) {
+ ret = PTR_ERR(exynos_ohci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ohci->otg = exynos_ohci->phy->otg;
+ }
+
+ /*
+ * Getting generic phy:
+ * We are keeping both types of phys as a part of transitioning OHCI
+ * to the generic phy framework, so as to maintain backward
+ * compatibility with old DTBs.
+ * Before support for the old bindings can be removed from this
+ * driver, we need to make sure that devices still using DTBs built
+ * from the old DTS have them updated to ones built from the new DTS.
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ohci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ohci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ohci->phy))
+ return usb_phy_init(exynos_ohci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ ret = phy_power_on(exynos_ohci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
- if (exynos_ohci->phy)
- usb_phy_init(exynos_ohci->phy);
+ return ret;
}
-static void exynos_ohci_phy_disable(struct platform_device *pdev)
+static void exynos_ohci_phy_disable(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
- if (exynos_ohci->phy)
+ if (!IS_ERR(exynos_ohci->phy)) {
usb_phy_shutdown(exynos_ohci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
}
static int exynos_ohci_probe(struct platform_device *pdev)
@@ -62,7 +145,6 @@ static int exynos_ohci_probe(struct platform_device *pdev)
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
struct resource *res;
- struct usb_phy *phy;
int irq;
int err;
@@ -88,15 +170,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
"samsung,exynos5440-ohci"))
goto skip_phy;
- phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(phy)) {
- usb_put_hcd(hcd);
- dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- } else {
- exynos_ohci->phy = phy;
- exynos_ohci->otg = phy->otg;
- }
+ err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
+ if (err)
+ goto fail_clk;
skip_phy:
exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
@@ -120,10 +196,9 @@ skip_phy:
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to remap I/O memory\n");
- err = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
goto fail_io;
}
@@ -139,7 +214,11 @@ skip_phy:
platform_set_drvdata(pdev, hcd);
- exynos_ohci_phy_enable(pdev);
+ err = exynos_ohci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
@@ -150,7 +229,7 @@ skip_phy:
return 0;
fail_add_hcd:
- exynos_ohci_phy_disable(pdev);
+ exynos_ohci_phy_disable(&pdev->dev);
fail_io:
clk_disable_unprepare(exynos_ohci->clk);
fail_clk:
@@ -168,7 +247,7 @@ static int exynos_ohci_remove(struct platform_device *pdev)
if (exynos_ohci->otg)
exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_disable(pdev);
+ exynos_ohci_phy_disable(&pdev->dev);
clk_disable_unprepare(exynos_ohci->clk);
@@ -190,26 +269,19 @@ static int exynos_ohci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
- unsigned long flags;
int rc = ohci_suspend(hcd, do_wakeup);
if (rc)
return rc;
- spin_lock_irqsave(&ohci->lock, flags);
-
if (exynos_ohci->otg)
exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_disable(pdev);
+ exynos_ohci_phy_disable(dev);
clk_disable_unprepare(exynos_ohci->clk);
- spin_unlock_irqrestore(&ohci->lock, flags);
-
return 0;
}
@@ -217,14 +289,19 @@ static int exynos_ohci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
- struct platform_device *pdev = to_platform_device(dev);
+ int ret;
clk_prepare_enable(exynos_ohci->clk);
if (exynos_ohci->otg)
exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_enable(pdev);
+ ret = exynos_ohci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ohci->clk);
+ return ret;
+ }
ohci_resume(hcd, false);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 3586460..f98d03f 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1178,7 +1178,7 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
#include "ohci-da8xx.c"
#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index cd871b8..b4940de 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -456,8 +456,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
/* build "status change" packet (one or two bytes) from HC registers */
-static int
-ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
+int ohci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int i, changed = 0, length = 1;
@@ -522,6 +521,7 @@ done:
return changed ? length : 0;
}
+EXPORT_SYMBOL_GPL(ohci_hub_status_data);
/*-------------------------------------------------------------------------*/
@@ -664,7 +664,7 @@ static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
return 0;
}
-static int ohci_hub_control (
+int ohci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -790,4 +790,4 @@ error:
}
return retval;
}
-
+EXPORT_SYMBOL_GPL(ohci_hub_control);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index b6002c9..4369299 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -36,6 +37,7 @@
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
+ struct reset_control *rst;
struct phy *phy;
};
@@ -191,6 +193,19 @@ static int ohci_platform_probe(struct platform_device *dev)
break;
}
}
+
+ }
+
+ priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->rst = NULL;
+ } else {
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ goto err_put_clks;
}
if (pdata->big_endian_desc)
@@ -203,7 +218,7 @@ static int ohci_platform_probe(struct platform_device *dev)
dev_err(&dev->dev,
"Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
err = -EINVAL;
- goto err_put_clks;
+ goto err_reset;
}
#endif
#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
@@ -211,14 +226,14 @@ static int ohci_platform_probe(struct platform_device *dev)
dev_err(&dev->dev,
"Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
err = -EINVAL;
- goto err_put_clks;
+ goto err_reset;
}
#endif
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- goto err_put_clks;
+ goto err_reset;
}
hcd->rsrc_start = res_mem->start;
@@ -242,6 +257,9 @@ static int ohci_platform_probe(struct platform_device *dev)
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_reset:
+ if (priv->rst)
+ reset_control_assert(priv->rst);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -266,6 +284,9 @@ static int ohci_platform_remove(struct platform_device *dev)
if (pdata->power_off)
pdata->power_off(dev);
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index d21d5fe..e68f3d0 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -30,6 +30,7 @@
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -120,6 +121,8 @@ static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
struct pxa27x_ohci {
struct clk *clk;
void __iomem *mmio_base;
+ struct regulator *vbus[3];
+ bool vbus_enabled[3];
};
#define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv)
@@ -166,6 +169,52 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
return 0;
}
+static int pxa27x_ohci_set_vbus_power(struct pxa27x_ohci *pxa_ohci,
+ unsigned int port, bool enable)
+{
+ struct regulator *vbus = pxa_ohci->vbus[port];
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(vbus))
+ return 0;
+
+ if (enable && !pxa_ohci->vbus_enabled[port])
+ ret = regulator_enable(vbus);
+ else if (!enable && pxa_ohci->vbus_enabled[port])
+ ret = regulator_disable(vbus);
+
+ if (ret < 0)
+ return ret;
+
+ pxa_ohci->vbus_enabled[port] = enable;
+
+ return 0;
+}
+
+static int pxa27x_ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ int ret;
+
+ switch (typeReq) {
+ case SetPortFeature:
+ case ClearPortFeature:
+ if (!wIndex || wIndex > 3)
+ return -EPIPE;
+
+ if (wValue != USB_PORT_FEAT_POWER)
+ break;
+
+ ret = pxa27x_ohci_set_vbus_power(pxa_ohci, wIndex - 1,
+ typeReq == SetPortFeature);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
/*-------------------------------------------------------------------------*/
static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
@@ -372,6 +421,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
struct ohci_hcd *ohci;
struct resource *r;
struct clk *usb_clk;
+ unsigned int i;
retval = ohci_pxa_of_init(pdev);
if (retval)
@@ -417,6 +467,16 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
pxa_ohci->clk = usb_clk;
pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
+ for (i = 0; i < 3; ++i) {
+ char name[6];
+
+ if (!(inf->flags & (ENABLE_PORT1 << i)))
+ continue;
+
+ sprintf(name, "vbus%u", i + 1);
+ pxa_ohci->vbus[i] = devm_regulator_get(&pdev->dev, name);
+ }
+
retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
if (retval < 0) {
pr_debug("pxa27x_start_hc failed");
@@ -462,9 +522,14 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ unsigned int i;
usb_remove_hcd(hcd);
pxa27x_stop_hc(pxa_ohci, &pdev->dev);
+
+ for (i = 0; i < 3; ++i)
+ pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
+
usb_put_hcd(hcd);
}
@@ -563,7 +628,10 @@ static int __init ohci_pxa27x_init(void)
return -ENODEV;
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
+ ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
+
return platform_driver_register(&ohci_hcd_pxa27x_driver);
}
module_init(ohci_pxa27x_init);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index ff7c8f1..3d753a9 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -45,10 +45,6 @@ static struct clk *usb_clk;
/* forward definitions */
-static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
- u16 wValue, u16 wIndex, char *buf, u16 wLength);
-static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
-
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
/* conversion functions */
@@ -110,7 +106,7 @@ ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
int orig;
int portno;
- orig = orig_ohci_hub_status_data(hcd, buf);
+ orig = ohci_hub_status_data(hcd, buf);
if (info == NULL)
return orig;
@@ -181,7 +177,7 @@ static int ohci_s3c2410_hub_control(
* process the request straight away and exit */
if (info == NULL) {
- ret = orig_ohci_hub_control(hcd, typeReq, wValue,
+ ret = ohci_hub_control(hcd, typeReq, wValue,
wIndex, buf, wLength);
goto out;
}
@@ -231,7 +227,7 @@ static int ohci_s3c2410_hub_control(
break;
}
- ret = orig_ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
if (ret)
goto out;
@@ -489,9 +485,6 @@ static int __init ohci_s3c2410_init(void)
* override these functions by making it too easy.
*/
- orig_ohci_hub_control = ohci_s3c2410_hc_driver.hub_control;
- orig_ohci_hub_status_data = ohci_s3c2410_hc_driver.hub_status_data;
-
ohci_s3c2410_hc_driver.hub_status_data = ohci_s3c2410_hub_status_data;
ohci_s3c2410_hc_driver.hub_control = ohci_s3c2410_hub_control;
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index 0b183e0..bef6dfb 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -129,8 +129,8 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
tilegx_start_ohc();
/* Create our IRQs and register them. */
- pdata->irq = create_irq();
- if (pdata->irq < 0) {
+ pdata->irq = irq_alloc_hwirq(-1);
+ if (!pdata->irq) {
ret = -ENXIO;
goto err_no_irq;
}
@@ -164,7 +164,7 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
}
err_have_irq:
- destroy_irq(pdata->irq);
+ irq_free_hwirq(pdata->irq);
err_no_irq:
tilegx_stop_ohc();
usb_put_hcd(hcd);
@@ -182,7 +182,7 @@ static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
tilegx_stop_ohc();
gxio_usb_host_destroy(&pdata->usb_ctx);
- destroy_irq(pdata->irq);
+ irq_free_hwirq(pdata->irq);
return 0;
}
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 4550ce0..05e02a7 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -729,3 +729,6 @@ extern int ohci_setup(struct usb_hcd *hcd);
extern int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);
extern int ohci_resume(struct usb_hcd *hcd, bool hibernated);
#endif
+extern int ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+extern int ohci_hub_status_data(struct usb_hcd *hcd, char *buf);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 638e88f..c622ddf 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,6 +5,7 @@
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
int usb_amd_find_chipset_info(void);
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
bool usb_amd_hang_symptom_quirk(void);
bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1ad6bc1..6231ce6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -20,7 +20,8 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/gfp.h>
+
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "xhci.h"
@@ -270,7 +271,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
struct xhci_virt_device *virt_dev;
struct xhci_command *cmd;
unsigned long flags;
- int timeleft;
int ret;
int i;
@@ -284,34 +284,31 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
- if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
- xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
+ if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false,
+ GFP_NOIO);
+ if (!command) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cmd);
+ return -ENOMEM;
+
+ }
+ xhci_queue_stop_endpoint(xhci, command, slot_id, i,
+ suspend);
+ }
}
- cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
- list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
- xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
+ xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for last stop endpoint command to finish */
- timeleft = wait_for_completion_interruptible_timeout(
- cmd->completion,
- XHCI_CMD_DEFAULT_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
- timeleft == 0 ? "Timeout" : "Signal");
- spin_lock_irqsave(&xhci->lock, flags);
- /* The timeout might have raced with the event ring handler, so
- * only delete from the list if the item isn't poisoned.
- */
- if (cmd->cmd_list.next != LIST_POISON1)
- list_del(&cmd->cmd_list);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ wait_for_completion(cmd->completion);
+
+ if (cmd->status == COMP_CMD_ABORT || cmd->status == COMP_CMD_STOP) {
+ xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
- goto command_cleanup;
}
-
-command_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b1a8a5f..8056d90 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1020,7 +1020,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
dev->num_rings_cached = 0;
init_completion(&dev->cmd_completion);
- INIT_LIST_HEAD(&dev->cmd_list);
dev->udev = udev;
/* Point to output device context in dcbaa. */
@@ -1794,10 +1793,11 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
- struct xhci_cd *cur_cd, *next_cd;
int size;
int i, j, num_ports;
+ del_timer_sync(&xhci->cmd_timer);
+
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
@@ -1816,11 +1816,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
- list_for_each_entry_safe(cur_cd, next_cd,
- &xhci->cancel_cmd_list, cancel_cmd_list) {
- list_del(&cur_cd->cancel_cmd_list);
- kfree(cur_cd);
- }
+ xhci_cleanup_command_queue(xhci);
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
for (i = 0; i < num_ports; i++) {
@@ -2323,7 +2319,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
u32 page_size, temp;
int i;
- INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ INIT_LIST_HEAD(&xhci->cmd_list);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2509,6 +2505,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
+ /* init command timeout timer */
+ init_timer(&xhci->cmd_timer);
+ xhci->cmd_timer.data = (unsigned long) xhci;
+ xhci->cmd_timer.function = xhci_handle_command_timeout;
+
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
new file mode 100644
index 0000000..1eefc98
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Marvell
+ * Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xhci-mvebu.h"
+
+#define USB3_MAX_WINDOWS 4
+#define USB3_WIN_CTRL(w) (0x0 + ((w) * 8))
+#define USB3_WIN_BASE(w) (0x4 + ((w) * 8))
+
+static void xhci_mvebu_mbus_config(void __iomem *base,
+ const struct mbus_dram_target_info *dram)
+{
+ int win;
+
+ /* Clear all existing windows */
+ for (win = 0; win < USB3_MAX_WINDOWS; win++) {
+ writel(0, base + USB3_WIN_CTRL(win));
+ writel(0, base + USB3_WIN_BASE(win));
+ }
+
+ /* Program each DRAM CS in a separate window */
+ for (win = 0; win < dram->num_cs; win++) {
+ const struct mbus_dram_window *cs = dram->cs + win;
+
+ writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ base + USB3_WIN_CTRL(win));
+
+ writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win));
+ }
+}
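
Each window control word written above packs four fields: bits 31:16 hold (size - 1) truncated to 64 KiB granularity, bits 15:8 the chip-select attribute, bits 7:4 the DRAM target id, and bit 0 enables the window. A quick userspace check of that encoding (the example values are made up, not taken from any real board):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack one USB3 MBus window control word the way the quirk above does. */
    static uint32_t usb3_win_ctrl(uint32_t size, uint8_t attr, uint8_t target)
    {
            return ((size - 1) & 0xffff0000) |   /* window size             */
                   ((uint32_t)attr << 8) |       /* DRAM chip-select attr   */
                   ((uint32_t)target << 4) |     /* DRAM target id          */
                   1;                            /* window enable           */
    }

    int main(void)
    {
            /* hypothetical 256 MiB chip-select, attribute 0x0e, target 0 */
            printf("ctrl = 0x%08x\n",
                   (unsigned)usb3_win_ctrl(256u << 20, 0x0e, 0));
            return 0;
    }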
+
+int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+ const struct mbus_dram_target_info *dram;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+
+ /*
+ * We don't use devm_ioremap() because this mapping should
+ * only exist for the duration of this probe function.
+ */
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return -ENODEV;
+
+ dram = mv_mbus_dram_info();
+ xhci_mvebu_mbus_config(base, dram);
+
+ /*
+ * This memory area was only needed to configure the MBus
+ * windows, and is therefore no longer useful.
+ */
+ iounmap(base);
+
+ return 0;
+}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
new file mode 100644
index 0000000..7ede92a
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_XHCI_MVEBU_H
+#define __LINUX_XHCI_MVEBU_H
+#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev);
+#else
+static inline int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 35d4477..e20520f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -134,14 +134,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
-
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
@@ -149,6 +149,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
+
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
}
/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 151901c..29d8adb 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -11,13 +11,15 @@
* version 2 as published by the Free Software Foundation.
*/
-#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "xhci.h"
+#include "xhci-mvebu.h"
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
@@ -35,6 +37,11 @@ static int xhci_plat_setup(struct usb_hcd *hcd)
return xhci_gen_setup(hcd, xhci_plat_quirks);
}
+static int xhci_plat_start(struct usb_hcd *hcd)
+{
+ return xhci_run(hcd);
+}
+
static const struct hc_driver xhci_plat_xhci_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -50,7 +57,7 @@ static const struct hc_driver xhci_plat_xhci_driver = {
* basic lifecycle operations
*/
.reset = xhci_plat_setup,
- .start = xhci_run,
+ .start = xhci_plat_start,
.stop = xhci_stop,
.shutdown = xhci_shutdown,
@@ -91,6 +98,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
+ struct clk *clk;
int ret;
int irq;
@@ -107,6 +115,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-375-xhci") ||
+ of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-380-xhci")) {
+ ret = xhci_mvebu_mbus_init_quirk(pdev);
+ if (ret)
+ return ret;
+ }
+
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
@@ -137,14 +154,27 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto release_mem_region;
}
+ /*
+ * Not all platforms have a clock, so it is not an error if the
+ * clock does not exist.
+ */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto unmap_registers;
+ }
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
- goto unmap_registers;
+ goto disable_clk;
+
device_wakeup_enable(hcd->self.controller);
/* USB 2.0 roothub is stored in the platform_device now. */
hcd = platform_get_drvdata(pdev);
xhci = hcd_to_xhci(hcd);
+ xhci->clk = clk;
xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
@@ -173,6 +203,10 @@ put_usb3_hcd:
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
+disable_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
unmap_registers:
iounmap(hcd->regs);
@@ -189,11 +223,14 @@ static int xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct clk *clk = xhci->clk;
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
usb_remove_hcd(hcd);
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
@@ -202,7 +239,7 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int xhci_plat_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -231,6 +268,8 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
static const struct of_device_id usb_xhci_of_match[] = {
{ .compatible = "generic-xhci" },
{ .compatible = "xhci-platform" },
+ { .compatible = "marvell,armada-375-xhci"},
+ { .compatible = "marvell,armada-380-xhci"},
{ },
};
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7a0e3c7..d67ff71 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -69,10 +69,6 @@
#include "xhci.h"
#include "xhci-trace.h"
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
- struct xhci_event_cmd *event);
-
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
@@ -123,16 +119,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
return TRB_TYPE_LINK_LE32(link->control);
}
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
- return ring->enq_seg->next->trbs;
- return ring->enqueue;
-}
-
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
@@ -301,17 +287,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
xhci_dbg(xhci, "Abort command ring\n");
- if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
- xhci_dbg(xhci, "The command ring isn't running, "
- "Have the command ring been stopped?\n");
- return 0;
- }
-
temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- if (!(temp_64 & CMD_RING_RUNNING)) {
- xhci_dbg(xhci, "Command ring had been stopped\n");
- return 0;
- }
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
@@ -337,71 +313,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
-static int xhci_queue_cd(struct xhci_hcd *xhci,
- struct xhci_command *command,
- union xhci_trb *cmd_trb)
-{
- struct xhci_cd *cd;
- cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
- if (!cd)
- return -ENOMEM;
- INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
- cd->command = command;
- cd->cmd_trb = cmd_trb;
- list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
- return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
- union xhci_trb *cmd_trb)
-{
- int retval = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&xhci->lock, flags);
-
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_warn(xhci, "Abort the command ring,"
- " but the xHCI is dead.\n");
- retval = -ESHUTDOWN;
- goto fail;
- }
-
- /* queue the cmd desriptor to cancel_cmd_list */
- retval = xhci_queue_cd(xhci, command, cmd_trb);
- if (retval) {
- xhci_warn(xhci, "Queuing command descriptor failed.\n");
- goto fail;
- }
-
- /* abort command ring */
- retval = xhci_abort_cmd_ring(xhci);
- if (retval) {
- xhci_err(xhci, "Abort command ring failed\n");
- if (unlikely(retval == -ESHUTDOWN)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
- xhci_dbg(xhci, "xHCI host controller is dead.\n");
- return retval;
- }
- }
-
-fail:
- spin_unlock_irqrestore(&xhci->lock, flags);
- return retval;
-}
-
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index,
@@ -684,12 +595,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_command *cmd,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
@@ -704,7 +617,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
- queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+ queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
@@ -773,7 +686,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, struct xhci_event_cmd *event)
{
unsigned int ep_index;
- struct xhci_virt_device *virt_dev;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
@@ -783,11 +695,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_dequeue_state deq_state;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev,
- event);
- else
+ if (!xhci->devs[slot_id])
xhci_warn(xhci, "Stop endpoint command "
"completion for disabled slot %u\n",
slot_id);
@@ -858,7 +766,9 @@ remove_finished_td:
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci,
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ xhci_queue_new_dequeue_state(xhci, command,
slot_id, ep_index,
ep->stopped_td->urb->stream_id,
&deq_state);
@@ -1206,9 +1116,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Queueing configure endpoint command");
- xhci_queue_configure_endpoint(xhci,
+ xhci_queue_configure_endpoint(xhci, command,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
xhci_ring_cmd_db(xhci);
@@ -1219,187 +1131,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
}
}
-/* Complete the command and delete it from the device's command queue.
- */
-static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_command *command, u32 status)
-{
- command->status = status;
- list_del(&command->cmd_list);
- if (command->completion)
- complete(command->completion);
- else
- xhci_free_command(xhci, command);
-}
-
-
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1. Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
- struct xhci_event_cmd *event)
-{
- struct xhci_command *command;
-
- if (list_empty(&virt_dev->cmd_list))
- return 0;
-
- command = list_entry(virt_dev->cmd_list.next,
- struct xhci_command, cmd_list);
- if (xhci->cmd_ring->dequeue != command->command_trb)
- return 0;
-
- xhci_complete_cmd_in_cmd_wait_list(xhci, command,
- GET_COMP_CODE(le32_to_cpu(event->status)));
- return 1;
-}
-
-/*
- * Finding the command trb need to be cancelled and modifying it to
- * NO OP command. And if the command is in device's command wait
- * list, finishing and freeing it.
- *
- * If we can't find the command trb, we think it had already been
- * executed.
- */
-static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
-{
- struct xhci_segment *cur_seg;
- union xhci_trb *cmd_trb;
- u32 cycle_state;
-
- if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
- return;
-
- /* find the current segment of command ring */
- cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
- xhci->cmd_ring->dequeue, &cycle_state);
-
- if (!cur_seg) {
- xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
- xhci->cmd_ring->dequeue,
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue));
- xhci_debug_ring(xhci, xhci->cmd_ring);
- xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
- return;
- }
-
- /* find the command trb matched by cd from command ring */
- for (cmd_trb = xhci->cmd_ring->dequeue;
- cmd_trb != xhci->cmd_ring->enqueue;
- next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
- /* If the trb is link trb, continue */
- if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
- continue;
-
- if (cur_cd->cmd_trb == cmd_trb) {
-
- /* If the command in device's command list, we should
- * finish it and free the command structure.
- */
- if (cur_cd->command)
- xhci_complete_cmd_in_cmd_wait_list(xhci,
- cur_cd->command, COMP_CMD_STOP);
-
- /* get cycle state from the origin command trb */
- cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
- & TRB_CYCLE;
-
- /* modify the command trb to NO OP command */
- cmd_trb->generic.field[0] = 0;
- cmd_trb->generic.field[1] = 0;
- cmd_trb->generic.field[2] = 0;
- cmd_trb->generic.field[3] = cpu_to_le32(
- TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
- break;
- }
- }
-}
-
-static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
-{
- struct xhci_cd *cur_cd, *next_cd;
-
- if (list_empty(&xhci->cancel_cmd_list))
- return;
-
- list_for_each_entry_safe(cur_cd, next_cd,
- &xhci->cancel_cmd_list, cancel_cmd_list) {
- xhci_cmd_to_noop(xhci, cur_cd);
- list_del(&cur_cd->cancel_cmd_list);
- kfree(cur_cd);
- }
-}
-
-/*
- * traversing the cancel_cmd_list. If the command descriptor according
- * to cmd_trb is found, the function free it and return 1, otherwise
- * return 0.
- */
-static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
- union xhci_trb *cmd_trb)
-{
- struct xhci_cd *cur_cd, *next_cd;
-
- if (list_empty(&xhci->cancel_cmd_list))
- return 0;
-
- list_for_each_entry_safe(cur_cd, next_cd,
- &xhci->cancel_cmd_list, cancel_cmd_list) {
- if (cur_cd->cmd_trb == cmd_trb) {
- if (cur_cd->command)
- xhci_complete_cmd_in_cmd_wait_list(xhci,
- cur_cd->command, COMP_CMD_STOP);
- list_del(&cur_cd->cancel_cmd_list);
- kfree(cur_cd);
- return 1;
- }
- }
-
- return 0;
-}
-
-/*
- * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
- * trb pointed by the command ring dequeue pointer is the trb we want to
- * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
- * traverse the cancel_cmd_list to trun the all of the commands according
- * to command descriptor to NO-OP trb.
- */
-static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
- int cmd_trb_comp_code)
-{
- int cur_trb_is_good = 0;
-
- /* Searching the cmd trb pointed by the command ring dequeue
- * pointer in command descriptor list. If it is found, free it.
- */
- cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
- xhci->cmd_ring->dequeue);
-
- if (cmd_trb_comp_code == COMP_CMD_ABORT)
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- else if (cmd_trb_comp_code == COMP_CMD_STOP) {
- /* traversing the cancel_cmd_list and canceling
- * the command according to command descriptor
- */
- xhci_cancel_cmd_in_cd_list(xhci);
-
- xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
- /*
- * ring command ring doorbell again to restart the
- * command ring
- */
- if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
- xhci_ring_cmd_db(xhci);
- }
- return cur_trb_is_good;
-}
-
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
u32 cmd_comp_code)
{
@@ -1407,7 +1138,6 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
xhci->slot_id = slot_id;
else
xhci->slot_id = 0;
- complete(&xhci->addr_dev);
}
static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1432,9 +1162,6 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_state;
u32 add_flags, drop_flags;
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- return;
/*
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
@@ -1443,6 +1170,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
* If the command was for a halted endpoint, the xHCI driver
* is not waiting on the configure endpoint command.
*/
+ virt_dev = xhci->devs[slot_id];
ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1465,7 +1193,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
add_flags - SLOT_FLAG == drop_flags) {
ep_state = virt_dev->eps[ep_index].ep_state;
if (!(ep_state & EP_HALTED))
- goto bandwidth_change;
+ return;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Completed config ep cmd - "
"last ep index = %d, state = %d",
@@ -1475,43 +1203,14 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
-bandwidth_change:
- xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
- "Completed config ep cmd");
- virt_dev->cmd_status = cmd_comp_code;
- complete(&virt_dev->cmd_completion);
return;
}
-static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
- struct xhci_event_cmd *event, u32 cmd_comp_code)
-{
- struct xhci_virt_device *virt_dev;
-
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- return;
- virt_dev->cmd_status = cmd_comp_code;
- complete(&virt_dev->cmd_completion);
-}
-
-static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
- u32 cmd_comp_code)
-{
- xhci->devs[slot_id]->cmd_status = cmd_comp_code;
- complete(&xhci->addr_dev);
-}
-
static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
struct xhci_event_cmd *event)
{
- struct xhci_virt_device *virt_dev;
-
xhci_dbg(xhci, "Completed reset device command.\n");
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
- else
+ if (!xhci->devs[slot_id])
xhci_warn(xhci, "Reset device command completion "
"for disabled slot %u\n", slot_id);
}
@@ -1529,6 +1228,116 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
NEC_FW_MINOR(le32_to_cpu(event->status)));
}
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+ list_del(&cmd->cmd_list);
+
+ if (cmd->completion) {
+ cmd->status = status;
+ complete(cmd->completion);
+ } else {
+ kfree(cmd);
+ }
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+ struct xhci_command *cur_cmd, *tmp_cmd;
+ list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
+}
+
+/*
+ * Turn all commands on the command ring with status set to "aborted" into no-op trbs.
+ * If there are other commands waiting, then restart the ring and kick the timer.
+ * This must be called with the command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ struct xhci_command *cur_cmd)
+{
+ struct xhci_command *i_cmd, *tmp_cmd;
+ u32 cycle_state;
+
+ /* Turn all aborted commands in list to no-ops, then restart */
+ list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+ cmd_list) {
+
+ if (i_cmd->status != COMP_CMD_ABORT)
+ continue;
+
+ i_cmd->status = COMP_CMD_STOP;
+
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ i_cmd->command_trb);
+ /* get cycle state from the original cmd trb */
+ cycle_state = le32_to_cpu(
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+ /* modify the command trb to no-op command */
+ i_cmd->command_trb->generic.field[0] = 0;
+ i_cmd->command_trb->generic.field[1] = 0;
+ i_cmd->command_trb->generic.field[2] = 0;
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+ /*
+		 * The caller waiting for completion is notified when a command
+		 * completion event is received for these no-op commands.
+ */
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ /* ring command ring doorbell to restart the command ring */
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_ring_cmd_db(xhci);
+ }
+ return;
+}
+
+
+void xhci_handle_command_timeout(unsigned long data)
+{
+ struct xhci_hcd *xhci;
+ int ret;
+ unsigned long flags;
+ u64 hw_ring_state;
+ struct xhci_command *cur_cmd = NULL;
+ xhci = (struct xhci_hcd *) data;
+
+ /* mark this command to be cancelled */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->current_cmd) {
+ cur_cmd = xhci->current_cmd;
+ cur_cmd->status = COMP_CMD_ABORT;
+ }
+
+
+ /* Make sure command ring is running before aborting it */
+ hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+ (hw_ring_state & CMD_RING_RUNNING)) {
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Command timeout\n");
+ ret = xhci_abort_cmd_ring(xhci);
+ if (unlikely(ret == -ESHUTDOWN)) {
+ xhci_err(xhci, "Abort command ring failed\n");
+ xhci_cleanup_command_queue(xhci);
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+ }
+ return;
+ }
+ /* command timeout on stopped ring, ring can't be aborted */
+ xhci_dbg(xhci, "Command timeout on stopped ring\n");
+ xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+}
+
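/*
 * Editor's sketch (not part of this patch): for xhci_handle_command_timeout()
 * to ever run, the new cmd_list and cmd_timer fields must be initialized when
 * the host is set up. The xhci-mem.c hunk is not included in this section, so
 * the exact placement below is an assumption; roughly, during xhci_mem_init():
 */
#if 0
	INIT_LIST_HEAD(&xhci->cmd_list);

	/* arm the command timeout handler with the xhci_hcd as its data */
	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
		    (unsigned long)xhci);
#endif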
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
@@ -1537,6 +1346,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
union xhci_trb *cmd_trb;
+ struct xhci_command *cmd;
u32 cmd_type;
cmd_dma = le64_to_cpu(event->cmd_trb);
@@ -1554,26 +1364,35 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
return;
}
+ cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
+ del_timer(&xhci->cmd_timer);
+
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
- if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
-	/* If the return value is 0, we think the trb pointed to by the
-	 * command ring dequeue pointer is a good trb. A good
-	 * trb means we don't want to cancel the trb, but it has
-	 * been stopped by the host. So we should handle it normally.
-	 * Otherwise, the driver should invoke inc_deq() and return.
- */
- if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
- inc_deq(xhci, xhci->cmd_ring);
- return;
- }
- /* There is no command to handle if we get a stop event when the
- * command ring is empty, event->cmd_trb points to the next
- * unset command
- */
- if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
- return;
+
+	/* If the CMD ring is stopped, we own the trbs between enqueue and dequeue */
+ if (cmd_comp_code == COMP_CMD_STOP) {
+ xhci_handle_stopped_cmd_ring(xhci, cmd);
+ return;
+ }
+ /*
+ * Host aborted the command ring, check if the current command was
+ * supposed to be aborted, otherwise continue normally.
+ * The command ring is stopped now, but the xHC will issue a Command
+ * Ring Stopped event which will cause us to restart it.
+ */
+ if (cmd_comp_code == COMP_CMD_ABORT) {
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ if (cmd->status == COMP_CMD_ABORT)
+ goto event_handled;
}
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1585,13 +1404,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
case TRB_CONFIG_EP:
- xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
+ if (!cmd->completion)
+ xhci_handle_cmd_config_ep(xhci, slot_id, event,
+ cmd_comp_code);
break;
case TRB_EVAL_CONTEXT:
- xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
break;
case TRB_ADDR_DEV:
- xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
break;
case TRB_STOP_RING:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1604,6 +1423,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
case TRB_CMD_NOOP:
+ /* Is this an aborted command turned to NO-OP? */
+ if (cmd->status == COMP_CMD_STOP)
+ cmd_comp_code = COMP_CMD_STOP;
break;
case TRB_RESET_EP:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1623,6 +1445,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci->error_bitmask |= 1 << 6;
break;
}
+
+ /* restart timer if this wasn't the last command */
+ if (cmd->cmd_list.next != &xhci->cmd_list) {
+ xhci->current_cmd = list_entry(cmd->cmd_list.next,
+ struct xhci_command, cmd_list);
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
+event_handled:
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
inc_deq(xhci, xhci->cmd_ring);
}
@@ -1938,11 +1771,16 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_stream = stream_id;
- xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
@@ -2654,7 +2492,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* successful event after a short transfer.
* Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
ret = 0;
@@ -3996,11 +3834,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
- u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed)
{
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ESHUTDOWN;
if (!command_must_succeed)
reserved_trbs++;
@@ -4014,57 +3855,71 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
+
+ cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+
+ /* if there are no other commands queued we start the timeout timer */
+ if (xhci->cmd_list.next == &cmd->cmd_list &&
+ !timer_pending(&xhci->cmd_timer)) {
+ xhci->current_cmd = cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
field4 | xhci->cmd_ring->cycle_state);
return 0;
}
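/*
 * Editor's sketch (not part of this patch): the caller pattern this rework
 * expects, pieced together from the xhci.c hunks further down in this diff.
 * The wrapper name do_nec_vendor_cmd() is made up for illustration; the
 * xhci_* helpers are the ones used elsewhere in the patch.
 */
#if 0
static int do_nec_vendor_cmd(struct xhci_hcd *xhci)
{
	struct xhci_command *cmd;
	unsigned long flags;
	int ret;

	/* second "true" asks xhci_alloc_command() for a completion to wait on */
	cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_vendor_command(xhci, cmd, 0, 0, 0,
					TRB_TYPE(TRB_NEC_GET_FW));
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cmd);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* the timeout/abort paths above complete this even on failure */
	wait_for_completion(cmd->completion);
	ret = (cmd->status == COMP_SUCCESS) ? 0 : -ETIME;
	xhci_free_command(xhci, cmd);
	return ret;
}
#endif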
/* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 trb_type, u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, enum xhci_setup_dev setup)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4)
{
- return queue_command(xhci, field1, field2, field3, field4, false);
+ return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, bool command_must_succeed)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
@@ -4074,25 +3929,26 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
/* Set Transfer Ring Dequeue Pointer command.
* This should not be used for endpoints that have streams enabled.
*/
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_segment *deq_seg,
- union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -4119,18 +3975,19 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
ep->queued_deq_ptr = deq_ptr;
if (stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
- return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
+ return queue_command(xhci, cmd,
+ lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
- return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
- false);
+ return queue_command(xhci, cmd, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type, false);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3008369..2b8d9a2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -291,7 +291,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
xhci->msix_entries[i].vector = 0;
}
- ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Failed to enable MSI-X");
@@ -641,10 +641,14 @@ int xhci_run(struct usb_hcd *hcd)
writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
- if (xhci->quirks & XHCI_NEC_HOST)
- xhci_queue_vendor_command(xhci, 0, 0, 0,
+ if (xhci->quirks & XHCI_NEC_HOST) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+ xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
-
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
return 0;
@@ -1187,10 +1191,10 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, struct urb *urb)
{
- struct xhci_container_ctx *in_ctx;
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
+ struct xhci_command *command;
int max_packet_size;
int hw_max_packet_size;
int ret = 0;
@@ -1215,18 +1219,24 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
/* FIXME: This won't work if a non-default control endpoint
* changes max packet sizes.
*/
- in_ctx = xhci->devs[slot_id]->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = xhci->devs[slot_id]->in_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto command_cleanup;
}
/* Set up the modified control endpoint 0 */
xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, ep_index);
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
@@ -1234,17 +1244,20 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Slot %d input context\n", slot_id);
- xhci_dbg_ctx(xhci, in_ctx, ep_index);
+ xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
xhci_dbg(xhci, "Slot %d output context\n", slot_id);
xhci_dbg_ctx(xhci, out_ctx, ep_index);
- ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+ ret = xhci_configure_endpoint(xhci, urb->dev, command,
true, false);
/* Clean up the input context for later use by bandwidth
* functions.
*/
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
+command_cleanup:
+ kfree(command->completion);
+ kfree(command);
}
return ret;
}
@@ -1465,6 +1478,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
+ struct xhci_command *command;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
@@ -1534,12 +1548,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* the first cancellation to be handled.
*/
if (!(ep->ep_state & EP_HALT_PENDING)) {
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
ep->ep_state |= EP_HALT_PENDING;
ep->stop_cmds_pending++;
ep->stop_cmd_timer.expires = jiffies +
XHCI_STOP_EP_CMD_TIMEOUT * HZ;
add_timer(&ep->stop_cmd_timer);
- xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
+ xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+ ep_index, 0);
xhci_ring_cmd_db(xhci);
}
done:
@@ -1804,6 +1820,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
int ret;
switch (*cmd_status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
+ ret = -ETIME;
+ break;
case COMP_ENOMEM:
dev_warn(&udev->dev, "Not enough host controller resources "
"for new device state.\n");
@@ -1850,6 +1871,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
switch (*cmd_status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
+ ret = -ETIME;
+ break;
case COMP_EINVAL:
dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
"context command.\n");
@@ -2574,23 +2600,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
bool ctx_change, bool must_succeed)
{
int ret;
- int timeleft;
unsigned long flags;
- struct xhci_container_ctx *in_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
- struct completion *cmd_completion;
- u32 *cmd_status;
struct xhci_virt_device *virt_dev;
- union xhci_trb *cmd_trb;
+
+ if (!command)
+ return -EINVAL;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
- if (command)
- in_ctx = command->in_ctx;
- else
- in_ctx = virt_dev->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -2607,7 +2627,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return -ENOMEM;
}
if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
- xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
+ xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -2615,27 +2635,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return -ENOMEM;
}
- if (command) {
- cmd_completion = command->completion;
- cmd_status = &command->status;
- command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
- list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
- } else {
- cmd_completion = &virt_dev->cmd_completion;
- cmd_status = &virt_dev->cmd_status;
- }
- init_completion(cmd_completion);
-
- cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
if (!ctx_change)
- ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ ret = xhci_queue_configure_endpoint(xhci, command,
+ command->in_ctx->dma,
udev->slot_id, must_succeed);
else
- ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
+ ret = xhci_queue_evaluate_context(xhci, command,
+ command->in_ctx->dma,
udev->slot_id, must_succeed);
if (ret < 0) {
- if (command)
- list_del(&command->cmd_list);
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -2647,26 +2655,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
- timeleft = wait_for_completion_interruptible_timeout(
- cmd_completion,
- XHCI_CMD_DEFAULT_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for %s command\n",
- timeleft == 0 ? "Timeout" : "Signal",
- ctx_change == 0 ?
- "configure endpoint" :
- "evaluate context");
- /* cancel the configure endpoint command */
- ret = xhci_cancel_cmd(xhci, command, cmd_trb);
- if (ret < 0)
- return ret;
- return -ETIME;
- }
+ wait_for_completion(command->completion);
if (!ctx_change)
- ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev,
+ &command->status);
else
- ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_evaluate_context_result(xhci, udev,
+ &command->status);
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -2714,6 +2710,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_command *command;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
if (ret <= 0)
@@ -2725,12 +2722,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = virt_dev->in_ctx;
+
/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto command_cleanup;
}
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
@@ -2738,20 +2742,20 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Don't issue the command if there's no endpoints to update. */
if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
- ctrl_ctx->drop_flags == 0)
- return 0;
-
+ ctrl_ctx->drop_flags == 0) {
+ ret = 0;
+ goto command_cleanup;
+ }
xhci_dbg(xhci, "New Input Control Context:\n");
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
xhci_dbg_ctx(xhci, virt_dev->in_ctx,
LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
- ret = xhci_configure_endpoint(xhci, udev, NULL,
+ ret = xhci_configure_endpoint(xhci, udev, command,
false, false);
- if (ret) {
+ if (ret)
/* Callee should call reset_bandwidth() */
- return ret;
- }
+ goto command_cleanup;
xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx,
@@ -2783,6 +2787,9 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
+command_cleanup:
+ kfree(command->completion);
+ kfree(command);
return ret;
}
@@ -2884,9 +2891,14 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* issue a configure endpoint command later.
*/
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+ struct xhci_command *command;
+ /* Can't sleep if we're called from cleanup_halted_endpoint() */
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Queueing new dequeue state");
- xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+ xhci_queue_new_dequeue_state(xhci, command, udev->slot_id,
ep_index, ep->stopped_stream, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
@@ -2917,6 +2929,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned long flags;
int ret;
struct xhci_virt_ep *virt_ep;
+ struct xhci_command *command;
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *) ep->hcpriv;
@@ -2939,10 +2952,14 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
return;
}
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Queueing reset endpoint command");
spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+ ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
/*
* Can't change the ring dequeue pointer until it's transitioned to the
* stopped state, which is only upon a successful reset endpoint
@@ -3416,7 +3433,6 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
unsigned int slot_id;
struct xhci_virt_device *virt_dev;
struct xhci_command *reset_device_cmd;
- int timeleft;
int last_freed_endpoint;
struct xhci_slot_ctx *slot_ctx;
int old_active_eps = 0;
@@ -3473,13 +3489,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
- reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
- list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
- ret = xhci_queue_reset_device(xhci, slot_id);
+ ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
if (ret) {
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
- list_del(&reset_device_cmd->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
goto command_cleanup;
}
@@ -3487,22 +3500,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the Reset Device command to finish */
- timeleft = wait_for_completion_interruptible_timeout(
- reset_device_cmd->completion,
- XHCI_CMD_DEFAULT_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for reset device command\n",
- timeleft == 0 ? "Timeout" : "Signal");
- spin_lock_irqsave(&xhci->lock, flags);
- /* The timeout might have raced with the event ring handler, so
- * only delete from the list if the item isn't poisoned.
- */
- if (reset_device_cmd->cmd_list.next != LIST_POISON1)
- list_del(&reset_device_cmd->cmd_list);
- spin_unlock_irqrestore(&xhci->lock, flags);
- ret = -ETIME;
- goto command_cleanup;
- }
+ wait_for_completion(reset_device_cmd->completion);
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
@@ -3510,6 +3508,11 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
*/
ret = reset_device_cmd->status;
switch (ret) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout waiting for reset device command\n");
+ ret = -ETIME;
+ goto command_cleanup;
case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
case COMP_CTX_STATE: /* 0.96 completion code for same thing */
xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
@@ -3589,6 +3592,11 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
unsigned long flags;
u32 state;
int i, ret;
+ struct xhci_command *command;
+
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return;
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
@@ -3604,8 +3612,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
/* If the host is halted due to driver unload, we still need to free the
* device.
*/
- if (ret <= 0 && ret != -ENODEV)
+ if (ret <= 0 && ret != -ENODEV) {
+ kfree(command);
return;
+ }
virt_dev = xhci->devs[udev->slot_id];
@@ -3622,16 +3632,19 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
(xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
+ kfree(command);
return;
}
- if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+ if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+ udev->slot_id)) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
return;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
+
/*
* Event command completion handler will free any data structures
* associated with the slot. XXX Can free sleep?
@@ -3669,33 +3682,33 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
- int timeleft;
int ret;
- union xhci_trb *cmd_trb;
+ struct xhci_command *command;
+
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return 0;
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
- ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ command->completion = &xhci->addr_dev;
+ ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ kfree(command);
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
- /* XXX: how much time for xHC slot assignment? */
- timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- XHCI_CMD_DEFAULT_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for a slot\n",
- timeleft == 0 ? "Timeout" : "Signal");
- /* cancel the enable slot request */
- return xhci_cancel_cmd(xhci, NULL, cmd_trb);
- }
+ wait_for_completion(command->completion);
- if (!xhci->slot_id) {
+ if (!xhci->slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID\n");
+ xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
+ HCS_MAX_SLOTS(
+ readl(&xhci->cap_regs->hcs_params1)));
+ kfree(command);
return 0;
}
@@ -3730,6 +3743,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
pm_runtime_get_noresume(hcd->self.controller);
#endif
+
+ kfree(command);
/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
return 1;
@@ -3737,7 +3752,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
disable_slot:
/* Disable slot, if we can do it without mem alloc */
spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ command->completion = NULL;
+ command->status = 0;
+ if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+ udev->slot_id))
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
@@ -3754,14 +3772,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
{
const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
- int timeleft;
struct xhci_virt_device *virt_dev;
int ret = 0;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
- union xhci_trb *cmd_trb;
+ struct xhci_command *command;
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -3782,11 +3799,19 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
return -EINVAL;
}
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = virt_dev->in_ctx;
+ command->completion = &xhci->addr_dev;
+
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
+ kfree(command);
return -EINVAL;
}
/*
@@ -3808,36 +3833,31 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
le32_to_cpu(slot_ctx->dev_info) >> 27);
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
- ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+ ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
udev->slot_id, setup);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"FIXME: allocate a command ring segment");
+ kfree(command);
return ret;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
- timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- XHCI_CMD_DEFAULT_TIMEOUT);
+ wait_for_completion(command->completion);
+
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
*/
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for setup %s command\n",
- timeleft == 0 ? "Timeout" : "Signal", act);
- /* cancel the address device command */
- ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
- if (ret < 0)
- return ret;
- return -ETIME;
- }
-
- switch (virt_dev->cmd_status) {
+ switch (command->status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for setup device command\n");
+ ret = -ETIME;
+ break;
case COMP_CTX_STATE:
case COMP_EBADSLT:
xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
@@ -3860,7 +3880,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
default:
xhci_err(xhci,
"ERROR: unexpected setup %s command completion code 0x%x.\n",
- act, virt_dev->cmd_status);
+ act, command->status);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
@@ -3868,6 +3888,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
break;
}
if (ret) {
+ kfree(command);
return ret;
}
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
@@ -3902,7 +3923,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Internal device address = %d",
le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
-
+ kfree(command);
return 0;
}
@@ -4092,7 +4113,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
- enable ? "enable" : "disable", port_num);
+ enable ? "enable" : "disable", port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4746816..9ffecd5 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -937,9 +937,6 @@ struct xhci_virt_device {
#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
struct completion cmd_completion;
- /* Status of the last command issued for this device */
- u32 cmd_status;
- struct list_head cmd_list;
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;
@@ -1298,7 +1295,6 @@ struct xhci_td {
/* command descriptor */
struct xhci_cd {
- struct list_head cancel_cmd_list;
struct xhci_command *command;
union xhci_trb *cmd_trb;
};
@@ -1476,6 +1472,8 @@ struct xhci_hcd {
/* msi-x vectors */
int msix_count;
struct msix_entry *msix_entries;
+ /* optional clock */
+ struct clk *clk;
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
@@ -1483,8 +1481,10 @@ struct xhci_hcd {
#define CMD_RING_STATE_RUNNING (1 << 0)
#define CMD_RING_STATE_ABORTED (1 << 1)
#define CMD_RING_STATE_STOPPED (1 << 2)
- struct list_head cancel_cmd_list;
+ struct list_head cmd_list;
unsigned int cmd_ring_reserved_trbs;
+ struct timer_list cmd_timer;
+ struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* Scratchpad */
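/*
 * Editor's sketch (not part of this patch): with a global cmd_list and a
 * pending cmd_timer, the teardown path presumably also has to stop the timer
 * and flush the queue. The corresponding xhci.c hunks are not shown in this
 * section, so the placement below is an assumption:
 */
#if 0
	/* in the stop/teardown path, once the command ring is no longer used */
	del_timer_sync(&xhci->cmd_timer);
	xhci_cleanup_command_queue(xhci);	/* completes waiters with COMP_CMD_ABORT */
#endif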
@@ -1738,8 +1738,7 @@ static inline int xhci_register_pci(void) { return 0; }
static inline void xhci_unregister_pci(void) {}
#endif
-#if defined(CONFIG_USB_XHCI_PLATFORM) \
- || defined(CONFIG_USB_XHCI_PLATFORM_MODULE)
+#if IS_ENABLED(CONFIG_USB_XHCI_PLATFORM)
int xhci_register_plat(void);
void xhci_unregister_plat(void);
#else
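/*
 * Editor's note (not part of this patch): IS_ENABLED(CONFIG_USB_XHCI_PLATFORM)
 * from <linux/kconfig.h> evaluates to 1 when the option is built-in ('y') or
 * modular ('m'), so the single test replaces the pair of defined() checks
 * removed above.
 */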
@@ -1808,13 +1807,14 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
dma_addr_t suspect_dma);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, enum xhci_setup_dev);
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4);
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int suspend);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index, int suspend);
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
@@ -1823,18 +1823,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index);
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, bool command_must_succeed);
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, bool command_must_succeed);
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index);
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
+ bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *cur_td,
struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_command *cmd,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state);
@@ -1844,11 +1847,11 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
- union xhci_trb *cmd_trb);
+void xhci_handle_command_timeout(unsigned long data);
+
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,