path: root/drivers/net/sfc/efx.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 12:49:40 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 12:49:40 -0800
commit    0191b625ca5a46206d2fb862bb08f36f2fcb3b31 (patch)
tree      454d1842b1833d976da62abcbd5c47521ebe9bd7 /drivers/net/sfc/efx.c
parent    54a696bd07c14d3b1192d03ce7269bc59b45209a (diff)
parent    eb56092fc168bf5af199d47af50c0d84a96db898 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1429 commits)
  net: Allow dependancies of FDDI & Tokenring to be modular.
  igb: Fix build warning when DCA is disabled.
  net: Fix warning fallout from recent NAPI interface changes.
  gro: Fix potential use after free
  sfc: If AN is enabled, always read speed/duplex from the AN advertising bits
  sfc: When disabling the NIC, close the device rather than unregistering it
  sfc: SFT9001: Add cable diagnostics
  sfc: Add support for multiple PHY self-tests
  sfc: Merge top-level functions for self-tests
  sfc: Clean up PHY mode management in loopback self-test
  sfc: Fix unreliable link detection in some loopback modes
  sfc: Generate unique names for per-NIC workqueues
  802.3ad: use standard ethhdr instead of ad_header
  802.3ad: generalize out mac address initializer
  802.3ad: initialize ports LACPDU from const initializer
  802.3ad: remove typedef around ad_system
  802.3ad: turn ports is_individual into a bool
  802.3ad: turn ports is_enabled into a bool
  802.3ad: make ntt bool
  ixgbe: Fix set_ringparam in ixgbe to use the same memory pools.
  ...

Fixed trivial IPv4/6 address printing conflicts in fs/cifs/connect.c due to
the conversion to %pI (in this networking merge) and the addition of doing
IPv6 addresses (from the earlier merge of CIFS).
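For context on one of the conversions visible in the hunks below, the driver's
DECLARE_MAC_BUF()/print_mac() calls are replaced by the kernel's %pM printk
extension (the same family of %p conversions as the %pI printing mentioned
above). A schematic before/after sketch, based on the efx_probe_port() hunk
rather than copied verbatim from it:

	/* Before: format the MAC address into a stack buffer */
	DECLARE_MAC_BUF(mac);
	EFX_ERR(efx, "invalid MAC address %s\n",
		print_mac(mac, efx->mac_address));

	/* After: let printk format the 6-byte address directly via %pM */
	EFX_ERR(efx, "invalid MAC address %pM\n", efx->mac_address);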
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c  449
1 file changed, 269 insertions, 180 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 06ea71c..7673fd9 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,14 +21,12 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
-#include "gmii.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
-#include "mac.h"
#define EFX_MAX_MTU (9 * 1024)
@@ -39,6 +37,12 @@
*/
static struct workqueue_struct *refill_workqueue;
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
/**************************************************************************
*
* Configurable values
@@ -58,13 +62,15 @@ MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
* Use separate channels for TX and RX events
*
- * Set this to 1 to use separate channels for TX and RX. It allows us to
- * apply a higher level of interrupt moderation to TX events.
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
*
- * This is forced to 0 for MSI interrupt mode as the interrupt vector
- * is not written
+ * This is only used in MSI-X interrupt mode
*/
-static unsigned int separate_tx_and_rx_channels = true;
+static unsigned int separate_tx_channels;
+module_param(separate_tx_channels, uint, 0644);
+MODULE_PARM_DESC(separate_tx_channels,
+ "Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
* NAPI devices.
@@ -77,11 +83,6 @@ static int napi_weight = 64;
*/
unsigned int efx_monitor_interval = 1 * HZ;
-/* This controls whether or not the hardware monitor will trigger a
- * reset when it detects an error condition.
- */
-static unsigned int monitor_reset = true;
-
/* This controls whether or not the driver will initialise devices
* with invalid MAC addresses stored in the EEPROM or flash. If true,
* such devices will be initialised with a random locally-generated
@@ -128,6 +129,10 @@ static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+static int phy_flash_cfg;
+module_param(phy_flash_cfg, int, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
/**************************************************************************
*
* Utility functions and prototypes
@@ -211,7 +216,6 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
- struct net_device *napi_dev = channel->napi_dev;
int rx_packets;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
@@ -225,7 +229,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
* since efx_channel_processed() will have no effect if
* interrupts have already been disabled.
*/
- netif_rx_complete(napi_dev, napi);
+ netif_rx_complete(napi);
efx_channel_processed(channel);
}
@@ -349,6 +353,27 @@ static int efx_probe_channel(struct efx_channel *channel)
}
+static void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ const char *type = "";
+ int number;
+
+ efx_for_each_channel(channel, efx) {
+ number = channel->channel;
+ if (efx->n_channels > efx->n_rx_queues) {
+ if (channel->channel < efx->n_rx_queues) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->n_rx_queues;
+ }
+ }
+ snprintf(channel->name, sizeof(channel->name),
+ "%s%s-%d", efx->name, type, number);
+ }
+}
+
/* Channels are shutdown and reinitialised whilst the NIC is running
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
@@ -523,26 +548,8 @@ static void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (efx->link_up) {
- struct mii_if_info *gmii = &efx->mii;
- unsigned adv, lpa;
- /* NONE here means direct XAUI from the controller, with no
- * MDIO-attached device we can query. */
- if (efx->phy_type != PHY_TYPE_NONE) {
- adv = gmii_advertised(gmii);
- lpa = gmii_lpa(gmii);
- } else {
- lpa = GM_LPA_10000 | LPA_DUPLEX;
- adv = lpa;
- }
- EFX_INFO(efx, "link up at %dMbps %s-duplex "
- "(adv %04x lpa %04x) (MTU %d)%s\n",
- (efx->link_options & GM_LPA_10000 ? 10000 :
- (efx->link_options & GM_LPA_1000 ? 1000 :
- (efx->link_options & GM_LPA_100 ? 100 :
- 10))),
- (efx->link_options & GM_LPA_DUPLEX ?
- "full" : "half"),
- adv, lpa,
+ EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ efx->link_speed, efx->link_fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
} else {
@@ -566,10 +573,28 @@ void __efx_reconfigure_port(struct efx_nic *efx)
netif_addr_unlock_bh(efx->net_dev);
}
- falcon_reconfigure_xmac(efx);
+ falcon_deconfigure_mac_wrapper(efx);
+
+ /* Reconfigure the PHY, disabling transmit in mac level loopback. */
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+ efx->phy_op->reconfigure(efx);
+
+ if (falcon_switch_mac(efx))
+ goto fail;
+
+ efx->mac_op->reconfigure(efx);
/* Inform kernel of loss/gain of carrier */
efx_link_status_changed(efx);
+ return;
+
+fail:
+ EFX_ERR(efx, "failed to reconfigure MAC\n");
+ efx->phy_op->fini(efx);
+ efx->port_initialized = false;
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
@@ -586,10 +611,9 @@ void efx_reconfigure_port(struct efx_nic *efx)
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
* we don't efx_reconfigure_port() if the port is disabled. Care is taken
* in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
-static void efx_reconfigure_work(struct work_struct *data)
+static void efx_phy_work(struct work_struct *data)
{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- reconfigure_work);
+ struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
@@ -597,6 +621,16 @@ static void efx_reconfigure_work(struct work_struct *data)
mutex_unlock(&efx->mac_lock);
}
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx->mac_op->irq(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
@@ -608,21 +642,22 @@ static int efx_probe_port(struct efx_nic *efx)
if (rc)
goto err;
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
} else {
- DECLARE_MAC_BUF(mac);
-
- EFX_ERR(efx, "invalid MAC address %s\n",
- print_mac(mac, efx->mac_address));
+ EFX_ERR(efx, "invalid MAC address %pM\n",
+ efx->mac_address);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
}
random_ether_addr(efx->net_dev->dev_addr);
- EFX_INFO(efx, "using locally-generated MAC %s\n",
- print_mac(mac, efx->net_dev->dev_addr));
+ EFX_INFO(efx, "using locally-generated MAC %pM\n",
+ efx->net_dev->dev_addr);
}
return 0;
@@ -638,23 +673,30 @@ static int efx_init_port(struct efx_nic *efx)
EFX_LOG(efx, "init port\n");
- /* Initialise the MAC and PHY */
- rc = falcon_init_xmac(efx);
+ rc = efx->phy_op->init(efx);
if (rc)
return rc;
+ efx->phy_op->reconfigure(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = falcon_switch_mac(efx);
+ mutex_unlock(&efx->mac_lock);
+ if (rc)
+ goto fail;
+ efx->mac_op->reconfigure(efx);
efx->port_initialized = true;
efx->stats_enabled = true;
-
- /* Reconfigure port to program MAC registers */
- falcon_reconfigure_xmac(efx);
-
return 0;
+
+fail:
+ efx->phy_op->fini(efx);
+ return rc;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
* between efx_stop_port and efx_flush_all whereby a previously scheduled
- * efx_reconfigure_port() may have been cancelled */
+ * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
EFX_LOG(efx, "start port\n");
@@ -663,13 +705,14 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
__efx_reconfigure_port(efx);
+ efx->mac_op->irq(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
- * efx_set_multicast_list() from scheduling efx_reconfigure_work.
- * efx_reconfigure_work can still be scheduled via NAPI processing
- * until efx_flush_all() is called */
+/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
+ * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
+ * and efx_mac_work may still be scheduled via NAPI processing until
+ * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
EFX_LOG(efx, "stop port\n");
@@ -692,7 +735,7 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
- falcon_fini_xmac(efx);
+ efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_up = false;
@@ -840,26 +883,33 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
int wanted_ints;
+ int rx_queues;
/* We want one RX queue and interrupt per CPU package
* (or as specified by the rss_cpus module parameter).
* We will need one channel per interrupt.
*/
- wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- efx->n_rx_queues = min(wanted_ints, max_channels);
+ rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
+ wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
+ wanted_ints = min(wanted_ints, max_channels);
- for (i = 0; i < efx->n_rx_queues; i++)
+ for (i = 0; i < wanted_ints; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
+ rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
if (rc > 0) {
- EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
- efx->n_rx_queues = rc;
+ EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %d).\n", rc, wanted_ints);
+ EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
+ EFX_BUG_ON_PARANOID(rc >= wanted_ints);
+ wanted_ints = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- efx->n_rx_queues);
+ wanted_ints);
}
if (rc == 0) {
- for (i = 0; i < efx->n_rx_queues; i++)
+ efx->n_rx_queues = min(rx_queues, wanted_ints);
+ efx->n_channels = wanted_ints;
+ for (i = 0; i < wanted_ints; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
@@ -871,6 +921,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
efx->n_rx_queues = 1;
+ efx->n_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
@@ -883,6 +934,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
efx->n_rx_queues = 1;
+ efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
efx->legacy_irq = efx->pci_dev->irq;
}
}
@@ -907,8 +959,8 @@ static void efx_set_channels(struct efx_nic *efx)
struct efx_rx_queue *rx_queue;
efx_for_each_tx_queue(tx_queue, efx) {
- if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
- tx_queue->channel = &efx->channel[1];
+ if (separate_tx_channels)
+ tx_queue->channel = &efx->channel[efx->n_channels-1];
else
tx_queue->channel = &efx->channel[0];
tx_queue->channel->used_flags |= EFX_USED_BY_TX;
@@ -985,6 +1037,7 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail3;
}
}
+ efx_set_channel_names(efx);
return 0;
@@ -1050,7 +1103,8 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_delayed_work_sync(&rx_queue->work);
/* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->reconfigure_work);
+ cancel_work_sync(&efx->mac_work);
+ cancel_work_sync(&efx->phy_work);
}
@@ -1087,7 +1141,7 @@ static void efx_stop_all(struct efx_nic *efx)
* window to loose phy events */
efx_stop_port(efx);
- /* Flush reconfigure_work, refill_workqueue, monitor_work */
+ /* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
efx_flush_all(efx);
/* Isolate the MAC from the TX and RX engines, so that queue
@@ -1159,36 +1213,31 @@ static void efx_monitor(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
- int rc = 0;
+ int rc;
EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
raw_smp_processor_id());
-
/* If the mac_lock is already held then it is likely a port
* reconfiguration is already in place, which will likely do
* most of the work of check_hw() anyway. */
- if (!mutex_trylock(&efx->mac_lock)) {
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
- return;
- }
-
- if (efx->port_enabled)
- rc = falcon_check_xmac(efx);
- mutex_unlock(&efx->mac_lock);
-
+ if (!mutex_trylock(&efx->mac_lock))
+ goto out_requeue;
+ if (!efx->port_enabled)
+ goto out_unlock;
+ rc = efx->board_info.monitor(efx);
if (rc) {
- if (monitor_reset) {
- EFX_ERR(efx, "hardware monitor detected a fault: "
- "triggering reset\n");
- efx_schedule_reset(efx, RESET_TYPE_MONITOR);
- } else {
- EFX_ERR(efx, "hardware monitor detected a fault, "
- "skipping reset\n");
- }
+ EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
+ (rc == -ERANGE) ? "reported fault" : "failed");
+ efx->phy_mode |= PHY_MODE_LOW_POWER;
+ falcon_sim_phy_event(efx);
}
+ efx->phy_op->poll(efx);
+ efx->mac_op->poll(efx);
+out_unlock:
+ mutex_unlock(&efx->mac_lock);
+out_requeue:
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
}
@@ -1282,6 +1331,8 @@ static int efx_net_open(struct net_device *net_dev)
EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
+ if (efx->state == STATE_DISABLED)
+ return -EIO;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
@@ -1300,10 +1351,12 @@ static int efx_net_stop(struct net_device *net_dev)
EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
- /* Stop the device and flush all the channels */
- efx_stop_all(efx);
- efx_fini_channels(efx);
- efx_init_channels(efx);
+ if (efx->state != STATE_DISABLED) {
+ /* Stop the device and flush all the channels */
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+ efx_init_channels(efx);
+ }
return 0;
}
@@ -1322,7 +1375,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
if (!spin_trylock(&efx->stats_lock))
return stats;
if (efx->stats_enabled) {
- falcon_update_stats_xmac(efx);
+ efx->mac_op->update_stats(efx);
falcon_update_nic_stats(efx);
}
spin_unlock(&efx->stats_lock);
@@ -1360,12 +1413,11 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
- atomic_read(&efx->netif_stop_count), efx->port_enabled,
- monitor_reset ? "resetting channels" : "skipping reset");
+ EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
+ " resetting channels\n",
+ atomic_read(&efx->netif_stop_count), efx->port_enabled);
- if (monitor_reset)
- efx_schedule_reset(efx, RESET_TYPE_MONITOR);
+ efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1401,9 +1453,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
- DECLARE_MAC_BUF(mac);
- EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
- print_mac(mac, new_addr));
+ EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
return -EINVAL;
}
@@ -1447,22 +1498,43 @@ static void efx_set_multicast_list(struct net_device *net_dev)
return;
if (changed)
- queue_work(efx->workqueue, &efx->reconfigure_work);
+ queue_work(efx->workqueue, &efx->phy_work);
/* Create and activate new global multicast hash table */
falcon_set_multicast_hash(efx);
}
+static const struct net_device_ops efx_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_multicast_list = efx_set_multicast_list,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+};
+
+static void efx_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ efx_mtd_rename(efx);
+ efx_set_channel_names(efx);
+}
+
static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
- if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
- struct efx_nic *efx = netdev_priv(net_dev);
-
- strcpy(efx->name, net_dev->name);
- }
+ if (net_dev->netdev_ops == &efx_netdev_ops &&
+ event == NETDEV_CHANGENAME)
+ efx_update_name(netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -1471,6 +1543,14 @@ static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
+static ssize_t
+show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
+
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -1478,18 +1558,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- net_dev->open = efx_net_open;
- net_dev->stop = efx_net_stop;
- net_dev->get_stats = efx_net_stats;
- net_dev->tx_timeout = &efx_watchdog;
- net_dev->hard_start_xmit = efx_hard_start_xmit;
- net_dev->do_ioctl = efx_ioctl;
- net_dev->change_mtu = efx_change_mtu;
- net_dev->set_mac_address = efx_set_mac_address;
- net_dev->set_multicast_list = efx_set_multicast_list;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- net_dev->poll_controller = efx_netpoll;
-#endif
+ net_dev->netdev_ops = &efx_netdev_ops;
SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
@@ -1497,7 +1566,7 @@ static int efx_register_netdev(struct efx_nic *efx)
netif_carrier_off(efx->net_dev);
/* Clear MAC statistics */
- falcon_update_stats_xmac(efx);
+ efx->mac_op->update_stats(efx);
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
rc = register_netdev(net_dev);
@@ -1505,9 +1574,22 @@ static int efx_register_netdev(struct efx_nic *efx)
EFX_ERR(efx, "could not register net dev\n");
return rc;
}
- strcpy(efx->name, net_dev->name);
+
+ rtnl_lock();
+ efx_update_name(efx);
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ EFX_ERR(efx, "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
return 0;
+
+fail_registered:
+ unregister_netdev(net_dev);
+ return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
@@ -1527,6 +1609,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
}
@@ -1541,8 +1624,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
* before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int rc;
-
EFX_ASSERT_RESET_SERIALISED(efx);
/* The net_dev->get_stats handler is quite slow, and will fail
@@ -1553,10 +1634,9 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
+ mutex_lock(&efx->spi_lock);
- rc = falcon_xmac_get_settings(efx, ecmd);
- if (rc)
- EFX_ERR(efx, "could not back up PHY settings\n");
+ efx->phy_op->get_settings(efx, ecmd);
efx_fini_channels(efx);
}
@@ -1581,10 +1661,11 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
if (ok) {
efx_init_channels(efx);
- if (falcon_xmac_set_settings(efx, ecmd))
+ if (efx->phy_op->set_settings(efx, ecmd))
EFX_ERR(efx, "could not restore PHY settings\n");
}
+ mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
if (ok) {
@@ -1607,7 +1688,7 @@ static int efx_reset(struct efx_nic *efx)
{
struct ethtool_cmd ecmd;
enum reset_type method = efx->reset_pending;
- int rc;
+ int rc = 0;
/* Serialise with kernel interfaces */
rtnl_lock();
@@ -1616,7 +1697,7 @@ static int efx_reset(struct efx_nic *efx)
* flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
- goto unlock_rtnl;
+ goto out_unlock;
}
EFX_INFO(efx, "resetting (%d)\n", method);
@@ -1626,7 +1707,7 @@ static int efx_reset(struct efx_nic *efx)
rc = falcon_reset_hw(efx, method);
if (rc) {
EFX_ERR(efx, "failed to reset hardware\n");
- goto fail;
+ goto out_disable;
}
/* Allow resets to be rescheduled. */
@@ -1640,28 +1721,23 @@ static int efx_reset(struct efx_nic *efx)
/* Leave device stopped if necessary */
if (method == RESET_TYPE_DISABLE) {
+ efx_reset_up(efx, &ecmd, false);
rc = -EIO;
- goto fail;
+ } else {
+ rc = efx_reset_up(efx, &ecmd, true);
}
- rc = efx_reset_up(efx, &ecmd, true);
- if (rc)
- goto disable;
-
- EFX_LOG(efx, "reset complete\n");
- unlock_rtnl:
- rtnl_unlock();
- return 0;
-
- fail:
- efx_reset_up(efx, &ecmd, false);
- disable:
- EFX_ERR(efx, "has been disabled\n");
- efx->state = STATE_DISABLED;
+out_disable:
+ if (rc) {
+ EFX_ERR(efx, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ dev_close(efx->net_dev);
+ } else {
+ EFX_LOG(efx, "reset complete\n");
+ }
+out_unlock:
rtnl_unlock();
- efx_unregister_netdev(efx);
- efx_fini_port(efx);
return rc;
}
@@ -1709,7 +1785,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
efx->reset_pending = method;
- queue_work(efx->reset_workqueue, &efx->reset_work);
+ queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
@@ -1743,10 +1819,16 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
+static struct efx_mac_operations efx_dummy_mac_operations = {
+ .reconfigure = efx_port_dummy_op_void,
+ .poll = efx_port_dummy_op_void,
+ .irq = efx_port_dummy_op_void,
+};
+
static struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_void,
- .check_hw = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_void,
.fini = efx_port_dummy_op_void,
.clear_interrupt = efx_port_dummy_op_void,
};
@@ -1755,6 +1837,7 @@ static struct efx_board efx_dummy_board_info = {
.init = efx_port_dummy_op_int,
.init_leds = efx_port_dummy_op_int,
.set_fault_led = efx_port_dummy_op_blink,
+ .monitor = efx_port_dummy_op_int,
.blink = efx_port_dummy_op_blink,
.fini = efx_port_dummy_op_void,
};
@@ -1774,12 +1857,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- int i, rc;
+ int i;
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
spin_lock_init(&efx->phy_lock);
+ mutex_init(&efx->spi_lock);
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
@@ -1793,9 +1877,11 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
+ efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
efx->mii.dev = net_dev;
- INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
+ INIT_WORK(&efx->phy_work, efx_phy_work);
+ INIT_WORK(&efx->mac_work, efx_mac_work);
atomic_set(&efx->netif_stop_count, 1);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
@@ -1841,34 +1927,18 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
- efx->workqueue = create_singlethread_workqueue("sfc_work");
- if (!efx->workqueue) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
- if (!efx->reset_workqueue) {
- rc = -ENOMEM;
- goto fail2;
- }
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue)
+ return -ENOMEM;
return 0;
-
- fail2:
- destroy_workqueue(efx->workqueue);
- efx->workqueue = NULL;
-
- fail1:
- return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
- if (efx->reset_workqueue) {
- destroy_workqueue(efx->reset_workqueue);
- efx->reset_workqueue = NULL;
- }
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -1927,11 +1997,13 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_unregister_netdev(efx);
+ efx_mtd_remove(efx);
+
/* Wait for any scheduled resets to complete. No more will be
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
- flush_workqueue(efx->reset_workqueue);
+ cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx);
@@ -1992,6 +2064,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx_fini_port(efx);
fail5:
fail4:
+ efx->board_info.fini(efx);
fail3:
efx_fini_napi(efx);
fail2:
@@ -2045,14 +2118,23 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
* we're in STATE_INIT. */
for (i = 0; i < 5; i++) {
rc = efx_pci_probe_main(efx);
- if (rc == 0)
- break;
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
- flush_workqueue(efx->reset_workqueue);
+ cancel_work_sync(&efx->reset_work);
+
+ if (rc == 0) {
+ if (efx->reset_pending != RESET_TYPE_NONE) {
+ /* If there was a scheduled reset during
+ * probe, the NIC is probably hosed anyway */
+ efx_pci_remove_main(efx);
+ rc = -EIO;
+ } else {
+ break;
+ }
+ }
/* Retry if a recoverably reset event has been scheduled */
if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
@@ -2070,16 +2152,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
/* Switch to the running state before we expose the device to
* the OS. This is to ensure that the initial gathering of
* MAC stats succeeds. */
- rtnl_lock();
efx->state = STATE_RUNNING;
- rtnl_unlock();
+
+ efx_mtd_probe(efx); /* allowed to fail */
rc = efx_register_netdev(efx);
if (rc)
goto fail5;
EFX_LOG(efx, "initialisation successful\n");
-
return 0;
fail5:
@@ -2127,6 +2208,11 @@ static int __init efx_init_module(void)
rc = -ENOMEM;
goto err_refill;
}
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ rc = -ENOMEM;
+ goto err_reset;
+ }
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
@@ -2135,6 +2221,8 @@ static int __init efx_init_module(void)
return 0;
err_pci:
+ destroy_workqueue(reset_workqueue);
+ err_reset:
destroy_workqueue(refill_workqueue);
err_refill:
unregister_netdevice_notifier(&efx_netdev_notifier);
@@ -2147,6 +2235,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
+ destroy_workqueue(reset_workqueue);
destroy_workqueue(refill_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
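
The last two hunks touch efx_init_module()/efx_exit_module() in several
places; pieced together, the resulting init/exit ordering looks roughly like
this (a reconstruction from the hunks above with unchanged code elided, not
an exact copy of the file):

	static int __init efx_init_module(void)
	{
		int rc;

		/* ... earlier setup, including registration of
		 * efx_netdev_notifier and creation of refill_workqueue,
		 * is unchanged and elided here ... */

		reset_workqueue = create_singlethread_workqueue("sfc_reset");
		if (!reset_workqueue) {
			rc = -ENOMEM;
			goto err_reset;
		}

		rc = pci_register_driver(&efx_pci_driver);
		if (rc < 0)
			goto err_pci;

		return 0;

	 err_pci:
		destroy_workqueue(reset_workqueue);
	 err_reset:
		destroy_workqueue(refill_workqueue);
	 err_refill:
		unregister_netdevice_notifier(&efx_netdev_notifier);
		/* ... remaining unwind elided ... */
		return rc;
	}

	static void __exit efx_exit_module(void)
	{
		printk(KERN_INFO "Solarflare NET driver unloading\n");

		pci_unregister_driver(&efx_pci_driver);
		destroy_workqueue(reset_workqueue);
		destroy_workqueue(refill_workqueue);
		unregister_netdevice_notifier(&efx_netdev_notifier);
	}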