From 2e5071bce5ce4037ce852a916e8106811e68677b Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:03 -0600 Subject: atl1: relocate atl1 driver to /drivers/net/atlx In preparation for a future Atheros L2 NIC driver (called atl2), relocate the atl1 driver into a new /drivers/net/atlx directory that will ultimately be shared with the future atl2 driver. Signed-off-by: Chris Snook Signed-off-by: Jay Cliburn Signed-off-by: Jeff Garzik --- drivers/net/atlx/Makefile | 2 + drivers/net/atlx/atl1.h | 286 +++++ drivers/net/atlx/atl1_ethtool.c | 505 ++++++++ drivers/net/atlx/atl1_hw.c | 720 ++++++++++++ drivers/net/atlx/atl1_hw.h | 946 +++++++++++++++ drivers/net/atlx/atl1_main.c | 2453 +++++++++++++++++++++++++++++++++++++++ drivers/net/atlx/atl1_param.c | 203 ++++ 7 files changed, 5115 insertions(+) create mode 100644 drivers/net/atlx/Makefile create mode 100644 drivers/net/atlx/atl1.h create mode 100644 drivers/net/atlx/atl1_ethtool.c create mode 100644 drivers/net/atlx/atl1_hw.c create mode 100644 drivers/net/atlx/atl1_hw.h create mode 100644 drivers/net/atlx/atl1_main.c create mode 100644 drivers/net/atlx/atl1_param.c (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile new file mode 100644 index 0000000..a6b707e --- /dev/null +++ b/drivers/net/atlx/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ATL1) += atl1.o +atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h new file mode 100644 index 0000000..ff4765f6 --- /dev/null +++ b/drivers/net/atlx/atl1.h @@ -0,0 +1,286 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1_H_ +#define _ATL1_H_ + +#include +#include + +#include "atl1_hw.h" + +/* function prototypes needed by multiple files */ +s32 atl1_up(struct atl1_adapter *adapter); +void atl1_down(struct atl1_adapter *adapter); +int atl1_reset(struct atl1_adapter *adapter); +s32 atl1_setup_ring_resources(struct atl1_adapter *adapter); +void atl1_free_ring_resources(struct atl1_adapter *adapter); + +extern char atl1_driver_name[]; +extern char atl1_driver_version[]; +extern const struct ethtool_ops atl1_ethtool_ops; + +struct atl1_adapter; + +#define ATL1_MAX_INTR 3 +#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ + +#define ATL1_DEFAULT_TPD 256 +#define ATL1_MAX_TPD 1024 +#define ATL1_MIN_TPD 64 +#define ATL1_DEFAULT_RFD 512 +#define ATL1_MIN_RFD 128 +#define ATL1_MAX_RFD 2048 + +#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc) +#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc) +#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) + +/* + * This detached comment is preserved for documentation purposes only. + * It was originally attached to some code that got deleted, but seems + * important enough to keep around... + * + * + * Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + * + */ + +/* + * atl1_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two + * message blocks (cmb, smb) described below + */ +struct atl1_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 alloced; /* 1 if skb allocated */ + dma_addr_t dma; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 hw_idx; /* hardware index */ + atomic_t next_to_clean; + atomic_t next_to_use; + struct atl1_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + atomic_t next_to_use; + u16 next_to_clean; + struct atl1_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + unsigned int size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + atomic_t next_to_clean; +}; + +/* coalescing message block (cmb) */ +struct atl1_cmb { + struct coals_msg_block *cmb; + dma_addr_t dma; +}; + +/* statistics message block (smb) */ +struct atl1_smb { + struct stats_msg_block *smb; + 
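The ATL1_GET_DESC()/ATL1_TPD_DESC() macros defined above simply overlay a typed array on each ring's desc pointer, which is itself carved out of the single contiguous ring_header allocation. A minimal userspace sketch of that indexing pattern follows; the demo descriptor layout and names are placeholders, not the real hardware descriptors:

    #include <stdio.h>
    #include <stdlib.h>

    /* placeholder descriptor; the real tx_packet_desc layout is defined elsewhere
     * in the driver (the comments above note it is 16 bytes long) */
    struct demo_tx_desc {
        unsigned long long buffer_addr;
        unsigned int word2;
        unsigned int word3;
    };

    struct demo_ring {
        void *desc;             /* ring virtual address, as in atl1_tpd_ring */
        unsigned short count;
    };

    /* same shape as ATL1_GET_DESC(R, i, type) */
    #define DEMO_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
    #define DEMO_TPD_DESC(R, i) DEMO_GET_DESC(R, i, struct demo_tx_desc)

    int main(void)
    {
        struct demo_ring ring;
        struct demo_tx_desc *tpd;

        ring.count = 256;   /* ATL1_DEFAULT_TPD */
        ring.desc = calloc(ring.count, sizeof(struct demo_tx_desc));
        if (!ring.desc)
            return 1;

        /* descriptor i lives at desc + i * sizeof(descriptor) */
        tpd = DEMO_TPD_DESC(&ring, 5);
        printf("tpd[5] is %ld bytes into the ring\n",
               (long)((char *)tpd - (char *)ring.desc));

        free(ring.desc);
        return 0;
    }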
dma_addr_t dma; +}; + +/* Statistics counters */ +struct atl1_sft_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 multicast; + u64 collisions; + u64 rx_errors; + u64 rx_length_errors; + u64 rx_crc_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; + u64 rx_missed_errors; + u64 tx_errors; + u64 tx_fifo_errors; + u64 tx_aborted_errors; + u64 tx_window_errors; + u64 tx_carrier_errors; + u64 tx_pause; /* num pause packets transmitted. */ + u64 excecol; /* num tx packets w/ excessive collisions. */ + u64 deffer; /* num tx packets deferred */ + u64 scc; /* num packets subsequently transmitted + * successfully w/ single prior collision. */ + u64 mcc; /* num packets subsequently transmitted + * successfully w/ multiple prior collisions. */ + u64 latecol; /* num tx packets w/ late collisions. */ + u64 tx_underun; /* num tx packets aborted due to transmit + * FIFO underrun, or TRD FIFO underrun */ + u64 tx_trunc; /* num tx packets truncated due to size + * exceeding MTU, regardless whether truncated + * by the chip or not. (The name doesn't really + * reflect the meaning in this case.) */ + u64 rx_pause; /* num Pause packets received. */ + u64 rx_rrd_ov; + u64 rx_trunc; +}; + +/* hardware structure */ +struct atl1_hw { + u8 __iomem *hw_addr; + struct atl1_adapter *back; + enum atl1_dma_order dma_ord; + enum atl1_dma_rcb rcb_value; + enum atl1_dma_req_block dmar_block; + enum atl1_dma_req_block dmaw_block; + u8 preamble_len; + u8 max_retry; /* Retransmission maximum, after which the + * packet will be discarded */ + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In units of + * 8-bit time */ + u8 ipgt; /* Desired back to back inter-packet gap. + * The default is 96-bit time */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * receive frames. Frame gap below such IFP + * is dropped */ + u8 ipgr1; /* 64bit Carrier-Sense window */ + u8 ipgr2; /* 96-bit IPG window */ + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned + * burst. Each TPD is 16 bytes long */ + u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned + * burst. Each RFD is 12 bytes long */ + u8 rfd_fetch_gap; + u8 rrd_burst; /* Threshold number of RRDs that can be retired + * in a burst. Each RRD is 16 bytes long */ + u8 tpd_fetch_th; + u8 tpd_fetch_gap; + u16 tx_jumbo_task_th; + u16 txf_burst; /* Number of data bytes to read in a cache- + * aligned burst. Each SRAM entry is 8 bytes */ + u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN + * packets should add 4 bytes */ + u16 rx_jumbo_lkah; + u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after + * every 512ns passes. 
*/ + u16 lcol; /* Collision Window */ + + u16 cmb_tpd; + u16 cmb_rrd; + u16 cmb_rx_timer; + u16 cmb_tx_timer; + u32 smb_timer; + u16 media_type; + u16 autoneg_advertised; + + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u32 max_frame_size; + u32 min_frame_size; + + u16 dev_rev; + + /* spi flash */ + u8 flash_vendor; + + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; +}; + +struct atl1_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + struct atl1_sft_stats soft_stats; + struct vlan_group *vlgrp; + u32 rx_buffer_len; + u32 wol; + u16 link_speed; + u16 link_duplex; + spinlock_t lock; + struct work_struct tx_timeout_task; + struct work_struct link_chg_task; + struct work_struct pcie_dma_to_rst_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + bool phy_timer_pending; + + /* all descriptor rings' memory */ + struct atl1_ring_header ring_header; + + /* TX */ + struct atl1_tpd_ring tpd_ring; + spinlock_t mb_lock; + + /* RX */ + struct atl1_rfd_ring rfd_ring; + struct atl1_rrd_ring rrd_ring; + u64 hw_csum_err; + u64 hw_csum_good; + + u16 imt; /* interrupt moderator timer (2us resolution */ + u16 ict; /* interrupt clear timer (2us resolution */ + struct mii_if_info mii; /* MII interface info */ + + /* structs defined in atl1_hw.h */ + u32 bd_number; /* board number */ + bool pci_using_64; + struct atl1_hw hw; + struct atl1_smb smb; + struct atl1_cmb cmb; +}; + +#endif /* _ATL1_H_ */ diff --git a/drivers/net/atlx/atl1_ethtool.c b/drivers/net/atlx/atl1_ethtool.c new file mode 100644 index 0000000..68a83be --- /dev/null +++ b/drivers/net/atlx/atl1_ethtool.c @@ -0,0 +1,505 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "atl1.h" + +struct atl1_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \ + offsetof(struct atl1_adapter, m) + +static struct atl1_stats atl1_gstrings_stats[] = { + {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, + {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, + {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, + {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, + {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, + {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, + {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, + {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, + {"multicast", ATL1_STAT(soft_stats.multicast)}, + {"collisions", ATL1_STAT(soft_stats.collisions)}, + {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, + {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, + {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, + {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, + {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, + {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, + {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, + {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, + {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, + {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, + {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, + {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, + {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, + {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, + {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, + {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, + {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, + {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} +}; + +static void atl1_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int i; + char *p; + + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; + data[i] = (atl1_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
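The copy loop here (its ternary continues just below) works because ATL1_STAT() records both the size and the offset of each exported field, so one generic loop handles u64 and u32 counters alike. A self-contained sketch of the same table-driven dispatch, with a made-up demo_adapter standing in for struct atl1_adapter:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct demo_adapter {
        uint64_t rx_packets;
        uint32_t hw_csum_err;   /* deliberately a narrower field */
    };

    struct demo_stat {
        const char *name;
        int sizeof_stat;
        int stat_offset;
    };

    /* same trick as ATL1_STAT(m): record size and offset of member m */
    #define DEMO_STAT(m) sizeof(((struct demo_adapter *)0)->m), \
        offsetof(struct demo_adapter, m)

    static const struct demo_stat demo_stats[] = {
        { "rx_packets",  DEMO_STAT(rx_packets) },
        { "hw_csum_err", DEMO_STAT(hw_csum_err) },
    };

    int main(void)
    {
        struct demo_adapter ad = { .rx_packets = 12345, .hw_csum_err = 7 };
        uint64_t data[2];
        size_t i;

        for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
            char *p = (char *)&ad + demo_stats[i].stat_offset;
            data[i] = (demo_stats[i].sizeof_stat == sizeof(uint64_t)) ?
                *(uint64_t *)p : *(uint32_t *)p;
            printf("%s = %llu\n", demo_stats[i].name,
                   (unsigned long long)data[i]);
        }
        return 0;
    }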
*(u64 *)p : *(u32 *)p; + } + +} + +static int atl1_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static int atl1_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + ecmd->advertising |= ADVERTISED_Autoneg; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + else + ecmd->advertising |= (ADVERTISED_1000baseT_Full); + } + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + u16 link_speed, link_duplex; + atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); + ecmd->speed = link_speed; + if (link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int atl1_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u16 phy_data; + int ret_val = 0; + u16 old_media_type = hw->media_type; + + if (netif_running(adapter->netdev)) { + dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n"); + atl1_down(adapter); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + else { + if (ecmd->speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + dev_warn(&adapter->pdev->dev, + "can't force to 1000M half duplex\n"); + ret_val = -EINVAL; + goto exit_sset; + } + hw->media_type = MEDIA_TYPE_1000M_FULL; + } else if (ecmd->speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) { + hw->media_type = MEDIA_TYPE_100M_FULL; + } else + hw->media_type = MEDIA_TYPE_100M_HALF; + } else { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_10M_FULL; + else + hw->media_type = MEDIA_TYPE_10M_HALF; + } + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ecmd->advertising = + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + case MEDIA_TYPE_1000M_FULL: + ecmd->advertising = + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + default: + ecmd->advertising = 0; + break; + } + if (atl1_phy_setup_autoneg_adv(hw)) { + ret_val = -EINVAL; + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); + goto exit_sset; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = 
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); +exit_sset: + if (ret_val) + hw->media_type = old_media_type; + + if (netif_running(adapter->netdev)) { + dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n"); + atl1_up(adapter); + } else if (!ret_val) { + dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n"); + atl1_reset(adapter); + } + return ret_val; +} + +static void atl1_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver)); + strncpy(drvinfo->version, atl1_driver_version, + sizeof(drvinfo->version)); + strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = ATL1_EEDUMP_LEN; +} + +static void atl1_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + if (adapter->wol & ATL1_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & ATL1_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & ATL1_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & ATL1_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + return; +} + +static int atl1_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + adapter->wol = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= ATL1_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= ATL1_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= ATL1_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATL1_WUFC_MAG; + return 0; +} + +static void atl1_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *txdr = &adapter->tpd_ring; + struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; + + ring->rx_max_pending = ATL1_MAX_RFD; + ring->tx_max_pending = ATL1_MAX_TPD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int atl1_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; + struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; + struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; + + struct atl1_tpd_ring tpd_old, tpd_new; + struct atl1_rfd_ring rfd_old, rfd_new; + struct atl1_rrd_ring rrd_old, rrd_new; + struct atl1_ring_header rhdr_old, rhdr_new; + int err; + + tpd_old = adapter->tpd_ring; + rfd_old = adapter->rfd_ring; + rrd_old = adapter->rrd_ring; + rhdr_old = adapter->ring_header; + + if (netif_running(adapter->netdev)) + atl1_down(adapter); + + rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); + rfdr->count = 
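To make the resizing arithmetic that follows concrete (using the ATL1_MIN_RFD/ATL1_MAX_RFD and ATL1_MIN_TPD/ATL1_MAX_TPD limits from atl1.h above): a request of 513 RX descriptors first becomes max(513, 128) = 513, is then capped at 2048 (no change here), and (513 + 3) & ~3 rounds it up to 516, the next multiple of four; rrdr->count is then kept equal to the RFD count, and the TPD count is clamped and rounded the same way against its own limits.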
rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : + rfdr->count; + rfdr->count = (rfdr->count + 3) & ~3; + rrdr->count = rfdr->count; + + tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); + tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : + tpdr->count; + tpdr->count = (tpdr->count + 3) & ~3; + + if (netif_running(adapter->netdev)) { + /* try to get new resources before deleting old */ + err = atl1_setup_ring_resources(adapter); + if (err) + goto err_setup_ring; + + /* + * save the new, restore the old in order to free it, + * then restore the new back again + */ + + rfd_new = adapter->rfd_ring; + rrd_new = adapter->rrd_ring; + tpd_new = adapter->tpd_ring; + rhdr_new = adapter->ring_header; + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_free_ring_resources(adapter); + adapter->rfd_ring = rfd_new; + adapter->rrd_ring = rrd_new; + adapter->tpd_ring = tpd_new; + adapter->ring_header = rhdr_new; + + err = atl1_up(adapter); + if (err) + return err; + } + return 0; + +err_setup_ring: + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_up(adapter); + return err; +} + +static void atl1_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + epause->rx_pause = 1; + epause->tx_pause = 1; +} + +static int atl1_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + + epause->rx_pause = 1; + epause->tx_pause = 1; + + return 0; +} + +static u32 atl1_get_rx_csum(struct net_device *netdev) +{ + return 1; +} + +static void atl1_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + memcpy(p, atl1_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int atl1_nway_reset(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (netif_running(netdev)) { + u16 phy_data; + atl1_down(adapter); + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + } else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: /* MEDIA_TYPE_10M_HALF */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + atl1_up(adapter); + } + return 0; +} + +const struct ethtool_ops atl1_ethtool_ops = { + .get_settings = atl1_get_settings, + .set_settings = atl1_set_settings, + 
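This ops table is exported (see the extern declaration in atl1.h) so the main driver file can attach it to the net_device at probe time; atl1_main.c is not shown in this excerpt, but in kernels of this era the hookup is typically a single assignment along these lines (illustrative only):

    /* during probe, after the net_device has been allocated */
    netdev->ethtool_ops = &atl1_ethtool_ops;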
.get_drvinfo = atl1_get_drvinfo, + .get_wol = atl1_get_wol, + .set_wol = atl1_set_wol, + .get_ringparam = atl1_get_ringparam, + .set_ringparam = atl1_set_ringparam, + .get_pauseparam = atl1_get_pauseparam, + .set_pauseparam = atl1_set_pauseparam, + .get_rx_csum = atl1_get_rx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_link = ethtool_op_get_link, + .set_sg = ethtool_op_set_sg, + .get_strings = atl1_get_strings, + .nway_reset = atl1_nway_reset, + .get_ethtool_stats = atl1_get_ethtool_stats, + .get_sset_count = atl1_get_sset_count, + .set_tso = ethtool_op_set_tso, +}; diff --git a/drivers/net/atlx/atl1_hw.c b/drivers/net/atlx/atl1_hw.c new file mode 100644 index 0000000..9d3bd22 --- /dev/null +++ b/drivers/net/atlx/atl1_hw.c @@ -0,0 +1,720 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "atl1.h" + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : ATL1_SUCCESS or idle status (if error) + */ +s32 atl1_reset_hw(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + u32 icr; + int i; + + /* + * Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* + * iowrite32(0, hw->hw_addr + REG_IMR); + * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); + */ + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); + ioread32(hw->hw_addr + REG_MASTER_CTRL); + + iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE); + ioread16(hw->hw_addr + REG_GPHY_ENABLE); + + msleep(1); /* delay about 1ms */ + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); + if (!icr) + break; + msleep(1); /* delay 1 ms */ + cpu_relax(); /* FIXME: is this still the right way to do this? 
*/ + } + + if (icr) { + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + return icr; + } + + return ATL1_SUCCESS; +} + +/* function about EEPROM + * + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl1_check_eeprom_exist(struct atl1_hw *hw) +{ + u32 value; + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + } + + value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + return false; /* address do not align */ + + iowrite32(0, hw->hw_addr + REG_VPD_DATA); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + iowrite32(control, hw->hw_addr + REG_VPD_CAP); + ioread32(hw->hw_addr + REG_VPD_CAP); + + for (i = 0; i < 10; i++) { + msleep(2); + control = ioread32(hw->hw_addr + REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << + MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16) val; + return ATL1_SUCCESS; + } + return ATL1_ERR_PHY; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + iowrite32(0, hw->hw_addr + REG_SPI_DATA); + iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & + SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & + SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & + SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & + SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << + SPI_FLASH_CTRL_INS_SHIFT; + + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + + value |= SPI_FLASH_CTRL_START; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + + for (i = 0; i < 10; i++) { + msleep(1); /* 1ms */ + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ioread32(hw->hw_addr + REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1_get_permanent_address(struct atl1_hw *hw) +{ + u32 addr[2]; + u32 i, control; + u16 reg; + u8 eth_addr[ETH_ALEN]; 
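The two scan loops that follow read the EEPROM (and, failing that, the SPI flash) as a stream of 32-bit words: a word whose low byte is 0x5A is a key carrying a register offset in its upper 16 bits, the next word is that register's value, and anything else ends the table. A standalone sketch of that key/value walk over a made-up word stream (the DEMO_ names and the sample image are illustrative only; the MAC value matches the 00-0B-6A-F6-00-DC example used later in atl1_set_mac_addr):

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_REG_MAC_STA_ADDR 0x1488   /* same value as REG_MAC_STA_ADDR below */

    int main(void)
    {
        /* fake EEPROM image: 0x5A-keyed (offset, value) records, then a terminator */
        uint32_t words[] = {
            (DEMO_REG_MAC_STA_ADDR << 16) | 0x5A,         /* key: low MAC dword */
            0x6AF600DC,                                   /* value */
            ((DEMO_REG_MAC_STA_ADDR + 4) << 16) | 0x5A,   /* key: high MAC word */
            0x0000000B,                                   /* value */
            0xFFFFFFFF,                                   /* not a key -> stop */
        };
        uint32_t addr[2] = { 0, 0 };
        uint16_t reg = 0;
        int key_valid = 0;
        size_t i;

        for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
            uint32_t control = words[i];
            if (key_valid) {
                if (reg == DEMO_REG_MAC_STA_ADDR)
                    addr[0] = control;
                else if (reg == DEMO_REG_MAC_STA_ADDR + 4)
                    addr[1] = control;
                key_valid = 0;
            } else if ((control & 0xff) == 0x5A) {
                key_valid = 1;
                reg = (uint16_t)(control >> 16);
            } else {
                break;  /* anything else ends the table */
            }
        }
        printf("addr[0]=0x%08x addr[1]=0x%08x\n",
               (unsigned)addr[0], (unsigned)addr[1]);
        return 0;
    }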
+ bool key_valid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */ + reg = 0; + key_valid = false; + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl1_read_eeprom(hw, i + 0x100, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + break; /* assume data end while encount an invalid KEYWORD */ + } else + break; /* read error */ + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + return 1; + } + + /* see if SPI FLAGS exist ? */ + addr[0] = addr[1] = 0; + reg = 0; + key_valid = false; + i = 0; + while (1) { + if (atl1_spi_read(hw, i + 0x1f000, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + break; /* data end */ + } else + break; /* read error */ + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + /* + * On some motherboards, the MAC address is written by the + * BIOS directly to the MAC register during POST, and is + * not stored in eeprom. If all else thus far has failed + * to fetch the permanent MAC address, try reading it directly. + */ + addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); + addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * hw - Struct containing variables accessed by shared code + */ +s32 atl1_read_mac_addr(struct atl1_hw *hw) +{ + u16 i; + + if (atl1_get_permanent_address(hw)) + random_ether_addr(hw->perm_mac_addr); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return ATL1_SUCCESS; +} + +/* + * Hashes an address to determine its location in the multicast table + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl1_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) +{ + u32 crc32, value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. 
We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + mta |= (1 << hash_bit); + iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return ATL1_SUCCESS; + + return ATL1_ERR_PHY; +} + +/* + * Make L001's PHY out of Power Saving State (bug) + * hw - Struct containing variables accessed by shared code + * when power on, L001's PHY always on Power saving State + * (Gigabit Link forbidden) + */ +static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +{ + s32 ret; + ret = atl1_write_phy_reg(hw, 29, 0x0029); + if (ret) + return ret; + return atl1_write_phy_reg(hw, 30, 0); +} + +/* + *TODO: do something or get rid of this + */ +s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) +{ +/* s32 ret_val; + * u16 phy_data; + */ + +/* + ret_val = atl1_write_phy_reg(hw, ...); + ret_val = atl1_write_phy_reg(hw, ...); + .... +*/ + return ATL1_SUCCESS; +} + +/* + * Resets the PHY and make all config validate + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl1_phy_reset(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + u16 phy_data; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + + ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down! 
*/ + dev_dbg(&pdev->dev, "pcie phy link down\n"); + + for (i = 0; i < 25; i++) { + msleep(1); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if ((val & (MDIO_START | MDIO_BUSY)) != 0) { + dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); + return ret_val; + } + } + return ATL1_SUCCESS; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * hw - Struct containing variables accessed by shared code + */ +s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + s16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; + + /* + * Need to parse media_type and set up + * the appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_1000M_FULL: + mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + break; + + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + + return ATL1_SUCCESS; +} + +/* + * Configures link settings. + * hw - Struct containing variables accessed by shared code + * Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + */ +static s32 atl1_setup_link(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + + /* + * Options: + * PHY will advertise value(s) parsed from + * autoneg_advertised and fc + * no matter what autoneg is , We will not wait link result. 
+ */ + ret_val = atl1_phy_setup_autoneg_adv(hw); + if (ret_val) { + dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); + return ret_val; + } + /* SW.Reset , En-Auto-Neg if needed */ + ret_val = atl1_phy_reset(hw); + if (ret_val) { + dev_dbg(&pdev->dev, "error resetting phy\n"); + return ret_val; + } + hw->phy_configured = true; + return ret_val; +} + +static struct atl1_spi_flash_dev flash_table[] = { +/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ + {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, + {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, + {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, +}; + +static void atl1_init_flash_opcode(struct atl1_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + hw->flash_vendor = 0; /* ATMEL */ + + /* Init OP table */ + iowrite8(flash_table[hw->flash_vendor].cmd_program, + hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); + iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, + hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, + hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_rdid, + hw->hw_addr + REG_SPI_FLASH_OP_RDID); + iowrite8(flash_table[hw->flash_vendor].cmd_wren, + hw->hw_addr + REG_SPI_FLASH_OP_WREN); + iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, + hw->hw_addr + REG_SPI_FLASH_OP_RDSR); + iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, + hw->hw_addr + REG_SPI_FLASH_OP_WRSR); + iowrite8(flash_table[hw->flash_vendor].cmd_read, + hw->hw_addr + REG_SPI_FLASH_OP_READ); +} + +/* + * Performs basic configuration of the adapter. + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +s32 atl1_init_hw(struct atl1_hw *hw) +{ + u32 ret_val = 0; + + /* Zero out the Multicast HASH table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + /* clear the old settings from the multicast hash table */ + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + atl1_init_flash_opcode(hw); + + if (!hw->phy_configured) { + /* enable GPHY LinkChange Interrrupt */ + ret_val = atl1_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + /* make PHY out of power-saving state */ + ret_val = atl1_phy_leave_power_saving(hw); + if (ret_val) + return ret_val; + /* Call a subroutine to configure the link */ + ret_val = atl1_setup_link(hw); + } + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
+ * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + u16 phy_data; + + /* ; --- Read PHY Specific Status Register (17) */ + ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) + return ATL1_ERR_PHY_RES; + + switch (phy_data & MII_AT001_PSSR_SPEED) { + case MII_AT001_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_AT001_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_AT001_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + dev_dbg(&pdev->dev, "error getting speed\n"); + return ATL1_ERR_PHY_SPEED; + break; + } + if (phy_data & MII_AT001_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return ATL1_SUCCESS; +} + +void atl1_set_mac_addr(struct atl1_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + /* high dword */ + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); +} diff --git a/drivers/net/atlx/atl1_hw.h b/drivers/net/atlx/atl1_hw.h new file mode 100644 index 0000000..939aa0f --- /dev/null +++ b/drivers/net/atlx/atl1_hw.h @@ -0,0 +1,946 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * There are a lot of defines in here that are unused and/or have cryptic + * names. Please leave them alone, as they're the closest thing we have + * to a spec from Attansic at present. 
*ahem* -- CHS + */ + +#ifndef _ATL1_HW_H_ +#define _ATL1_HW_H_ + +#include +#include + +struct atl1_adapter; +struct atl1_hw; + +/* function prototypes needed by multiple files */ +s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw); +s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); +s32 atl1_read_mac_addr(struct atl1_hw *hw); +s32 atl1_init_hw(struct atl1_hw *hw); +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); +s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex); +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); +void atl1_set_mac_addr(struct atl1_hw *hw); +s32 atl1_phy_enter_power_saving(struct atl1_hw *hw); +s32 atl1_reset_hw(struct atl1_hw *hw); +void atl1_check_options(struct atl1_adapter *adapter); + +/* register definitions */ +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 
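Throughout this header each multi-bit register field is described by a FIELD_MASK (the field width, right-justified at bit 0) paired with a FIELD_SHIFT, and writers mask first and then shift, as atl1_read_eeprom does with (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT. A small standalone sketch of pack/unpack helpers under that convention (the DEMO_ constants just duplicate the VPD values defined above):

    #include <stdio.h>
    #include <stdint.h>

    /* values copied from the VPD capability definitions above */
    #define DEMO_VPD_CAP_VPD_ADDR_MASK   0x7FFF
    #define DEMO_VPD_CAP_VPD_ADDR_SHIFT  16

    /* writers: mask the value to the field width, then shift it into place */
    static uint32_t demo_set_field(uint32_t reg, uint32_t mask,
                                   unsigned shift, uint32_t val)
    {
        reg &= ~(mask << shift);          /* clear the old field contents */
        reg |= (val & mask) << shift;     /* mask, then shift, as in atl1_read_eeprom */
        return reg;
    }

    /* readers: shift the field down, then mask off the neighbors */
    static uint32_t demo_get_field(uint32_t reg, uint32_t mask, unsigned shift)
    {
        return (reg >> shift) & mask;
    }

    int main(void)
    {
        uint32_t control = 0;

        control = demo_set_field(control, DEMO_VPD_CAP_VPD_ADDR_MASK,
                                 DEMO_VPD_CAP_VPD_ADDR_SHIFT, 0x100);
        printf("control = 0x%08x, addr field = 0x%x\n",
               (unsigned)control,
               (unsigned)demo_get_field(control, DEMO_VPD_CAP_VPD_ADDR_MASK,
                                        DEMO_VPD_CAP_VPD_ADDR_SHIFT));
        return 0;
    }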
+#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xff + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 + +#define REG_GPHY_ENABLE 0x140C + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_RXMAC 1 +#define IDLE_STATUS_TXMAC 2 +#define IDLE_STATUS_RXQ 4 +#define IDLE_STATUS_TXQ 8 +#define IDLE_STATUS_DMAR 0x10 +#define IDLE_STATUS_DMAW 0x20 +#define IDLE_STATUS_SMB 0x40 +#define IDLE_STATUS_CMB 0x80 + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xffff +#define MDIO_DATA_SHIFT 0 +#define MDIO_REG_ADDR_MASK 0x1f +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 +#define MDIO_SUP_PREAMBLE 0x400000 +#define MDIO_START 0x800000 +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_WAIT_TIMES 30 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 +#define BIST0_FUSE_FLAG 0x4 +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 +#define MAC_CTRL_RX_EN 2 +#define MAC_CTRL_TX_FLOW 4 +#define MAC_CTRL_RX_FLOW 8 +#define MAC_CTRL_LOOPBACK 0x10 +#define MAC_CTRL_DUPLX 0x20 +#define MAC_CTRL_ADD_CRC 0x40 +#define MAC_CTRL_PAD 0x80 +#define MAC_CTRL_LENCHK 0x100 +#define MAC_CTRL_HUGE_EN 0x200 +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 +#define MAC_CTRL_PROMIS_EN 0x8000 +#define MAC_CTRL_TX_PAUSE 0x10000 +#define MAC_CTRL_SCNT 0x20000 +#define MAC_CTRL_SRST_TX 0x40000 +#define MAC_CTRL_TX_SIMURST 0x80000 +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 +#define MAC_CTRL_TX_HUGE 0x800000 +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 +#define MAC_CTRL_MC_ALL_EN 0x2000000 +#define MAC_CTRL_BC_EN 0x4000000 +#define MAC_CTRL_DBG 0x8000000 + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 +#define MAC_IPG_IFG_MIFG_MASK 0xff +#define MAC_IPG_IFG_IPGR1_SHIFT 16 +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 +#define 
MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define REG_SRAM_RFD_ADDR 0x1500 +#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4) +#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8) +#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12) +#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16) +#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20) +#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24) +#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28) +#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32) +#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36) +#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40) +#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44) +#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48) +#define SRAM_TCPH_ADDR_MASK 0x0fff +#define SRAM_TCPH_ADDR_SHIFT 0 +#define SRAM_PATH_ADDR_MASK 0x0fff +#define SRAM_PATH_ADDR_SHIFT 16 + +/* Load Ptr Register */ +#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52) + +/* Descriptor Control register */ +#define REG_DESC_BASE_ADDR_HI 0x1540 +#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4) +#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8) +#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12) +#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16) +#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20) +#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24) +#define DESC_RFD_RING_SIZE_MASK 0x7ff +#define DESC_RFD_RING_SIZE_SHIFT 0 +#define DESC_RRD_RING_SIZE_MASK 0x7ff +#define DESC_RRD_RING_SIZE_SHIFT 16 +#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28) +#define DESC_TPD_RING_SIZE_MASK 0x3ff +#define DESC_TPD_RING_SIZE_SHIFT 0 + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 +#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f +#define 
TXQ_CTRL_EN 0x20 +#define TXQ_CTRL_ENH_MODE 0x40 +#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 +#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 +#define TX_JUMBO_TASK_TH_MASK 0x7ff +#define TX_JUMBO_TASK_TH_SHIFT 0 +#define TX_TPD_MIN_IPG_MASK 0x1f +#define TX_TPD_MIN_IPG_SHIFT 16 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15a0 +#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 +#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff +#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 +#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff +#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 +#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4) +#define RXQ_JMBOSZ_TH_MASK 0x7ff +#define RXQ_JMBOSZ_TH_SHIFT 0 +#define RXQ_JMBO_LKAH_MASK 0xf +#define RXQ_JMBO_LKAH_SHIFT 11 +#define RXQ_RRD_TIMER_MASK 0xffff +#define RXQ_RRD_TIMER_SHIFT 16 + +/* RFD flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8) +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff + +/* RRD flow control register */ +#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12) +#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15c0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_EN 0x400 +#define DMA_CTRL_DMAW_EN 0x800 + +/* CMB/SMB Control Register */ +#define REG_CSMB_CTRL 0x15d0 +#define CSMB_CTRL_CMB_NOW 1 +#define CSMB_CTRL_SMB_NOW 2 +#define CSMB_CTRL_CMB_EN 4 +#define CSMB_CTRL_SMB_EN 8 + +/* CMB DMA Write Threshold Register */ +#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4) +#define CMB_RRD_TH_SHIFT 0 +#define CMB_RRD_TH_MASK 0x7ff +#define CMB_TPD_TH_SHIFT 16 +#define CMB_TPD_TH_MASK 0x7ff + +/* RX/TX count-down timer to trigger CMB-write. 2us resolution. 
*/ +#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8) +#define CMB_RX_TM_SHIFT 0 +#define CMB_RX_TM_MASK 0xffff +#define CMB_TX_TM_SHIFT 16 +#define CMB_TX_TM_MASK 0xffff + +/* Number of packet received since last CMB write */ +#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12) + +/* Number of packet transmitted since last CMB write */ +#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16) + +/* SMB auto DMA timer register */ +#define REG_SMB_TIMER (REG_CSMB_CTRL+20) + +/* Mailbox Register */ +#define REG_MAILBOX 0x15f0 +#define MB_RFD_PROD_INDX_SHIFT 0 +#define MB_RFD_PROD_INDX_MASK 0x7ff +#define MB_RRD_CONS_INDX_SHIFT 11 +#define MB_RRD_CONS_INDX_MASK 0x7ff +#define MB_TPD_PROD_INDX_SHIFT 22 +#define MB_TPD_PROD_INDX_MASK 0x3ff + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 1 +#define ISR_TIMER 2 +#define ISR_MANUAL 4 +#define ISR_RXF_OV 8 +#define ISR_RFD_UNRUN 0x10 +#define ISR_RRD_OV 0x20 +#define ISR_TXF_UNRUN 0x40 +#define ISR_LINK 0x80 +#define ISR_HOST_RFD_UNRUN 0x100 +#define ISR_HOST_RRD_OV 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_RX_PKT 0x10000 +#define ISR_TX_PKT 0x20000 +#define ISR_TX_DMA 0x40000 +#define ISR_RX_DMA 0x80000 +#define ISR_CMB_RX 0x100000 +#define ISR_CMB_TX 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_SMB 0x20000000 +#define ISR_DIS_DMA 0x40000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +/* Normal Interrupt mask */ +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_GPHY |\ + ISR_PHY_LINKDOWN|\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_CMB_TX |\ + ISR_CMB_RX ) + +/* Debug Interrupt Mask (enable all interrupt) */ +#define IMR_DEBUG_MASK (\ + ISR_SMB |\ + ISR_TIMER |\ + ISR_MANUAL |\ + ISR_RXF_OV |\ + ISR_RFD_UNRUN |\ + ISR_RRD_OV |\ + ISR_TXF_UNRUN |\ + ISR_LINK |\ + ISR_CMB_TX |\ + ISR_CMB_RX |\ + ISR_RX_PKT |\ + ISR_TX_PKT |\ + ISR_MAC_RX |\ + ISR_MAC_TX ) + +/* Interrupt Status Register */ +#define REG_RFD_RRD_IDX 0x1800 +#define REG_TPD_IDX 0x1804 + +/* MII definition */ +/* PHY Common Register */ +#define MII_AT001_CR 0x09 +#define MII_AT001_SR 0x0A +#define MII_AT001_ESR 0x0F +#define MII_AT001_PSCR 0x10 +#define MII_AT001_PSSR 0x11 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable 
*/ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register. */ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + +/* Autoneg Advertisement Register */ +#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */ +#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */ +#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */ +#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define MII_AT001_CR_1000T_SPEED_MASK 0x0300 +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define 
MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +/* AT001 PHY Specific Control Register */ +#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 +#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */ +#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */ +#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */ +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */ +#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* AT001 PHY Specific Status Register */ +#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +/* Wake Up Filter Control */ +#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Error Codes */ +#define ATL1_SUCCESS 0 +#define ATL1_ERR_EEPROM 1 +#define ATL1_ERR_PHY 2 +#define ATL1_ERR_CONFIG 3 +#define ATL1_ERR_PARAM 4 +#define ATL1_ERR_MAC_TYPE 5 +#define ATL1_ERR_PHY_TYPE 6 +#define ATL1_ERR_PHY_SPEED 7 +#define ATL1_ERR_PHY_RES 8 + +#define SPEED_0 0xffff +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_1000M_FULL 1 +#define MEDIA_TYPE_100M_FULL 2 +#define MEDIA_TYPE_100M_HALF 3 +#define MEDIA_TYPE_10M_FULL 4 +#define 
MEDIA_TYPE_10M_HALF 5 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#define MAX_JUMBO_FRAME_SIZE 0x2800 + +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ + +/* For checksumming , the sum of all words in the EEPROM should equal 0xBABA */ +#define EEPROM_SUM 0xBABA + +#define ATL1_EEDUMP_LEN 48 + +/* Statistics counters collected by the MAC */ +struct stats_msg_block { + /* rx */ + u32 rx_ok; /* The number of good packet received. */ + u32 rx_bcast; /* The number of good broadcast packet received. */ + u32 rx_mcast; /* The number of good multicast packet received. */ + u32 rx_pause; /* The number of Pause packet received. */ + u32 rx_ctrl; /* The number of Control packet received other than Pause frame. */ + u32 rx_fcs_err; /* The number of packets with bad FCS. */ + u32 rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + u32 rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + u32 rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + u32 rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + u32 rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + u32 rx_sz_ov; /* The number of good and bad packets received that are more than MTU size Å¡C truncated by Selene. */ + u32 rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + u32 rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + u32 rx_align_err; /* Alignment Error */ + u32 rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + u32 rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + u32 rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + u32 tx_ok; /* The number of good packet transmitted. */ + u32 tx_bcast; /* The number of good broadcast packet transmitted. */ + u32 tx_mcast; /* The number of good multicast packet transmitted. */ + u32 tx_pause; /* The number of Pause packet transmitted. */ + u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + u32 tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + u32 tx_defer; /* The number of packets transmitted that is deferred. */ + u32 tx_byte_cnt; /* The number of bytes of data transmitted. 
FCS is NOT included. */ + u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + u32 tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + u32 tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + u32 tx_late_col; /* The number of packets transmitted with late collisions. */ + u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + u32 tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + u32 tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + u32 tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + u32 tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + u32 tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ + u32 smb_updated; /* 1: SMB Updated. This is used by software as the indication of the statistics update. + * Software should clear this bit as soon as retrieving the statistics information. */ +}; + +/* Coalescing Message Block */ +struct coals_msg_block { + u32 int_stats; /* interrupt status */ + u16 rrd_prod_idx; /* TRD Producer Index. */ + u16 rfd_cons_idx; /* RFD Consumer Index. */ + u16 update; /* Selene sets this bit every time it DMA the CMB to host memory. + * Software supposes to clear this bit when CMB information is processed. */ + u16 tpd_cons_idx; /* TPD Consumer Index. 
*/ +}; + +/* RRD descriptor */ +struct rx_return_desc { + u8 num_buf; /* Number of RFD buffers used by the received packet */ + u8 resved; + u16 buf_indx; /* RFD Index of the first buffer */ + union { + u32 valid; + struct { + u16 rx_chksum; + u16 pkt_size; + } xsum_sz; + } xsz; + + u16 pkt_flg; /* Packet flags */ + u16 err_flg; /* Error flags */ + u16 resved2; + u16 vlan_tag; /* VLAN TAG */ +}; + +#define PACKET_FLAG_ETH_TYPE 0x0080 +#define PACKET_FLAG_VLAN_INS 0x0100 +#define PACKET_FLAG_ERR 0x0200 +#define PACKET_FLAG_IPV4 0x0400 +#define PACKET_FLAG_UDP 0x0800 +#define PACKET_FLAG_TCP 0x1000 +#define PACKET_FLAG_BCAST 0x2000 +#define PACKET_FLAG_MCAST 0x4000 +#define PACKET_FLAG_PAUSE 0x8000 + +#define ERR_FLAG_CRC 0x0001 +#define ERR_FLAG_CODE 0x0002 +#define ERR_FLAG_DRIBBLE 0x0004 +#define ERR_FLAG_RUNT 0x0008 +#define ERR_FLAG_OV 0x0010 +#define ERR_FLAG_TRUNC 0x0020 +#define ERR_FLAG_IP_CHKSUM 0x0040 +#define ERR_FLAG_L4_CHKSUM 0x0080 +#define ERR_FLAG_LEN 0x0100 +#define ERR_FLAG_DES_ADDR 0x0200 + +/* RFD descriptor */ +struct rx_free_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buf_len; /* Size of the receive buffer in host memory, in byte */ + u16 coalese; /* Update consumer index to host after the reception of this frame */ + /* __attribute__ ((packed)) is required */ +} __attribute__ ((packed)); + +/* tsopu defines */ +#define TSO_PARAM_BUFLEN_MASK 0x3FFF +#define TSO_PARAM_BUFLEN_SHIFT 0 +#define TSO_PARAM_DMAINT_MASK 0x0001 +#define TSO_PARAM_DMAINT_SHIFT 14 +#define TSO_PARAM_PKTNT_MASK 0x0001 +#define TSO_PARAM_PKTINT_SHIFT 15 +#define TSO_PARAM_VLANTAG_MASK 0xFFFF +#define TSO_PARAM_VLAN_SHIFT 16 + +/* tsopl defines */ +#define TSO_PARAM_EOP_MASK 0x0001 +#define TSO_PARAM_EOP_SHIFT 0 +#define TSO_PARAM_COALESCE_MASK 0x0001 +#define TSO_PARAM_COALESCE_SHIFT 1 +#define TSO_PARAM_INSVLAG_MASK 0x0001 +#define TSO_PARAM_INSVLAG_SHIFT 2 +#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001 +#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3 +#define TSO_PARAM_SEGMENT_MASK 0x0001 +#define TSO_PARAM_SEGMENT_SHIFT 4 +#define TSO_PARAM_IPCKSUM_MASK 0x0001 +#define TSO_PARAM_IPCKSUM_SHIFT 5 +#define TSO_PARAM_TCPCKSUM_MASK 0x0001 +#define TSO_PARAM_TCPCKSUM_SHIFT 6 +#define TSO_PARAM_UDPCKSUM_MASK 0x0001 +#define TSO_PARAM_UDPCKSUM_SHIFT 7 +#define TSO_PARAM_VLANTAGGED_MASK 0x0001 +#define TSO_PARAM_VLANTAGGED_SHIFT 8 +#define TSO_PARAM_ETHTYPE_MASK 0x0001 +#define TSO_PARAM_ETHTYPE_SHIFT 9 +#define TSO_PARAM_IPHL_MASK 0x000F +#define TSO_PARAM_IPHL_SHIFT 10 +#define TSO_PARAM_TCPHDRLEN_MASK 0x000F +#define TSO_PARAM_TCPHDRLEN_SHIFT 14 +#define TSO_PARAM_HDRFLAG_MASK 0x0001 +#define TSO_PARAM_HDRFLAG_SHIFT 18 +#define TSO_PARAM_MSS_MASK 0x1FFF +#define TSO_PARAM_MSS_SHIFT 19 + +/* csumpu defines */ +#define CSUM_PARAM_BUFLEN_MASK 0x3FFF +#define CSUM_PARAM_BUFLEN_SHIFT 0 +#define CSUM_PARAM_DMAINT_MASK 0x0001 +#define CSUM_PARAM_DMAINT_SHIFT 14 +#define CSUM_PARAM_PKTINT_MASK 0x0001 +#define CSUM_PARAM_PKTINT_SHIFT 15 +#define CSUM_PARAM_VALANTAG_MASK 0xFFFF +#define CSUM_PARAM_VALAN_SHIFT 16 + +/* csumpl defines*/ +#define CSUM_PARAM_EOP_MASK 0x0001 +#define CSUM_PARAM_EOP_SHIFT 0 +#define CSUM_PARAM_COALESCE_MASK 0x0001 +#define CSUM_PARAM_COALESCE_SHIFT 1 +#define CSUM_PARAM_INSVLAG_MASK 0x0001 +#define CSUM_PARAM_INSVLAG_SHIFT 2 +#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001 +#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3 +#define CSUM_PARAM_SEGMENT_MASK 0x0001 +#define CSUM_PARAM_SEGMENT_SHIFT 4 +#define CSUM_PARAM_IPCKSUM_MASK 0x0001 +#define CSUM_PARAM_IPCKSUM_SHIFT 5 
+#define CSUM_PARAM_TCPCKSUM_MASK 0x0001 +#define CSUM_PARAM_TCPCKSUM_SHIFT 6 +#define CSUM_PARAM_UDPCKSUM_MASK 0x0001 +#define CSUM_PARAM_UDPCKSUM_SHIFT 7 +#define CSUM_PARAM_VLANTAGGED_MASK 0x0001 +#define CSUM_PARAM_VLANTAGGED_SHIFT 8 +#define CSUM_PARAM_ETHTYPE_MASK 0x0001 +#define CSUM_PARAM_ETHTYPE_SHIFT 9 +#define CSUM_PARAM_IPHL_MASK 0x000F +#define CSUM_PARAM_IPHL_SHIFT 10 +#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF +#define CSUM_PARAM_PLOADOFFSET_SHIFT 16 +#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF +#define CSUM_PARAM_XSUMOFFSET_SHIFT 24 + +/* TPD descriptor */ +struct tso_param { + /* The order of these declarations is important -- don't change it */ + u32 tsopu; /* tso_param upper word */ + u32 tsopl; /* tso_param lower word */ +}; + +struct csum_param { + /* The order of these declarations is important -- don't change it */ + u32 csumpu; /* csum_param upper word */ + u32 csumpl; /* csum_param lower word */ +}; + +union tpd_descr { + u64 data; + struct csum_param csum; + struct tso_param tso; +}; + +struct tx_packet_desc { + __le64 buffer_addr; + union tpd_descr desc; +}; + +/* DMA Order Settings */ +enum atl1_dma_order { + atl1_dma_ord_in = 1, + atl1_dma_ord_enh = 2, + atl1_dma_ord_out = 4 +}; + +enum atl1_dma_rcb { + atl1_rcb_64 = 0, + atl1_rcb_128 = 1 +}; + +enum atl1_dma_req_block { + atl1_dma_req_128 = 0, + atl1_dma_req_256 = 1, + atl1_dma_req_512 = 2, + atl1_dma_req_1024 = 3, + atl1_dma_req_2048 = 4, + atl1_dma_req_4096 = 5 +}; + +struct atl1_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmd_wrsr; + u8 cmd_read; + u8 cmd_program; + u8 cmd_wren; + u8 cmd_wrdi; + u8 cmd_rdsr; + u8 cmd_rdid; + u8 cmd_sector_erase; + u8 cmd_chip_erase; +}; + +#endif /* _ATL1_HW_H_ */ diff --git a/drivers/net/atlx/atl1_main.c b/drivers/net/atlx/atl1_main.c new file mode 100644 index 0000000..9200ee5 --- /dev/null +++ b/drivers/net/atlx/atl1_main.c @@ -0,0 +1,2453 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + * + * Contact Information: + * Xiong Huang + * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei, + * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA + * + * Chris Snook + * Jay Cliburn + * + * This version is adapted from the Attansic reference driver for + * inclusion in the Linux kernel. It is currently under heavy development. + * A very incomplete list of things that need to be dealt with: + * + * TODO: + * Fix TSO; tx performance is horrible with TSO enabled. + * Wake on LAN. + * Add more ethtool functions. 
+ * Fix abstruse irq enable/disable condition described here: + * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 + * + * NEEDS TESTING: + * VLAN + * multicast + * promiscuous mode + * interrupt coalescing + * SMP torture testing + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "atl1.h" + +#define DRIVER_VERSION "2.0.7" + +char atl1_driver_name[] = "atl1"; +static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver"; +static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation."; +char atl1_driver_version[] = DRIVER_VERSION; + +MODULE_AUTHOR + ("Attansic Corporation , Chris Snook , Jay Cliburn "); +MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); + +/* + * atl1_pci_tbl - PCI Device ID Table + */ +static const struct pci_device_id atl1_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); + +/* + * atl1_sw_init - Initialize general software structures (struct atl1_adapter) + * @adapter: board private structure to initialize + * + * atl1_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int __devinit atl1_sw_init(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->wol = 0; + adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; + adapter->ict = 50000; /* 100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->rfd_burst = 8; + hw->rrd_burst = 8; + hw->rfd_fetch_gap = 1; + hw->rx_jumbo_th = adapter->rx_buffer_len / 8; + hw->rx_jumbo_lkah = 1; + hw->rrd_ret_timer = 16; + hw->tpd_burst = 4; + hw->tpd_fetch_th = 16; + hw->txf_burst = 0x100; + hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; + hw->tpd_fetch_gap = 1; + hw->rcb_value = atl1_rcb_64; + hw->dma_ord = atl1_dma_ord_enh; + hw->dmar_block = atl1_dma_req_256; + hw->dmaw_block = atl1_dma_req_256; + hw->cmb_rrd = 4; + hw->cmb_tpd = 4; + hw->cmb_rx_timer = 1; /* about 2us */ + hw->cmb_tx_timer = 1; /* about 2us */ + hw->smb_timer = 100000; /* about 200ms */ + + spin_lock_init(&adapter->lock); + spin_lock_init(&adapter->mb_lock); + + return 0; +} + +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); +} + +/* + * atl1_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_mii_ioctl(struct net_device 
*netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/* + * atl1_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/* + * atl1_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); + goto err_nomem; + } + rfd_ring->buffer_info = + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); + + /* real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. + */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + goto err_nomem; + } + + memset(ring_header->desc, 0, ring_header->size); + + /* init TPD ring */ + tpd_ring->dma = ring_header->dma; + offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; + tpd_ring->dma += offset; + tpd_ring->desc = (u8 *) ring_header->desc + offset; + tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; + + /* init RFD ring */ + rfd_ring->dma = tpd_ring->dma + tpd_ring->size; + offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; + rfd_ring->dma += offset; + rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); + rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; + + + /* init RRD ring */ + rrd_ring->dma = rfd_ring->dma + rfd_ring->size; + offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; + rrd_ring->dma += offset; + rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); + rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; + + + /* init CMB */ + adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; + offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; + adapter->cmb.dma += offset; + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); + + /* init SMB */ + adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); + offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; + adapter->smb.dma += offset; + adapter->smb.smb = (struct stats_msg_block *) + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); + + return ATL1_SUCCESS; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure + */ +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + } + + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); +} + +/* + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +void atl1_free_ring_resources(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); + + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); + 
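+ /*
+  * All three descriptor rings (TPD, RFD, RRD) and the CMB and SMB blocks
+  * are carved out of the single coherent region addressed by ring_header,
+  * so the one pci_free_consistent() call above releases them all; the
+  * per-ring pointers below are cleared only to avoid leaving stale
+  * references behind.
+  */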
+ tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; + + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; + + rrd_ring->desc = NULL; + rrd_ring->dma = 0; +} + +static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) +{ + u32 value; + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + /* speed */ + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + /* preamble length */ + value |= (((u32) adapter->hw.preamble_len + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + /* vlan */ + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + /* rx checksum + if (adapter->rx_csum) + value |= MAC_CTRL_RX_CHKSUM_EN; + */ + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + /* value |= MAC_CTRL_LOOPBACK; */ + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); +} + +/* + * atl1_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1_set_mac(struct net_device *netdev, void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1_set_mac_addr(&adapter->hw); + return 0; +} + +static u32 atl1_check_link(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twice */ + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data & BMSR_LSTATUS)) { /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + dev_info(&adapter->pdev->dev, "link is down\n"); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + return ATL1_SUCCESS; + } + + /* Link Up */ + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + + switch (hw->media_type) { + case MEDIA_TYPE_1000M_FULL: + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + + /* link result is our setting */ + if (!reconfig) { + if (adapter->link_speed != speed + || adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1_setup_mac_ctrl(adapter); + dev_info(&adapter->pdev->dev, 
+ "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "full duplex" : "half duplex"); + } + if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + return ATL1_SUCCESS; + } + + /* change orignal link status */ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && + hw->media_type != MEDIA_TYPE_1000M_FULL) { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + return ATL1_SUCCESS; + } + + /* auto-neg, insert timer to re-config phy */ + if (!adapter->phy_timer_pending) { + adapter->phy_timer_pending = true; + mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); + } + + return ATL1_SUCCESS; +} + +static void atl1_check_for_link(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->lock); + adapter->phy_timer_pending = false; + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + dev_info(&adapter->pdev->dev, "%s link is down\n", + netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +/* + * atl1_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void atl1_set_multi(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); + if (netdev->flags & IFF_PROMISC) + rctl |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + + /* clear the old settings from the multicast hash table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + /* compute mc addresses' hash value ,and put it into hash table */ + for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); + atl1_hash_set(hw, hash_value); + } +} + +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + +static void set_flow_ctrl_old(struct atl1_adapter *adapter) +{ + u32 hi, lo, value; + + /* RFD Flow Control */ + value = adapter->rfd_ring.count; + hi = value / 16; + if (hi < 2) + hi = 2; + lo = value * 7 / 8; + + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = adapter->rrd_ring.count; + lo = value / 16; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +static void set_flow_ctrl_new(struct atl1_hw *hw) +{ + u32 hi, lo, value; + + /* RXF Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); + lo = value / 16; + if (lo < 192) + lo = 192; + hi = value * 7 / 8; + if (hi < lo) + hi = lo + 16; + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); + lo = value / 8; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + if (hi < lo) + hi = lo + 3; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +/* 
+ * atl1_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static u32 atl1_configure(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); + + /* set MAC Address */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | + (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + + /* tx / rx ring */ + + /* HI base address */ + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), + hw->hw_addr + REG_DESC_BASE_ADDR_HI); + /* LO base address */ + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RFD_ADDR_LO); + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RRD_ADDR_LO); + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_TPD_ADDR_LO); + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_CMB_ADDR_LO); + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_SMB_ADDR_LO); + + /* element count */ + value = adapter->rrd_ring.count; + value <<= 16; + value += adapter->rfd_ring.count; + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); + + /* Load Ptr */ + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); + + /* config Mailbox */ + value = ((atomic_read(&adapter->tpd_ring.next_to_use) + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAILBOX); + + /* config IPG/IFG */ + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) + << MAC_IPG_IFG_IPGT_SHIFT) | + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); + + /* config Half-Duplex Control */ + value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + + /* set Interrupt Moderator Timer */ + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); + + /* set Interrupt Clear Timer */ + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); + + /* set max frame size hw will accept */ + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); + + /* jumbo size & rrd retirement timer */ + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) + << RXQ_JMBOSZ_TH_SHIFT) | + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) 
hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); + + /* Flow Control */ + switch (hw->dev_rev) { + case 0x8001: + case 0x9001: + case 0x9002: + case 0x9003: + set_flow_ctrl_old(adapter); + break; + default: + set_flow_ctrl_new(hw); + break; + } + + /* config TXQ */ + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | + TXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); + + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) + << TX_JUMBO_TASK_TH_SHIFT) | + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) + << TX_TPD_MIN_IPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); + + /* config RXQ */ + value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | + RXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); + + /* config DMA Engine */ + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + DMA_CTRL_DMAW_EN; + value |= (u32) hw->dma_ord; + if (atl1_rcb_128 == hw->rcb_value) + value |= DMA_CTRL_RCB_VALUE; + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); + + /* config CMB / SMB */ + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? + hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); + + /* --- enable CMB / SMB */ + value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); + + value = ioread32(adapter->hw.hw_addr + REG_ISR); + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); + iowrite32(0, adapter->hw.hw_addr + REG_ISR); + return value; +} + +/* + * atl1_pcie_patch - Patch for PCIE module + */ +static void atl1_pcie_patch(struct atl1_adapter *adapter) +{ + u32 value; + + /* much vendor magic here */ + value = 0x6500; + iowrite32(value, adapter->hw.hw_addr + 0x12FC); + /* pcie flow control mode change */ + value = ioread32(adapter->hw.hw_addr + 0x1008); + value |= 0x8000; + iowrite32(value, adapter->hw.hw_addr + 0x1008); +} + +/* + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 + * on PCI Command register is disable. + * The function enable this bit. 
+ * Brackett, 2006/03/15 + */ +static void atl1_via_workaround(struct atl1_adapter *adapter) +{ + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); +} + +/* + * atl1_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void atl1_irq_enable(struct atl1_adapter *adapter) +{ + iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + +/* + * atl1_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void atl1_irq_disable(struct atl1_adapter *adapter) +{ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); + synchronize_irq(adapter->pdev->irq); +} + +static void atl1_clear_phy_int(struct atl1_adapter *adapter) +{ + u16 phy_data; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + atl1_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atl1_inc_smb(struct atl1_adapter *adapter) +{ + struct stats_msg_block *smb = adapter->smb.smb; + + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok; + adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); + + /* Rx Errors */ + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + + smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + + smb->rx_rxf_ov); + + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + + /* Tx Errors */ + adapter->soft_stats.tx_errors += (smb->tx_late_col + + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; + + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; + + adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; + adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; + adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; + adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; + adapter->net_stats.multicast = adapter->soft_stats.multicast; + adapter->net_stats.collisions = adapter->soft_stats.collisions; + adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; + adapter->net_stats.rx_over_errors = + 
adapter->soft_stats.rx_missed_errors; + adapter->net_stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + adapter->net_stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + adapter->net_stats.rx_missed_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; + adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + adapter->net_stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + adapter->net_stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + adapter->net_stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; +} + +/* + * atl1_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + */ +static struct net_device_stats *atl1_get_stats(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + return &adapter->net_stats; +} + +static void atl1_update_mailbox(struct atl1_adapter *adapter) +{ + unsigned long flags; + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 value; + + spin_lock_irqsave(&adapter->mb_lock, flags); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); + + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + + spin_unlock_irqrestore(&adapter->mb_lock, flags); +} + +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } + } +} + +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) +{ + u16 num_buf; + + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); +} + +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) +{ + struct pci_dev *pdev = adapter->pdev; + + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } + + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; + + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } + + /* IPv4, but hardware thinks its checksum is wrong */ + dev_printk(KERN_DEBUG, 
&pdev->dev, + "hw csum wrong, pkt_flag:%x, err_flag:%x\n", + rrd->pkt_flg, rrd->err_flg); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); + adapter->hw_csum_err++; + return; +} + +/* + * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + */ +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } + + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); + if (unlikely(!skb)) { /* Better luck next round */ + adapter->net_stats.rx_dropped++; + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; + +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; + + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; +} + +static void atl1_intr_rx(struct atl1_adapter *adapter) +{ + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; + + count = 0; + + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + + while (1) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "incomplete RRD DMA transfer\n"); + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if update RFD index */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); + + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* current rrd still not be updated */ + + break; + } +rrd_ok: + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, 0); + + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; + + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, don't need upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } + + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); + + skb_put(skb, length - ETH_FCS_LEN); + + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else + netif_rx(skb); + + /* let protocol layer free skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + + adapter->netdev->last_rx = jiffies; + } + + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); + + atl1_alloc_rx_buffers(adapter); + + /* update mailbox ? 
*/ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; + + spin_lock(&adapter->mb_lock); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } +} + +static void atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; + + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); + + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + struct tx_packet_desc *tpd; + + tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } + tpd->buffer_addr = 0; + tpd->desc.data = 0; + + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; + } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); + + if (netif_queue_stopped(adapter->netdev) + && netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); +} + +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) +{ + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 next_to_use = atomic_read(&tpd_ring->next_to_use); + return ((next_to_clean > next_to_use) ? + next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1); +} + +static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tso_param *tso) +{ + /* We enter this function holding a spinlock. 
*/ + u8 ipofst; + int err; + + if (skb_shinfo(skb)->gso_size) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return err; + } + + if (skb->protocol == ntohs(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, IPPROTO_TCP, 0); + ipofst = skb_network_offset(skb); + if (ipofst != ETH_HLEN) /* 802.3 frame */ + tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; + + tso->tsopl |= (iph->ihl & + CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; + tso->tsopl |= (tcp_hdrlen(skb) & + TSO_PARAM_TCPHDRLEN_MASK) << + TSO_PARAM_TCPHDRLEN_SHIFT; + tso->tsopl |= (skb_shinfo(skb)->gso_size & + TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; + return true; + } + } + return false; +} + +static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + struct csum_param *csum) +{ + u8 css, cso; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + cso = skb_transport_offset(skb); + css = cso + skb->csum_offset; + if (unlikely(cso & 0x1)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); + return -1; + } + csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << + CSUM_PARAM_PLOADOFFSET_SHIFT; + csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << + CSUM_PARAM_XSUMOFFSET_SHIFT; + csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; + return true; + } + + return true; +} + +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + bool tcp_seg) +{ + /* We enter this function holding a spinlock. */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct page *page; + int first_buf_len = skb->len; + unsigned long offset; + unsigned int nr_frags; + unsigned int f; + u16 tpd_next_to_use; + u16 proto_hdr_len; + u16 len12; + + first_buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + buffer_info->skb = NULL; /* put skb in last TPD */ + + if (tcp_seg) { + /* TSO/GSO */ + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = proto_hdr_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, proto_hdr_len, + PCI_DMA_TODEVICE); + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + + if (first_buf_len > proto_hdr_len) { + int i, m; + + len12 = first_buf_len - proto_hdr_len; + m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < m; i++) { + buffer_info = + &tpd_ring->buffer_info[tpd_next_to_use]; + buffer_info->skb = NULL; + buffer_info->length = + (ATL1_MAX_TX_BUF_LEN >= + len12) ? 
ATL1_MAX_TX_BUF_LEN : len12; + len12 -= buffer_info->length; + page = virt_to_page(skb->data + + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)); + offset = (unsigned long)(skb->data + + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + } + } else { + /* not TSO/GSO */ + buffer_info->length = first_buf_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, first_buf_len, PCI_DMA_TODEVICE); + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + u16 lenf, i, m; + + frag = &skb_shinfo(skb)->frags[f]; + lenf = frag->size; + + m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < m; i++) { + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + buffer_info->skb = NULL; + buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : lenf; + lenf -= buffer_info->length; + buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->length, PCI_DMA_TODEVICE); + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + } + + /* last tpd's buffer-info */ + buffer_info->skb = skb; +} + +static void atl1_tx_queue(struct atl1_adapter *adapter, int count, + union tpd_descr *descr) +{ + /* We enter this function holding a spinlock. */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + int j; + u32 val; + struct atl1_buffer *buffer_info; + struct tx_packet_desc *tpd; + u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); + + for (j = 0; j < count; j++) { + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); + tpd->desc.csum.csumpu = descr->csum.csumpu; + tpd->desc.csum.csumpl = descr->csum.csumpl; + tpd->desc.tso.tsopu = descr->tso.tsopu; + tpd->desc.tso.tsopl = descr->tso.tsopl; + tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + tpd->desc.data = descr->data; + tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & + CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT; + + val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & + TSO_PARAM_SEGMENT_MASK; + if (val && !j) + tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; + + if (j == (count - 1)) + tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + + atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); +} + +static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int len = skb->len; + int tso; + int count = 1; + int ret_val; + u32 val; + union tpd_descr param; + u16 frag_size; + u16 vlan_tag; + unsigned long flags; + unsigned int nr_frags = 0; + unsigned int mss = 0; + unsigned int f; + unsigned int proto_hdr_len; + + len -= skb->data_len; + + if (unlikely(skb->len == 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + param.data = 0; + param.tso.tsopu = 0; + param.tso.tsopl = 0; + param.csum.csumpu = 0; + param.csum.csumpl = 0; + + /* nr_frags will be nonzero if we're doing scatter/gather (SG) */ + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + frag_size = skb_shinfo(skb)->frags[f].size; + if (frag_size) + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + + /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ + mss = skb_shinfo(skb)->gso_size; + if (mss) { + if (skb->protocol == htons(ETH_P_IP)) { + proto_hdr_len = (skb_transport_offset(skb) + + tcp_hdrlen(skb)); + if (unlikely(proto_hdr_len > len)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* need additional TPD ? */ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + } + + if (!spin_trylock_irqsave(&adapter->lock, flags)) { + /* Can't get lock - tell upper layer to requeue */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { + /* not enough descriptors */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); + return NETDEV_TX_BUSY; + } + + param.data = 0; + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; + param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << + CSUM_PARAM_VALAN_SHIFT; + } + + tso = atl1_tso(adapter, skb, ¶m.tso); + if (tso < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ¶m.csum); + if (ret_val < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & + CSUM_PARAM_SEGMENT_MASK; + atl1_tx_map(adapter, skb, 1 == val); + atl1_tx_queue(adapter, count, ¶m); + netdev->trans_start = jiffies; + spin_unlock_irqrestore(&adapter->lock, flags); + atl1_update_mailbox(adapter); + return NETDEV_TX_OK; +} + +/* + * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl1_intr(int irq, void *data) +{ + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + u8 update_rx; + int max_ints = 10; + + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; + + update_rx = 0; + + do { + /* clear CMB interrupt status at once */ + adapter->cmb.cmb->int_stats = 0; + + if (status & ISR_GPHY) /* clear phy status */ + atl1_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt 
*/ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error ? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } + + /* transmit event */ + if (status & ISR_CMB_TX) + atl1_intr_tx(adapter); + + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV | ISR_CMB_RX))) { + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", status); + atl1_intr_rx(adapter); + } + + if (--max_ints < 0) + break; + + } while ((status = adapter->cmb.cmb->int_stats)); + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; +} + +/* + * atl1_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_watchdog(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); +} + +/* + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * atl1_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1_tx_timeout(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} + +/* + * Orphaned vendor comment left intact here: + * + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. 
+ * + */ +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); +} + +/* + * atl1_link_chg_task - deal with link change event Out of interrupt context + */ +static void atl1_link_chg_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, link_chg_task); + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + atl1_check_link(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atl1_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&adapter->lock, flags); + /* atl1_irq_disable(adapter); */ + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl |= MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } else { + /* disable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } + + /* atl1_irq_enable(adapter); */ + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atl1_restore_vlan(struct atl1_adapter *adapter) +{ + atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} + +int atl1_reset(struct atl1_adapter *adapter) +{ + int ret; + + ret = atl1_reset_hw(&adapter->hw); + if (ret != ATL1_SUCCESS) + return ret; + return atl1_init_hw(&adapter->hw); +} + +s32 atl1_up(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int irq_flags = IRQF_SAMPLE_RANDOM; + + /* hardware has been reset, we need to reload some things */ + atl1_set_multi(netdev); + atl1_init_ring_ptrs(adapter); + atl1_restore_vlan(adapter); + err = atl1_alloc_rx_buffers(adapter); + if (unlikely(!err)) /* no RX BUFFER allocated */ + return -ENOMEM; + + if (unlikely(atl1_configure(adapter))) { + err = -EIO; + goto err_up; + } + + err = pci_enable_msi(adapter->pdev); + if (err) { + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); + irq_flags |= IRQF_SHARED; + } + + err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, + netdev->name, netdev); + if (unlikely(err)) + goto err_up; + + mod_timer(&adapter->watchdog_timer, jiffies); + atl1_irq_enable(adapter); + atl1_check_link(adapter); + return 0; + +err_up: + pci_disable_msi(adapter->pdev); + /* free rx_buffers */ + atl1_clean_rx_ring(adapter); + return err; +} + +void atl1_down(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + adapter->phy_timer_pending = false; + + atl1_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); +} + +/* + * atl1_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The 
open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1_open(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = atl1_setup_ring_resources(adapter); + if (err) + return err; + + err = atl1_up(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + atl1_reset(adapter); + return err; +} + +/* + * atl1_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1_close(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + atl1_down(adapter); + atl1_free_ring_resources(adapter); + return 0; +} + +#ifdef CONFIG_PM +static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + if (netif_running(netdev)) + atl1_down(adapter); + + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATL1_WUFC_LNKC; + + /* reduce speed to 10/100M */ + if (wufc) { + atl1_phy_enter_power_saving(hw); + /* if resume, let driver to re- setup link */ + hw->phy_configured = false; + atl1_set_mac_addr(hw); + atl1_set_multi(netdev); + + ctrl = 0; + /* turn on magic packet wol */ + if (wufc & ATL1_WUFC_MAG) + ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + /* turn on Link change WOL */ + if (wufc & ATL1_WUFC_LNKC) + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + + /* turn on all-multi mode if wake on multicast is enabled */ + ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_DBG; + ctrl &= ~MAC_CTRL_PROMIS_EN; + if (wufc & ATL1_WUFC_MC) + ctrl |= MAC_CTRL_MC_ALL_EN; + else + ctrl &= ~MAC_CTRL_MC_ALL_EN; + + /* turn on broadcast mode if wake on-BC is enabled */ + if (wufc & ATL1_WUFC_BC) + ctrl |= MAC_CTRL_BC_EN; + else + ctrl &= ~MAC_CTRL_BC_EN; + + /* enable RX */ + ctrl |= MAC_CTRL_RX_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + } + + pci_save_state(pdev); + pci_disable_device(pdev); + + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int atl1_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + u32 ret_val; + + pci_set_power_state(pdev, 0); + pci_restore_state(pdev); + + ret_val = pci_enable_device(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + atl1_reset(adapter); + 
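/*
 * A minimal illustrative sketch (editorial, not part of the committed
 * driver), assuming only magic-packet and link-change wake are requested:
 * this is roughly how atl1_suspend() above composes REG_WOL_CTRL before
 * the device enters D3, and why the resume path clears that register just
 * above, so no stale wake events stay armed once the interface comes back
 * up.
 *
 *	u32 wufc = ATL1_WUFC_MAG | ATL1_WUFC_LNKC;
 *	u32 ctrl = 0;
 *
 *	if (wufc & ATL1_WUFC_MAG)
 *		ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
 *	if (wufc & ATL1_WUFC_LNKC)
 *		ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
 *	iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
 *
 * Multicast (ATL1_WUFC_MC) and broadcast (ATL1_WUFC_BC) wake are handled
 * separately via REG_MAC_CTRL, since they only require the receiver to be
 * left enabled in all-multi or broadcast filter mode.
 */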
+ if (netif_running(netdev)) + atl1_up(adapter); + netif_device_attach(netdev); + + atl1_via_workaround(adapter); + + return 0; +} +#else +#define atl1_suspend NULL +#define atl1_resume NULL +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + +/* + * atl1_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int __devinit atl1_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; + } + /* Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name + */ + err = pci_request_regions(pdev, atl1_driver_name); + if (err) + goto err_request_regions; + + /* Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + + adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_pci_iomap; + } + /* get device revision number */ + adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + + (REG_MASTER_CTRL + 2)); + dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); + + /* set default ring resource counts */ + adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; + adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + + adapter->mii.dev = netdev; + adapter->mii.mdio_read = mdio_read; + adapter->mii.mdio_write = mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + + netdev->open = &atl1_open; + netdev->stop = &atl1_close; + netdev->hard_start_xmit = &atl1_xmit_frame; + netdev->get_stats = &atl1_get_stats; + netdev->set_multicast_list = &atl1_set_multi; + netdev->set_mac_address = &atl1_set_mac; + netdev->change_mtu = &atl1_change_mtu; + netdev->do_ioctl = &atl1_ioctl; + netdev->tx_timeout = &atl1_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1_poll_controller; +#endif + netdev->vlan_rx_register = atl1_vlan_rx_register; + + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = 
atl1_sw_init(adapter);
+	if (err)
+		goto err_common;
+
+	netdev->features = NETIF_F_HW_CSUM;
+	netdev->features |= NETIF_F_SG;
+	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+
+	/*
+	 * FIXME - Until tso performance gets fixed, disable the feature.
+	 * Enable it with ethtool -K if desired.
+	 */
+	/* netdev->features |= NETIF_F_TSO; */
+
+	netdev->features |= NETIF_F_LLTX;
+
+	/*
+	 * Workaround for some early L1 chip revisions; final-revision
+	 * L1 hardware may not need it.
+	 */
+	/* atl1_pcie_patch(adapter); */
+
+	/* really reset GPHY core */
+	iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
+
+	/*
+	 * reset the controller to
+	 * put the device in a known good starting state
+	 */
+	if (atl1_reset_hw(&adapter->hw)) {
+		err = -EIO;
+		goto err_common;
+	}
+
+	/* copy the MAC address out of the EEPROM */
+	atl1_read_mac_addr(&adapter->hw);
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		err = -EIO;
+		goto err_common;
+	}
+
+	atl1_check_options(adapter);
+
+	/* pre-init the MAC, and setup link */
+	err = atl1_init_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		goto err_common;
+	}
+
+	atl1_pcie_patch(adapter);
+	/* assume we have no link for now */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &atl1_watchdog;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+
+	init_timer(&adapter->phy_config_timer);
+	adapter->phy_config_timer.function = &atl1_phy_config;
+	adapter->phy_config_timer.data = (unsigned long)adapter;
+	adapter->phy_timer_pending = false;
+
+	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+
+	INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);
+
+	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_common;
+
+	cards_found++;
+	atl1_via_workaround(adapter);
+	return 0;
+
+err_common:
+	pci_iounmap(pdev, adapter->hw.hw_addr);
+err_pci_iomap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_dma:
+err_request_regions:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/*
+ * atl1_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter;
+	/* Device not available. Return. */
+	if (!netdev)
+		return;
+
+	adapter = netdev_priv(netdev);
+
+	/* Some atl1 boards lack persistent storage for their MAC, and get it
+	 * from the BIOS during POST. If we've been messing with the MAC
+	 * address, we need to save the permanent one.
+ */ + if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); + atl1_set_mac_addr(&adapter->hw); + } + + iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); + unregister_netdev(netdev); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver atl1_driver = { + .name = atl1_driver_name, + .id_table = atl1_pci_tbl, + .probe = atl1_probe, + .remove = __devexit_p(atl1_remove), + .suspend = atl1_suspend, + .resume = atl1_resume +}; + +/* + * atl1_exit_module - Driver Exit Cleanup Routine + * + * atl1_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1_exit_module(void) +{ + pci_unregister_driver(&atl1_driver); +} + +/* + * atl1_init_module - Driver Registration Routine + * + * atl1_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl1_init_module(void) +{ + return pci_register_driver(&atl1_driver); +} + +module_init(atl1_init_module); +module_exit(atl1_exit_module); diff --git a/drivers/net/atlx/atl1_param.c b/drivers/net/atlx/atl1_param.c new file mode 100644 index 0000000..4246bb9 --- /dev/null +++ b/drivers/net/atlx/atl1_param.c @@ -0,0 +1,203 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include "atl1.h" + +/* + * This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL1_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } + +/* + * Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 100 (200us) + */ +static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static int num_int_mod_timer = 0; +module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); +MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); + +/* + * flash_vendor + * + * Valid Range: 0-2 + * + * 0 - Atmel + * 1 - SST + * 2 - ST + * + * Default Value: 0 + */ +static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static int num_flash_vendor = 0; +module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0); +MODULE_PARM_DESC(flash_vendor, "SPI flash vendor"); + +#define DEFAULT_INT_MOD_CNT 100 /* 200us */ +#define MAX_INT_MOD_CNT 65000 +#define MIN_INT_MOD_CNT 50 + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl1_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&pdev->dev, "%s enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&pdev->dev, "%s disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&pdev->dev, "%s set to %i\n", opt->name, + *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + + default: + break; + } + + dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/* + * atl1_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
+ */ +void __devinit atl1_check_options(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int bd = adapter->bd_number; + if (bd >= ATL1_MAX_NIC) { + dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); + dev_notice(&pdev->dev, "using defaults for all values\n"); + } + { /* Interrupt Moderate Timer */ + struct atl1_option opt = { + .type = range_option, + .name = "Interrupt Moderator Timer", + .err = "using default of " + __MODULE_STRING(DEFAULT_INT_MOD_CNT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = + {.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}} + }; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->imt = (u16) val; + } else + adapter->imt = (u16) (opt.def); + } + + { /* Flash Vendor */ + struct atl1_option opt = { + .type = range_option, + .name = "SPI Flash Vendor", + .err = "using default of " + __MODULE_STRING(FLASH_VENDOR_DEFAULT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = + {.min = FLASH_VENDOR_MIN,.max = + FLASH_VENDOR_MAX}} + }; + int val; + if (num_flash_vendor > bd) { + val = flash_vendor[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->hw.flash_vendor = (u8) val; + } else + adapter->hw.flash_vendor = (u8) (opt.def); + } +} -- cgit v1.1 From 305282ba19f81e571bd6d2dcc10ebb02e59a06ef Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:04 -0600 Subject: atl1: move common functions to atlx files The future atl2 driver and the existing atl1 driver can share certain functions and definitions. Move these shareable functions and definitions out of atl1-specific files and into atlx.c and atlx.h. Some transitory hackery will be present until atl2 is merged. Reduce the number of source files by moving ethtool, hw, and param functions from separate files into atl1_main.c, then rename it to just atl1.c. Move all atl1-specific definitions from atl1_hw.h to atl1.h. Finally, clean up to make checkpatch.pl happy. Signed-off-by: Chris Snook Signed-off-by: Jay Cliburn Signed-off-by: Jeff Garzik --- drivers/net/atlx/Makefile | 1 - drivers/net/atlx/atl1.c | 3417 +++++++++++++++++++++++++++++++++++++++ drivers/net/atlx/atl1.h | 603 ++++++- drivers/net/atlx/atl1_ethtool.c | 505 ------ drivers/net/atlx/atl1_hw.c | 720 --------- drivers/net/atlx/atl1_hw.h | 946 ----------- drivers/net/atlx/atl1_main.c | 2453 ---------------------------- drivers/net/atlx/atl1_param.c | 203 --- drivers/net/atlx/atlx.c | 433 +++++ drivers/net/atlx/atlx.h | 506 ++++++ 10 files changed, 4914 insertions(+), 4873 deletions(-) create mode 100644 drivers/net/atlx/atl1.c delete mode 100644 drivers/net/atlx/atl1_ethtool.c delete mode 100644 drivers/net/atlx/atl1_hw.c delete mode 100644 drivers/net/atlx/atl1_hw.h delete mode 100644 drivers/net/atlx/atl1_main.c delete mode 100644 drivers/net/atlx/atl1_param.c create mode 100644 drivers/net/atlx/atlx.c create mode 100644 drivers/net/atlx/atlx.h (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile index a6b707e..ca45553 100644 --- a/drivers/net/atlx/Makefile +++ b/drivers/net/atlx/Makefile @@ -1,2 +1 @@ obj-$(CONFIG_ATL1) += atl1.o -atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c new file mode 100644 index 0000000..a84c97c --- /dev/null +++ b/drivers/net/atlx/atl1.c @@ -0,0 +1,3417 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 
+ * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + * + * Contact Information: + * Xiong Huang + * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei, + * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA + * + * Chris Snook + * Jay Cliburn + * + * This version is adapted from the Attansic reference driver for + * inclusion in the Linux kernel. It is currently under heavy development. + * A very incomplete list of things that need to be dealt with: + * + * TODO: + * Fix TSO; tx performance is horrible with TSO enabled. + * Wake on LAN. + * Add more ethtool functions. + * Fix abstruse irq enable/disable condition described here: + * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 + * + * NEEDS TESTING: + * VLAN + * multicast + * promiscuous mode + * interrupt coalescing + * SMP torture testing + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "atl1.h" + +/* Temporary hack for merging atl1 and atl2 */ +#include "atlx.c" + +/* + * atl1_pci_tbl - PCI Device ID Table + */ +static const struct pci_device_id atl1_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); + +/* + * atl1_sw_init - Initialize general software structures (struct atl1_adapter) + * @adapter: board private structure to initialize + * + * atl1_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int __devinit atl1_sw_init(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->wol = 0; + adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; + adapter->ict = 50000; /* 100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->rfd_burst = 8; + hw->rrd_burst = 8; + hw->rfd_fetch_gap = 1; + hw->rx_jumbo_th = adapter->rx_buffer_len / 8; + hw->rx_jumbo_lkah = 1; + hw->rrd_ret_timer = 16; + hw->tpd_burst = 4; + hw->tpd_fetch_th = 16; + hw->txf_burst = 0x100; + hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; + hw->tpd_fetch_gap = 1; + hw->rcb_value = atl1_rcb_64; + hw->dma_ord = atl1_dma_ord_enh; + hw->dmar_block = atl1_dma_req_256; + hw->dmaw_block = atl1_dma_req_256; + hw->cmb_rrd = 4; + hw->cmb_tpd = 4; + hw->cmb_rx_timer = 1; /* about 2us */ + hw->cmb_tx_timer = 1; /* about 2us */ + hw->smb_timer = 100000; /* about 200ms */ + + spin_lock_init(&adapter->lock); + spin_lock_init(&adapter->mb_lock); + + return 0; +} + +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); +} + +/* + * atl1_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/* + * atl1_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); + goto err_nomem; + } + rfd_ring->buffer_info = + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); + + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. 
+ */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + goto err_nomem; + } + + memset(ring_header->desc, 0, ring_header->size); + + /* init TPD ring */ + tpd_ring->dma = ring_header->dma; + offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; + tpd_ring->dma += offset; + tpd_ring->desc = (u8 *) ring_header->desc + offset; + tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; + + /* init RFD ring */ + rfd_ring->dma = tpd_ring->dma + tpd_ring->size; + offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; + rfd_ring->dma += offset; + rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); + rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; + + + /* init RRD ring */ + rrd_ring->dma = rfd_ring->dma + rfd_ring->size; + offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; + rrd_ring->dma += offset; + rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); + rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; + + + /* init CMB */ + adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; + offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; + adapter->cmb.dma += offset; + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); + + /* init SMB */ + adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); + offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; + adapter->smb.dma += offset; + adapter->smb.smb = (struct stats_msg_block *) + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure + */ +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + } + + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); +} + +/* + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +void atl1_free_ring_resources(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); + + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); + + 
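/*
 * A worked example (editorial note) of the size bookkeeping behind this
 * single pci_free_consistent() call: it releases the one contiguous block
 * that atl1_setup_ring_resources() earlier in this file carved into five
 * sub-regions, the TPD, RFD and RRD descriptor rings plus the CMB and SMB
 * message blocks. Each sub-region is aligned up to an 8-byte boundary with
 *
 *	offset = (dma & 0x7) ? (8 - (dma & 0x7)) : 0;
 *
 * which wastes at most 7 bytes per region, so the extra 40 bytes
 * (8 bytes x 5 regions) added to ring_header->size always cover the
 * padding. Keeping everything in one allocation lets teardown free the
 * whole DMA area in this one call before the pointers are cleared below.
 */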
tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; + + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; + + rrd_ring->desc = NULL; + rrd_ring->dma = 0; +} + +static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) +{ + u32 value; + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + /* speed */ + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + /* preamble length */ + value |= (((u32) adapter->hw.preamble_len + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + /* vlan */ + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + /* rx checksum + if (adapter->rx_csum) + value |= MAC_CTRL_RX_CHKSUM_EN; + */ + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + /* value |= MAC_CTRL_LOOPBACK; */ + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); +} + +static u32 atl1_check_link(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twice */ + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data & BMSR_LSTATUS)) { + /* link down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + dev_info(&adapter->pdev->dev, "link is down\n"); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + return 0; + } + + /* Link Up */ + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + + switch (hw->media_type) { + case MEDIA_TYPE_1000M_FULL: + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + + /* link result is our setting */ + if (!reconfig) { + if (adapter->link_speed != speed + || adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1_setup_mac_ctrl(adapter); + dev_info(&adapter->pdev->dev, + "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "full duplex" : "half duplex"); + } + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + return 0; + } + + /* change original link status */ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && + hw->media_type != MEDIA_TYPE_1000M_FULL) { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + return 0; + } + + /* auto-neg, insert timer to re-config phy */ + if (!adapter->phy_timer_pending) { + adapter->phy_timer_pending = true; + mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); + } + + return 0; +} + +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + +static void set_flow_ctrl_old(struct atl1_adapter *adapter) +{ + u32 hi, lo, value; + + /* RFD Flow Control */ + value = adapter->rfd_ring.count; + hi = value / 16; + if (hi < 2) + hi = 2; + lo = value * 7 / 8; + + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = adapter->rrd_ring.count; + lo = value / 16; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +static void set_flow_ctrl_new(struct atl1_hw *hw) +{ + u32 hi, lo, value; + + /* RXF Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); + lo = value / 16; + if (lo < 192) + lo = 192; + hi = value * 7 / 8; + if (hi < lo) + hi = lo + 16; + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); + lo = value / 8; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + if (hi < lo) + hi = lo + 3; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & 
RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +/* + * atl1_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static u32 atl1_configure(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); + + /* set MAC Address */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | + (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + + /* tx / rx ring */ + + /* HI base address */ + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), + hw->hw_addr + REG_DESC_BASE_ADDR_HI); + /* LO base address */ + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RFD_ADDR_LO); + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RRD_ADDR_LO); + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_TPD_ADDR_LO); + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_CMB_ADDR_LO); + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_SMB_ADDR_LO); + + /* element count */ + value = adapter->rrd_ring.count; + value <<= 16; + value += adapter->rfd_ring.count; + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); + + /* Load Ptr */ + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); + + /* config Mailbox */ + value = ((atomic_read(&adapter->tpd_ring.next_to_use) + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAILBOX); + + /* config IPG/IFG */ + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) + << MAC_IPG_IFG_IPGT_SHIFT) | + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); + + /* config Half-Duplex Control */ + value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + + /* set Interrupt Moderator Timer */ + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); + + /* set Interrupt Clear Timer */ + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); + + /* set max frame size hw will accept */ + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); + + /* jumbo size & rrd retirement timer */ + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) + 
<< RXQ_JMBOSZ_TH_SHIFT) | + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); + + /* Flow Control */ + switch (hw->dev_rev) { + case 0x8001: + case 0x9001: + case 0x9002: + case 0x9003: + set_flow_ctrl_old(adapter); + break; + default: + set_flow_ctrl_new(hw); + break; + } + + /* config TXQ */ + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | + TXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); + + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) + << TX_JUMBO_TASK_TH_SHIFT) | + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) + << TX_TPD_MIN_IPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); + + /* config RXQ */ + value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | + RXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); + + /* config DMA Engine */ + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + DMA_CTRL_DMAW_EN; + value |= (u32) hw->dma_ord; + if (atl1_rcb_128 == hw->rcb_value) + value |= DMA_CTRL_RCB_VALUE; + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); + + /* config CMB / SMB */ + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? + hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); + + /* --- enable CMB / SMB */ + value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); + + value = ioread32(adapter->hw.hw_addr + REG_ISR); + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); + iowrite32(0, adapter->hw.hw_addr + REG_ISR); + return value; +} + +/* + * atl1_pcie_patch - Patch for PCIE module + */ +static void atl1_pcie_patch(struct atl1_adapter *adapter) +{ + u32 value; + + /* much vendor magic here */ + value = 0x6500; + iowrite32(value, adapter->hw.hw_addr + 0x12FC); + /* pcie flow control mode change */ + value = ioread32(adapter->hw.hw_addr + 0x1008); + value |= 0x8000; + iowrite32(value, adapter->hw.hw_addr + 0x1008); +} + +/* + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 + * on PCI Command register is disable. + * The function enable this bit. 
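+ * (That is, the BIOS can leave PCI_COMMAND_INTX_DISABLE (0x400) set
+ * after resume, keeping legacy INTx interrupts masked; the code below
+ * clears that bit to turn them back on.)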
+ * Brackett, 2006/03/15 + */ +static void atl1_via_workaround(struct atl1_adapter *adapter) +{ + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); +} + +static void atl1_inc_smb(struct atl1_adapter *adapter) +{ + struct stats_msg_block *smb = adapter->smb.smb; + + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok; + adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); + + /* Rx Errors */ + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + + smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + + smb->rx_rxf_ov); + + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + + /* Tx Errors */ + adapter->soft_stats.tx_errors += (smb->tx_late_col + + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; + + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; + + adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; + adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; + adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; + adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; + adapter->net_stats.multicast = adapter->soft_stats.multicast; + adapter->net_stats.collisions = adapter->soft_stats.collisions; + adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; + adapter->net_stats.rx_over_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + adapter->net_stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + adapter->net_stats.rx_missed_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; + adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + adapter->net_stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + adapter->net_stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + adapter->net_stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; +} + +static void atl1_update_mailbox(struct 
atl1_adapter *adapter) +{ + unsigned long flags; + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 value; + + spin_lock_irqsave(&adapter->mb_lock, flags); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); + + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + + spin_unlock_irqrestore(&adapter->mb_lock, flags); +} + +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } + } +} + +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) +{ + u16 num_buf; + + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); +} + +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) +{ + struct pci_dev *pdev = adapter->pdev; + + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } + + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; + + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } + + /* IPv4, but hardware thinks its checksum is wrong */ + dev_printk(KERN_DEBUG, &pdev->dev, + "hw csum wrong, pkt_flag:%x, err_flag:%x\n", + rrd->pkt_flg, rrd->err_flg); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); + adapter->hw_csum_err++; + return; +} + +/* + * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + */ +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } + + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); + if (unlikely(!skb)) { + /* Better luck next round */ + 
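+			/*
+			 * Out of memory: count it as an rx drop and stop
+			 * refilling; atl1_intr_rx() calls this function again
+			 * on the next receive interrupt and retries then.
+			 */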
adapter->net_stats.rx_dropped++; + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; + +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; + + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; +} + +static void atl1_intr_rx(struct atl1_adapter *adapter) +{ + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; + + count = 0; + + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + + while (1) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "incomplete RRD DMA transfer\n"); + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if update RFD index */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); + + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* current rrd still not be updated */ + + break; + } +rrd_ok: + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, 0); + + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; + + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, don't need upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } + + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); + + skb_put(skb, length - ETH_FCS_LEN); + + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + 
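+			/*
+			 * The RRD reports the tag with the priority/CFI bits
+			 * in its low nibble and the VLAN ID above them; the
+			 * shifts above rebuild the standard 802.1Q TCI layout
+			 * (atl1_xmit_frame applies the inverse transform on
+			 * transmit).
+			 */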
vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else + netif_rx(skb); + + /* let protocol layer free skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + + adapter->netdev->last_rx = jiffies; + } + + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); + + atl1_alloc_rx_buffers(adapter); + + /* update mailbox ? */ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; + + spin_lock(&adapter->mb_lock); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } +} + +static void atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; + + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); + + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + struct tx_packet_desc *tpd; + + tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } + tpd->buffer_addr = 0; + tpd->desc.data = 0; + + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; + } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); + + if (netif_queue_stopped(adapter->netdev) + && netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); +} + +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) +{ + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 next_to_use = atomic_read(&tpd_ring->next_to_use); + return ((next_to_clean > next_to_use) ? + next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1); +} + +static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tso_param *tso) +{ + /* We enter this function holding a spinlock. 
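+	 * (adapter->lock, taken with spin_trylock_irqsave() in atl1_xmit_frame)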
*/ + u8 ipofst; + int err; + + if (skb_shinfo(skb)->gso_size) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return err; + } + + if (skb->protocol == ntohs(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, IPPROTO_TCP, 0); + ipofst = skb_network_offset(skb); + if (ipofst != ETH_HLEN) /* 802.3 frame */ + tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; + + tso->tsopl |= (iph->ihl & + CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; + tso->tsopl |= (tcp_hdrlen(skb) & + TSO_PARAM_TCPHDRLEN_MASK) << + TSO_PARAM_TCPHDRLEN_SHIFT; + tso->tsopl |= (skb_shinfo(skb)->gso_size & + TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; + tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; + return true; + } + } + return false; +} + +static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + struct csum_param *csum) +{ + u8 css, cso; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + cso = skb_transport_offset(skb); + css = cso + skb->csum_offset; + if (unlikely(cso & 0x1)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); + return -1; + } + csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << + CSUM_PARAM_PLOADOFFSET_SHIFT; + csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << + CSUM_PARAM_XSUMOFFSET_SHIFT; + csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; + return true; + } + + return true; +} + +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + bool tcp_seg) +{ + /* We enter this function holding a spinlock. */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct page *page; + int first_buf_len = skb->len; + unsigned long offset; + unsigned int nr_frags; + unsigned int f; + u16 tpd_next_to_use; + u16 proto_hdr_len; + u16 len12; + + first_buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + /* put skb in last TPD */ + buffer_info->skb = NULL; + + if (tcp_seg) { + /* TSO/GSO */ + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = proto_hdr_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, proto_hdr_len, + PCI_DMA_TODEVICE); + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + + if (first_buf_len > proto_hdr_len) { + int i, m; + + len12 = first_buf_len - proto_hdr_len; + m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < m; i++) { + buffer_info = + &tpd_ring->buffer_info[tpd_next_to_use]; + buffer_info->skb = NULL; + buffer_info->length = + (ATL1_MAX_TX_BUF_LEN >= + len12) ? 
ATL1_MAX_TX_BUF_LEN : len12; + len12 -= buffer_info->length; + page = virt_to_page(skb->data + + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)); + offset = (unsigned long)(skb->data + + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + } + } else { + /* not TSO/GSO */ + buffer_info->length = first_buf_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, first_buf_len, PCI_DMA_TODEVICE); + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + u16 lenf, i, m; + + frag = &skb_shinfo(skb)->frags[f]; + lenf = frag->size; + + m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < m; i++) { + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + buffer_info->skb = NULL; + buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : lenf; + lenf -= buffer_info->length; + buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->length, PCI_DMA_TODEVICE); + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + } + + /* last tpd's buffer-info */ + buffer_info->skb = skb; +} + +static void atl1_tx_queue(struct atl1_adapter *adapter, int count, + union tpd_descr *descr) +{ + /* We enter this function holding a spinlock. */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + int j; + u32 val; + struct atl1_buffer *buffer_info; + struct tx_packet_desc *tpd; + u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); + + for (j = 0; j < count; j++) { + buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); + tpd->desc.csum.csumpu = descr->csum.csumpu; + tpd->desc.csum.csumpl = descr->csum.csumpl; + tpd->desc.tso.tsopu = descr->tso.tsopu; + tpd->desc.tso.tsopl = descr->tso.tsopl; + tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + tpd->desc.data = descr->data; + tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & + CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT; + + val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & + TSO_PARAM_SEGMENT_MASK; + if (val && !j) + tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; + + if (j == (count - 1)) + tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; + + if (++tpd_next_to_use == tpd_ring->count) + tpd_next_to_use = 0; + } + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
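+	 * The hardware is not told about the new descriptors here; the
+	 * caller (atl1_xmit_frame) drops adapter->lock and then calls
+	 * atl1_update_mailbox() to write the updated producer index to
+	 * REG_MAILBOX.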
+ */ + wmb(); + + atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); +} + +static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int len = skb->len; + int tso; + int count = 1; + int ret_val; + u32 val; + union tpd_descr param; + u16 frag_size; + u16 vlan_tag; + unsigned long flags; + unsigned int nr_frags = 0; + unsigned int mss = 0; + unsigned int f; + unsigned int proto_hdr_len; + + len -= skb->data_len; + + if (unlikely(skb->len == 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + param.data = 0; + param.tso.tsopu = 0; + param.tso.tsopl = 0; + param.csum.csumpu = 0; + param.csum.csumpl = 0; + + /* nr_frags will be nonzero if we're doing scatter/gather (SG) */ + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + frag_size = skb_shinfo(skb)->frags[f].size; + if (frag_size) + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + + /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ + mss = skb_shinfo(skb)->gso_size; + if (mss) { + if (skb->protocol == htons(ETH_P_IP)) { + proto_hdr_len = (skb_transport_offset(skb) + + tcp_hdrlen(skb)); + if (unlikely(proto_hdr_len > len)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* need additional TPD ? */ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + } + + if (!spin_trylock_irqsave(&adapter->lock, flags)) { + /* Can't get lock - tell upper layer to requeue */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { + /* not enough descriptors */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); + return NETDEV_TX_BUSY; + } + + param.data = 0; + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; + param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << + CSUM_PARAM_VALAN_SHIFT; + } + + tso = atl1_tso(adapter, skb, ¶m.tso); + if (tso < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ¶m.csum); + if (ret_val < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & + CSUM_PARAM_SEGMENT_MASK; + atl1_tx_map(adapter, skb, 1 == val); + atl1_tx_queue(adapter, count, ¶m); + netdev->trans_start = jiffies; + spin_unlock_irqrestore(&adapter->lock, flags); + atl1_update_mailbox(adapter); + return NETDEV_TX_OK; +} + +/* + * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl1_intr(int irq, void *data) +{ + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + u8 update_rx; + int max_ints = 10; + + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; + + update_rx = 0; + + do { + /* clear CMB interrupt status at once */ + adapter->cmb.cmb->int_stats = 0; + + if (status & ISR_GPHY) /* clear phy status */ + atlx_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt 
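+		 * (writing the pending status bits back to REG_ISR
+		 * acknowledges them, and ISR_DIS_INT keeps further interrupts
+		 * masked; they are re-enabled at the end of the handler with
+		 * ISR_DIS_SMB | ISR_DIS_DMA)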
*/ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error ? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } + + /* transmit event */ + if (status & ISR_CMB_TX) + atl1_intr_tx(adapter); + + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV | ISR_CMB_RX))) { + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", status); + atl1_intr_rx(adapter); + } + + if (--max_ints < 0) + break; + + } while ((status = adapter->cmb.cmb->int_stats)); + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; +} + +/* + * atl1_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_watchdog(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); +} + +/* + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * Orphaned vendor comment left intact here: + * + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. 
+ * + */ +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); +} + +int atl1_reset(struct atl1_adapter *adapter) +{ + int ret; + ret = atl1_reset_hw(&adapter->hw); + if (ret) + return ret; + return atl1_init_hw(&adapter->hw); +} + +s32 atl1_up(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int irq_flags = IRQF_SAMPLE_RANDOM; + + /* hardware has been reset, we need to reload some things */ + atlx_set_multi(netdev); + atl1_init_ring_ptrs(adapter); + atlx_restore_vlan(adapter); + err = atl1_alloc_rx_buffers(adapter); + if (unlikely(!err)) + /* no RX BUFFER allocated */ + return -ENOMEM; + + if (unlikely(atl1_configure(adapter))) { + err = -EIO; + goto err_up; + } + + err = pci_enable_msi(adapter->pdev); + if (err) { + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); + irq_flags |= IRQF_SHARED; + } + + err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, + netdev->name, netdev); + if (unlikely(err)) + goto err_up; + + mod_timer(&adapter->watchdog_timer, jiffies); + atlx_irq_enable(adapter); + atl1_check_link(adapter); + return 0; + +err_up: + pci_disable_msi(adapter->pdev); + /* free rx_buffers */ + atl1_clean_rx_ring(adapter); + return err; +} + +void atl1_down(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + adapter->phy_timer_pending = false; + + atlx_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); +} + +/* + * atl1_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1_open(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = atl1_setup_ring_resources(adapter); + if (err) + return err; + + err = atl1_up(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + atl1_reset(adapter); + return err; +} + +/* + * atl1_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int atl1_close(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + atl1_down(adapter); + atl1_free_ring_resources(adapter); + return 0; +} + +#ifdef CONFIG_PM +static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + if (netif_running(netdev)) + atl1_down(adapter); + + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATLX_WUFC_LNKC; + + /* reduce speed to 10/100M */ + if (wufc) { + atl1_phy_enter_power_saving(hw); + /* if resume, let driver to re- setup link */ + hw->phy_configured = false; + atl1_set_mac_addr(hw); + atlx_set_multi(netdev); + + ctrl = 0; + /* turn on magic packet wol */ + if (wufc & ATLX_WUFC_MAG) + ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + /* turn on Link change WOL */ + if (wufc & ATLX_WUFC_LNKC) + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + + /* turn on all-multi mode if wake on multicast is enabled */ + ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_DBG; + ctrl &= ~MAC_CTRL_PROMIS_EN; + if (wufc & ATLX_WUFC_MC) + ctrl |= MAC_CTRL_MC_ALL_EN; + else + ctrl &= ~MAC_CTRL_MC_ALL_EN; + + /* turn on broadcast mode if wake on-BC is enabled */ + if (wufc & ATLX_WUFC_BC) + ctrl |= MAC_CTRL_BC_EN; + else + ctrl &= ~MAC_CTRL_BC_EN; + + /* enable RX */ + ctrl |= MAC_CTRL_RX_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + } + + pci_save_state(pdev); + pci_disable_device(pdev); + + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int atl1_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* FIXME: check and handle */ + err = pci_enable_device(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + atl1_reset(adapter); + + if (netif_running(netdev)) + atl1_up(adapter); + netif_device_attach(netdev); + + atl1_via_workaround(adapter); + + return 0; +} +#else +#define atl1_suspend NULL +#define atl1_resume NULL +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + +/* + * atl1_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ */ +static int __devinit atl1_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; + } + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name + */ + err = pci_request_regions(pdev, ATLX_DRIVER_NAME); + if (err) + goto err_request_regions; + + /* + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + + adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_pci_iomap; + } + /* get device revision number */ + adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + + (REG_MASTER_CTRL + 2)); + dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + + /* set default ring resource counts */ + adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; + adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + + adapter->mii.dev = netdev; + adapter->mii.mdio_read = mdio_read; + adapter->mii.mdio_write = mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + + netdev->open = &atl1_open; + netdev->stop = &atl1_close; + netdev->hard_start_xmit = &atl1_xmit_frame; + netdev->get_stats = &atlx_get_stats; + netdev->set_multicast_list = &atlx_set_multi; + netdev->set_mac_address = &atl1_set_mac; + netdev->change_mtu = &atl1_change_mtu; + netdev->do_ioctl = &atlx_ioctl; + netdev->tx_timeout = &atlx_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1_poll_controller; +#endif + netdev->vlan_rx_register = atlx_vlan_rx_register; + + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = atl1_sw_init(adapter); + if (err) + goto err_common; + + netdev->features = NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SG; + netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); + + /* + * FIXME - Until tso performance gets fixed, disable the feature. + * Enable it with ethtool -K if desired. 
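+	 * (for example: ethtool -K eth0 tso on, where eth0 stands in for
+	 * the actual interface name)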
+ */ + /* netdev->features |= NETIF_F_TSO; */ + + netdev->features |= NETIF_F_LLTX; + + /* + * patch for some L1 of old version, + * the final version of L1 may not need these + * patches + */ + /* atl1_pcie_patch(adapter); */ + + /* really reset GPHY core */ + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + + /* + * reset the controller to + * put the device in a known good starting state + */ + if (atl1_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_common; + } + + /* copy the MAC address out of the EEPROM */ + atl1_read_mac_addr(&adapter->hw); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EIO; + goto err_common; + } + + atl1_check_options(adapter); + + /* pre-init the MAC, and setup link */ + err = atl1_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_common; + } + + atl1_pcie_patch(adapter); + /* assume we have no link for now */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &atl1_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + init_timer(&adapter->phy_config_timer); + adapter->phy_config_timer.function = &atl1_phy_config; + adapter->phy_config_timer.data = (unsigned long)adapter; + adapter->phy_timer_pending = false; + + INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); + + INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); + + INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); + + err = register_netdev(netdev); + if (err) + goto err_common; + + cards_found++; + atl1_via_workaround(adapter); + return 0; + +err_common: + pci_iounmap(pdev, adapter->hw.hw_addr); +err_pci_iomap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_dma: +err_request_regions: + pci_disable_device(pdev); + return err; +} + +/* + * atl1_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void __devexit atl1_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter; + /* Device not available. Return. */ + if (!netdev) + return; + + adapter = netdev_priv(netdev); + + /* + * Some atl1 boards lack persistent storage for their MAC, and get it + * from the BIOS during POST. If we've been messing with the MAC + * address, we need to save the permanent one. + */ + if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); + atl1_set_mac_addr(&adapter->hw); + } + + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + unregister_netdev(netdev); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver atl1_driver = { + .name = ATLX_DRIVER_NAME, + .id_table = atl1_pci_tbl, + .probe = atl1_probe, + .remove = __devexit_p(atl1_remove), + .suspend = atl1_suspend, + .resume = atl1_resume +}; + +/* + * atl1_exit_module - Driver Exit Cleanup Routine + * + * atl1_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit atl1_exit_module(void) +{ + pci_unregister_driver(&atl1_driver); +} + +/* + * atl1_init_module - Driver Registration Routine + * + * atl1_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl1_init_module(void) +{ + return pci_register_driver(&atl1_driver); +} + +module_init(atl1_init_module); +module_exit(atl1_exit_module); + +struct atl1_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ATL1_STAT(m) \ + sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) + +static struct atl1_stats atl1_gstrings_stats[] = { + {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, + {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, + {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, + {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, + {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, + {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, + {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, + {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, + {"multicast", ATL1_STAT(soft_stats.multicast)}, + {"collisions", ATL1_STAT(soft_stats.collisions)}, + {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, + {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, + {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, + {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, + {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, + {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, + {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, + {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, + {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, + {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, + {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, + {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, + {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, + {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, + {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, + {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, + {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, + {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} +}; + +static void atl1_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int i; + char *p; + + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; + data[i] = (atl1_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + +} + +static int atl1_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static int atl1_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + ecmd->advertising |= ADVERTISED_Autoneg; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } else + ecmd->advertising |= (ADVERTISED_1000baseT_Full); + } + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + u16 link_speed, link_duplex; + atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); + ecmd->speed = link_speed; + if (link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int atl1_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u16 phy_data; + int ret_val = 0; + u16 old_media_type = hw->media_type; + + if (netif_running(adapter->netdev)) { + dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n"); + atl1_down(adapter); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + else { + if (ecmd->speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + dev_warn(&adapter->pdev->dev, + "can't force to 1000M half duplex\n"); + ret_val = -EINVAL; + goto exit_sset; + } + hw->media_type = MEDIA_TYPE_1000M_FULL; + } else if (ecmd->speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_100M_FULL; + else + hw->media_type = MEDIA_TYPE_100M_HALF; + } else { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_10M_FULL; + else + hw->media_type = MEDIA_TYPE_10M_HALF; + } + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ecmd->advertising = + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + case MEDIA_TYPE_1000M_FULL: + ecmd->advertising = + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + default: + ecmd->advertising = 0; + break; + } + if (atl1_phy_setup_autoneg_adv(hw)) { + ret_val = -EINVAL; + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); + goto exit_sset; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + 
MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); +exit_sset: + if (ret_val) + hw->media_type = old_media_type; + + if (netif_running(adapter->netdev)) { + dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n"); + atl1_up(adapter); + } else if (!ret_val) { + dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n"); + atl1_reset(adapter); + } + return ret_val; +} + +static void atl1_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); + strncpy(drvinfo->version, ATLX_DRIVER_VERSION, + sizeof(drvinfo->version)); + strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = ATL1_EEDUMP_LEN; +} + +static void atl1_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + if (adapter->wol & ATLX_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & ATLX_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & ATLX_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + return; +} + +static int atl1_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + adapter->wol = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= ATLX_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= ATLX_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= ATLX_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; + return 0; +} + +static void atl1_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *txdr = &adapter->tpd_ring; + struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; + + ring->rx_max_pending = ATL1_MAX_RFD; + ring->tx_max_pending = ATL1_MAX_TPD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int atl1_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; + struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; + struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; + + struct atl1_tpd_ring tpd_old, tpd_new; + struct atl1_rfd_ring rfd_old, rfd_new; + struct atl1_rrd_ring rrd_old, rrd_new; + struct atl1_ring_header rhdr_old, rhdr_new; + int err; + + tpd_old = adapter->tpd_ring; + rfd_old = adapter->rfd_ring; + rrd_old = adapter->rrd_ring; + rhdr_old = adapter->ring_header; + + if (netif_running(adapter->netdev)) + atl1_down(adapter); + + rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); + rfdr->count = 
rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : + rfdr->count; + rfdr->count = (rfdr->count + 3) & ~3; + rrdr->count = rfdr->count; + + tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); + tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : + tpdr->count; + tpdr->count = (tpdr->count + 3) & ~3; + + if (netif_running(adapter->netdev)) { + /* try to get new resources before deleting old */ + err = atl1_setup_ring_resources(adapter); + if (err) + goto err_setup_ring; + + /* + * save the new, restore the old in order to free it, + * then restore the new back again + */ + + rfd_new = adapter->rfd_ring; + rrd_new = adapter->rrd_ring; + tpd_new = adapter->tpd_ring; + rhdr_new = adapter->ring_header; + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_free_ring_resources(adapter); + adapter->rfd_ring = rfd_new; + adapter->rrd_ring = rrd_new; + adapter->tpd_ring = tpd_new; + adapter->ring_header = rhdr_new; + + err = atl1_up(adapter); + if (err) + return err; + } + return 0; + +err_setup_ring: + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_up(adapter); + return err; +} + +static void atl1_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + epause->rx_pause = 1; + epause->tx_pause = 1; +} + +static int atl1_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + + epause->rx_pause = 1; + epause->tx_pause = 1; + + return 0; +} + +/* FIXME: is this right? 
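atl1_set_ringparam() above clamps each requested descriptor count into the ATL1_MIN_*/ATL1_MAX_* range and rounds it up to a multiple of four with (count + 3) & ~3 before swapping the freshly allocated rings in. A standalone sketch of just that clamp-and-round step, reusing the RFD limits from the header (illustrative only, not part of this patch):

#include <stdio.h>
#include <stdint.h>

#define ATL1_MIN_RFD    128
#define ATL1_MAX_RFD    2048

/* clamp to [lo, hi], then round up to a multiple of 4 as the driver does */
static uint32_t ring_size(uint32_t requested, uint32_t lo, uint32_t hi)
{
        uint32_t n = requested < lo ? lo : requested;

        if (n > hi)
                n = hi;
        return (n + 3) & ~3u;
}

int main(void)
{
        /* 130 -> 132, 9999 -> 2048, 0 -> 128 */
        printf("%u %u %u\n",
               (unsigned)ring_size(130, ATL1_MIN_RFD, ATL1_MAX_RFD),
               (unsigned)ring_size(9999, ATL1_MIN_RFD, ATL1_MAX_RFD),
               (unsigned)ring_size(0, ATL1_MIN_RFD, ATL1_MAX_RFD));
        return 0;
}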
-- CHS */ +static u32 atl1_get_rx_csum(struct net_device *netdev) +{ + return 1; +} + +static void atl1_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + memcpy(p, atl1_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int atl1_nway_reset(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (netif_running(netdev)) { + u16 phy_data; + atl1_down(adapter); + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + } else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + atl1_up(adapter); + } + return 0; +} + +const struct ethtool_ops atl1_ethtool_ops = { + .get_settings = atl1_get_settings, + .set_settings = atl1_set_settings, + .get_drvinfo = atl1_get_drvinfo, + .get_wol = atl1_get_wol, + .set_wol = atl1_set_wol, + .get_ringparam = atl1_get_ringparam, + .set_ringparam = atl1_set_ringparam, + .get_pauseparam = atl1_get_pauseparam, + .set_pauseparam = atl1_set_pauseparam, + .get_rx_csum = atl1_get_rx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_link = ethtool_op_get_link, + .set_sg = ethtool_op_set_sg, + .get_strings = atl1_get_strings, + .nway_reset = atl1_nway_reset, + .get_ethtool_stats = atl1_get_ethtool_stats, + .get_sset_count = atl1_get_sset_count, + .set_tso = ethtool_op_set_tso, +}; + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +s32 atl1_reset_hw(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + u32 icr; + int i; + + /* + * Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* + * iowrite32(0, hw->hw_addr + REG_IMR); + * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); + */ + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); + ioread32(hw->hw_addr + REG_MASTER_CTRL); + + iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); + ioread16(hw->hw_addr + REG_PHY_ENABLE); + + /* delay about 1ms */ + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); + if (!icr) + break; + /* delay 1 ms */ + msleep(1); + /* FIXME: still the right way to do this? 
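atl1_set_settings() and atl1_nway_reset() above both reduce the requested link mode to a single MII_BMCR value: reset plus autoneg-enable for autosensing or 1000M, reset plus explicit speed/duplex bits for a forced mode. A user-space sketch of that mapping using the standard MII control-register bit values, redefined locally so it compiles anywhere (not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* standard MII control register bits, as in <linux/mii.h> */
#define BMCR_FULLDPLX   0x0100
#define BMCR_ANENABLE   0x1000
#define BMCR_SPEED100   0x2000
#define BMCR_RESET      0x8000

enum media { M_AUTO, M_1000F, M_100F, M_100H, M_10F, M_10H };

static uint16_t bmcr_for_media(enum media m)
{
        switch (m) {
        case M_AUTO:
        case M_1000F:           /* 1000BASE-T is only reached via autoneg */
                return BMCR_RESET | BMCR_ANENABLE;
        case M_100F:
                return BMCR_RESET | BMCR_SPEED100 | BMCR_FULLDPLX;
        case M_100H:
                return BMCR_RESET | BMCR_SPEED100;
        case M_10F:
                return BMCR_RESET | BMCR_FULLDPLX;
        default:                /* M_10H */
                return BMCR_RESET;
        }
}

int main(void)
{
        /* forced 100M full duplex -> 0xa100 */
        printf("0x%04x\n", (unsigned)bmcr_for_media(M_100F));
        return 0;
}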
*/ + cpu_relax(); + } + + if (icr) { + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + return icr; + } + + return 0; +} + +/* function about EEPROM + * + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl1_check_eeprom_exist(struct atl1_hw *hw) +{ + u32 value; + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + } + + value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + /* address do not align */ + return false; + + iowrite32(0, hw->hw_addr + REG_VPD_DATA); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + iowrite32(control, hw->hw_addr + REG_VPD_CAP); + ioread32(hw->hw_addr + REG_VPD_CAP); + + for (i = 0; i < 10; i++) { + msleep(2); + control = ioread32(hw->hw_addr + REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); + return true; + } + /* timeout */ + return false; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << + MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16) val; + return 0; + } + return ATLX_ERR_PHY; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + iowrite32(0, hw->hw_addr + REG_SPI_DATA); + iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & + SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & + SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & + SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & + SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << + SPI_FLASH_CTRL_INS_SHIFT; + + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + + value |= SPI_FLASH_CTRL_START; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + + for (i = 0; i < 10; i++) { + msleep(1); + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ioread32(hw->hw_addr + REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1_get_permanent_address(struct atl1_hw *hw) +{ + u32 addr[2]; + u32 i, control; + u16 reg; + u8 eth_addr[ETH_ALEN]; + bool 
key_valid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1_check_eeprom_exist(hw)) { + reg = 0; + key_valid = false; + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl1_read_eeprom(hw, i + 0x100, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + break; + } else + /* read error */ + break; + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + return 1; + } + + /* see if SPI FLAGS exist ? */ + addr[0] = addr[1] = 0; + reg = 0; + key_valid = false; + i = 0; + while (1) { + if (atl1_spi_read(hw, i + 0x1f000, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + /* data end */ + break; + } else + /* read error */ + break; + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + /* + * On some motherboards, the MAC address is written by the + * BIOS directly to the MAC register during POST, and is + * not stored in eeprom. If all else thus far has failed + * to fetch the permanent MAC address, try reading it directly. + */ + addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); + addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * hw - Struct containing variables accessed by shared code + */ +s32 atl1_read_mac_addr(struct atl1_hw *hw) +{ + u16 i; + + if (atl1_get_permanent_address(hw)) + random_ether_addr(hw->perm_mac_addr); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return 0; +} + +/* + * Hashes an address to determine its location in the multicast table + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl1_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) +{ + u32 crc32, value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. 
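atl1_hash_mc_addr() above runs ether_crc_le() over the six address bytes and then mirrors the 32-bit result bit-for-bit, so bit 0 becomes bit 31 and so on; the mirrored value is what atl1_hash_set() indexes with. A self-contained sketch of just the mirroring step (not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* mirror a 32-bit value bit-for-bit: bit 0 <-> bit 31, bit 1 <-> bit 30, ...
 * exactly what the driver's 32-iteration loop does to the CRC */
static uint32_t bit_reverse32(uint32_t x)
{
        uint32_t out = 0;
        int i;

        for (i = 0; i < 32; i++)
                out |= ((x >> i) & 1u) << (31 - i);
        return out;
}

int main(void)
{
        /* 0x00000001 -> 0x80000000, 0x12345678 -> 0x1e6a2c48 */
        printf("0x%08x 0x%08x\n",
               (unsigned)bit_reverse32(0x1),
               (unsigned)bit_reverse32(0x12345678));
        return 0;
}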
So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + mta |= (1 << hash_bit); + iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return ATLX_ERR_PHY; +} + +/* + * Make L001's PHY out of Power Saving State (bug) + * hw - Struct containing variables accessed by shared code + * when power on, L001's PHY always on Power saving State + * (Gigabit Link forbidden) + */ +static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +{ + s32 ret; + ret = atl1_write_phy_reg(hw, 29, 0x0029); + if (ret) + return ret; + return atl1_write_phy_reg(hw, 30, 0); +} + +/* + *TODO: do something or get rid of this + */ +s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) +{ +/* s32 ret_val; + * u16 phy_data; + */ + +/* + ret_val = atl1_write_phy_reg(hw, ...); + ret_val = atl1_write_phy_reg(hw, ...); + .... +*/ + return 0; +} + +/* + * Resets the PHY and make all config validate + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl1_phy_reset(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + u16 phy_data; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + + ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down! 
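atl1_hash_set() above picks the word with bit 31 of the (already mirrored) hash and the bit within that word with bits 26..30, so only the top six bits select among the 64 slots; the wording about the "upper 7 bits" appears to be carried over from a driver with a larger multicast table. A sketch of the same index math on a plain two-word array (not part of this patch):

#include <stdio.h>
#include <stdint.h>

static uint32_t mta[2];         /* stands in for the two RX hash table registers */

static void hash_set(uint32_t hash_value)
{
        uint32_t reg = (hash_value >> 31) & 0x1;        /* which 32-bit word   */
        uint32_t bit = (hash_value >> 26) & 0x1F;       /* which bit inside it */

        mta[reg] |= 1u << bit;
}

int main(void)
{
        hash_set(0x80000000);   /* -> word 1, bit 0 */
        hash_set(0x04000000);   /* -> word 0, bit 1 */
        printf("mta[0]=0x%08x mta[1]=0x%08x\n",
               (unsigned)mta[0], (unsigned)mta[1]);
        return 0;
}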
*/ + dev_dbg(&pdev->dev, "pcie phy link down\n"); + + for (i = 0; i < 25; i++) { + msleep(1); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if ((val & (MDIO_START | MDIO_BUSY)) != 0) { + dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); + return ret_val; + } + } + return 0; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * hw - Struct containing variables accessed by shared code + */ +s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + s16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; + + /* + * Need to parse media_type and set up + * the appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_1000M_FULL: + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + break; + + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + + return 0; +} + +/* + * Configures link settings. + * hw - Struct containing variables accessed by shared code + * Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + */ +static s32 atl1_setup_link(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + + /* + * Options: + * PHY will advertise value(s) parsed from + * autoneg_advertised and fc + * no matter what autoneg is , We will not wait link result. 
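atl1_phy_setup_autoneg_adv() above clears the speed bits in the advertisement and 1000BASE-T control values, sets only the capabilities implied by hw->media_type, and always adds symmetric and asymmetric pause before writing the registers. A compact sketch of that composition using the standard MII advertisement bit values (not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* standard MII_ADVERTISE bits */
#define ADV_10HALF      0x0020
#define ADV_10FULL      0x0040
#define ADV_100HALF     0x0080
#define ADV_100FULL     0x0100
#define ADV_PAUSE       0x0400
#define ADV_ASYM_PAUSE  0x0800

enum media { M_AUTO, M_100F, M_100H, M_10F, M_10H };

static uint16_t build_adv(enum media m)
{
        uint16_t adv = 0;

        switch (m) {
        case M_AUTO:
                adv |= ADV_10HALF | ADV_10FULL | ADV_100HALF | ADV_100FULL;
                break;
        case M_100F:
                adv |= ADV_100FULL;
                break;
        case M_100H:
                adv |= ADV_100HALF;
                break;
        case M_10F:
                adv |= ADV_10FULL;
                break;
        default:                /* M_10H */
                adv |= ADV_10HALF;
                break;
        }
        /* flow control is advertised unconditionally, as in the driver */
        return adv | ADV_PAUSE | ADV_ASYM_PAUSE;
}

int main(void)
{
        /* autosensing advertises everything plus pause -> 0x0de0 */
        printf("0x%04x\n", (unsigned)build_adv(M_AUTO));
        return 0;
}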
+ */ + ret_val = atl1_phy_setup_autoneg_adv(hw); + if (ret_val) { + dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); + return ret_val; + } + /* SW.Reset , En-Auto-Neg if needed */ + ret_val = atl1_phy_reset(hw); + if (ret_val) { + dev_dbg(&pdev->dev, "error resetting phy\n"); + return ret_val; + } + hw->phy_configured = true; + return ret_val; +} + +static void atl1_init_flash_opcode(struct atl1_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + /* Atmel */ + hw->flash_vendor = 0; + + /* Init OP table */ + iowrite8(flash_table[hw->flash_vendor].cmd_program, + hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); + iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, + hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, + hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_rdid, + hw->hw_addr + REG_SPI_FLASH_OP_RDID); + iowrite8(flash_table[hw->flash_vendor].cmd_wren, + hw->hw_addr + REG_SPI_FLASH_OP_WREN); + iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, + hw->hw_addr + REG_SPI_FLASH_OP_RDSR); + iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, + hw->hw_addr + REG_SPI_FLASH_OP_WRSR); + iowrite8(flash_table[hw->flash_vendor].cmd_read, + hw->hw_addr + REG_SPI_FLASH_OP_READ); +} + +/* + * Performs basic configuration of the adapter. + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +s32 atl1_init_hw(struct atl1_hw *hw) +{ + u32 ret_val = 0; + + /* Zero out the Multicast HASH table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + /* clear the old settings from the multicast hash table */ + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + atl1_init_flash_opcode(hw); + + if (!hw->phy_configured) { + /* enable GPHY LinkChange Interrrupt */ + ret_val = atl1_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + /* make PHY out of power-saving state */ + ret_val = atl1_phy_leave_power_saving(hw); + if (ret_val) + return ret_val; + /* Call a subroutine to configure the link */ + ret_val = atl1_setup_link(hw); + } + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
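atl1_read_phy_reg(), atl1_write_phy_reg() and atl1_phy_reset() above all share the same bounded-poll shape: kick off an MDIO operation, then re-read REG_MDIO_CTRL a fixed number of times until MDIO_START and MDIO_BUSY clear, treating anything else as a timeout. A generic user-space sketch of that pattern (the callback and bit values below are placeholders, not the driver's API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* re-read reg() until none of busy_bits are set; give up after `tries` reads */
static bool poll_until_clear(uint32_t (*reg)(void), uint32_t busy_bits, int tries)
{
        while (tries-- > 0) {
                if (!(reg() & busy_bits))
                        return true;
                /* the driver sleeps here: udelay(2) or msleep(1) per attempt */
        }
        return false;
}

/* fake register that reports "busy" for the first few reads */
static uint32_t fake_mdio_ctrl(void)
{
        static int reads;

        return (reads++ < 3) ? 0x3 : 0x0;       /* bits 0/1 stand in for START|BUSY */
}

int main(void)
{
        printf("%s\n", poll_until_clear(fake_mdio_ctrl, 0x3, 30) ?
               "idle" : "timeout");
        return 0;
}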
+ * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) +{ + struct pci_dev *pdev = hw->back->pdev; + s32 ret_val; + u16 phy_data; + + /* ; --- Read PHY Specific Status Register (17) */ + ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; + + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + dev_dbg(&pdev->dev, "error getting speed\n"); + return ATLX_ERR_PHY_SPEED; + break; + } + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +void atl1_set_mac_addr(struct atl1_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + /* high dword */ + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); +} diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index ff4765f6..538948d 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -1,6 +1,6 @@ /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook + * Copyright(c) 2006 - 2007 Chris Snook * Copyright(c) 2006 Jay Cliburn * * Derived from Intel e1000 driver @@ -21,26 +21,559 @@ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
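atl1_set_mac_addr() above splits the six-byte station address across two registers exactly as its in-code example says: for 00-0B-6A-F6-00-DC the low dword is 0x6AF600DC (bytes 2..5) and the high dword is 0x0000000B (bytes 0..1). A standalone check of that packing (not part of this patch):

#include <stdio.h>
#include <stdint.h>

static void pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        *hi = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
        uint32_t lo, hi;

        pack_mac(mac, &lo, &hi);
        /* prints low=0x6af600dc high=0x0000000b */
        printf("low=0x%08x high=0x%08x\n", (unsigned)lo, (unsigned)hi);
        return 0;
}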
*/ -#ifndef _ATL1_H_ -#define _ATL1_H_ +#ifndef ATL1_H +#define ATL1_H -#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +#define ATLX_DRIVER_NAME "atl1" -#include "atl1_hw.h" +MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver"); + +#define atlx_adapter atl1_adapter +#define atlx_check_for_link atl1_check_for_link +#define atlx_check_link atl1_check_link +#define atlx_hash_mc_addr atl1_hash_mc_addr +#define atlx_hash_set atl1_hash_set +#define atlx_hw atl1_hw +#define atlx_mii_ioctl atl1_mii_ioctl +#define atlx_read_phy_reg atl1_read_phy_reg +#define atlx_set_mac atl1_set_mac +#define atlx_set_mac_addr atl1_set_mac_addr + +struct atl1_adapter; +struct atl1_hw; /* function prototypes needed by multiple files */ +s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw); +s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); +s32 atl1_read_mac_addr(struct atl1_hw *hw); +s32 atl1_init_hw(struct atl1_hw *hw); +s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); +s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex); +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); +void atl1_set_mac_addr(struct atl1_hw *hw); +s32 atl1_phy_enter_power_saving(struct atl1_hw *hw); +s32 atl1_reset_hw(struct atl1_hw *hw); +void atl1_check_options(struct atl1_adapter *adapter); +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static u32 atl1_check_link(struct atl1_adapter *adapter); s32 atl1_up(struct atl1_adapter *adapter); void atl1_down(struct atl1_adapter *adapter); int atl1_reset(struct atl1_adapter *adapter); -s32 atl1_setup_ring_resources(struct atl1_adapter *adapter); -void atl1_free_ring_resources(struct atl1_adapter *adapter); -extern char atl1_driver_name[]; -extern char atl1_driver_version[]; extern const struct ethtool_ops atl1_ethtool_ops; -struct atl1_adapter; +/* hardware definitions specific to L1 */ + +/* Block IDLE Status Register */ +#define IDLE_STATUS_RXMAC 0x1 +#define IDLE_STATUS_TXMAC 0x2 +#define IDLE_STATUS_RXQ 0x4 +#define IDLE_STATUS_TXQ 0x8 +#define IDLE_STATUS_DMAR 0x10 +#define IDLE_STATUS_DMAW 0x20 +#define IDLE_STATUS_SMB 0x40 +#define IDLE_STATUS_CMB 0x80 + +/* MDIO Control Register */ +#define MDIO_WAIT_TIMES 30 + +/* MAC Control Register */ +#define MAC_CTRL_TX_PAUSE 0x10000 +#define MAC_CTRL_SCNT 0x20000 +#define MAC_CTRL_SRST_TX 0x40000 +#define MAC_CTRL_TX_SIMURST 0x80000 +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 0x2 +#define MAC_CTRL_SPEED_10_100 0x1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 +#define MAC_CTRL_TX_HUGE 0x800000 +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 +#define MAC_CTRL_DBG 0x8000000 + +/* Wake-On-Lan control register */ +#define WOL_CLK_SWITCH_EN 0x8000 +#define WOL_PT5_EN 0x200000 +#define WOL_PT6_EN 0x400000 +#define WOL_PT5_MATCH 0x8000000 +#define WOL_PT6_MATCH 0x10000000 + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14A4 +#define WOL_PT_LEN_MASK 0x7F +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Registers, 
low 32 bits */ +#define REG_SRAM_RFD_LEN 0x1504 +#define REG_SRAM_RRD_ADDR 0x1508 +#define REG_SRAM_RRD_LEN 0x150C +#define REG_SRAM_TPD_ADDR 0x1510 +#define REG_SRAM_TPD_LEN 0x1514 +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_PATH_ADDR 0x1530 +#define SRAM_TCPH_ADDR_MASK 0xFFF +#define SRAM_TCPH_ADDR_SHIFT 0 +#define SRAM_PATH_ADDR_MASK 0xFFF +#define SRAM_PATH_ADDR_SHIFT 16 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 + +/* Descriptor Control registers, low 32 bits */ +#define REG_DESC_RFD_ADDR_LO 0x1544 +#define REG_DESC_RRD_ADDR_LO 0x1548 +#define REG_DESC_TPD_ADDR_LO 0x154C +#define REG_DESC_CMB_ADDR_LO 0x1550 +#define REG_DESC_SMB_ADDR_LO 0x1554 +#define REG_DESC_RFD_RRD_RING_SIZE 0x1558 +#define DESC_RFD_RING_SIZE_MASK 0x7FF +#define DESC_RFD_RING_SIZE_SHIFT 0 +#define DESC_RRD_RING_SIZE_MASK 0x7FF +#define DESC_RRD_RING_SIZE_SHIFT 16 +#define REG_DESC_TPD_RING_SIZE 0x155C +#define DESC_TPD_RING_SIZE_MASK 0x3FF +#define DESC_TPD_RING_SIZE_SHIFT 0 + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 +#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F +#define TXQ_CTRL_EN 0x20 +#define TXQ_CTRL_ENH_MODE 0x40 +#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 +#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 +#define TX_JUMBO_TASK_TH_MASK 0x7FF +#define TX_JUMBO_TASK_TH_SHIFT 0 +#define TX_TPD_MIN_IPG_MASK 0x1F +#define TX_TPD_MIN_IPG_SHIFT 16 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 +#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF +#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 +#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF +#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 +#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +#define RXQ_JMBOSZ_TH_MASK 0x7FF +#define RXQ_JMBOSZ_TH_SHIFT 0 +#define RXQ_JMBO_LKAH_MASK 0xF +#define RXQ_JMBO_LKAH_SHIFT 11 +#define RXQ_RRD_TIMER_MASK 0xFFFF +#define RXQ_RRD_TIMER_SHIFT 16 + +/* RFD flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF + +/* RRD flow control register */ +#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC +#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_EN 0x400 +#define DMA_CTRL_DMAW_EN 0x800 + +/* CMB/SMB Control Register */ +#define REG_CSMB_CTRL 0x15D0 +#define CSMB_CTRL_CMB_NOW 1 +#define CSMB_CTRL_SMB_NOW 2 +#define CSMB_CTRL_CMB_EN 4 +#define CSMB_CTRL_SMB_EN 8 + +/* CMB DMA Write Threshold Register */ 
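Most registers declared above pack several fields into one 32-bit word through *_MASK/*_SHIFT pairs; REG_DESC_RFD_RRD_RING_SIZE, for example, carries the RFD count in bits 0..10 and the RRD count in bits 16..26. A small encoder showing that pattern with the values copied from this header (illustrative only, not part of this patch):

#include <stdio.h>
#include <stdint.h>

#define DESC_RFD_RING_SIZE_MASK         0x7FFu
#define DESC_RFD_RING_SIZE_SHIFT        0
#define DESC_RRD_RING_SIZE_MASK         0x7FFu
#define DESC_RRD_RING_SIZE_SHIFT        16

static uint32_t pack_ring_sizes(uint32_t rfd, uint32_t rrd)
{
        return ((rfd & DESC_RFD_RING_SIZE_MASK) << DESC_RFD_RING_SIZE_SHIFT) |
               ((rrd & DESC_RRD_RING_SIZE_MASK) << DESC_RRD_RING_SIZE_SHIFT);
}

int main(void)
{
        /* 512 RFDs and 512 RRDs -> 0x02000200 */
        printf("0x%08x\n", (unsigned)pack_ring_sizes(512, 512));
        return 0;
}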
+#define REG_CMB_WRITE_TH 0x15D4 +#define CMB_RRD_TH_SHIFT 0 +#define CMB_RRD_TH_MASK 0x7FF +#define CMB_TPD_TH_SHIFT 16 +#define CMB_TPD_TH_MASK 0x7FF + +/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */ +#define REG_CMB_WRITE_TIMER 0x15D8 +#define CMB_RX_TM_SHIFT 0 +#define CMB_RX_TM_MASK 0xFFFF +#define CMB_TX_TM_SHIFT 16 +#define CMB_TX_TM_MASK 0xFFFF + +/* Number of packet received since last CMB write */ +#define REG_CMB_RX_PKT_CNT 0x15DC + +/* Number of packet transmitted since last CMB write */ +#define REG_CMB_TX_PKT_CNT 0x15E0 + +/* SMB auto DMA timer register */ +#define REG_SMB_TIMER 0x15E4 + +/* Mailbox Register */ +#define REG_MAILBOX 0x15F0 +#define MB_RFD_PROD_INDX_SHIFT 0 +#define MB_RFD_PROD_INDX_MASK 0x7FF +#define MB_RRD_CONS_INDX_SHIFT 11 +#define MB_RRD_CONS_INDX_MASK 0x7FF +#define MB_TPD_PROD_INDX_SHIFT 22 +#define MB_TPD_PROD_INDX_MASK 0x3FF + +/* Interrupt Status Register */ +#define ISR_SMB 0x1 +#define ISR_TIMER 0x2 +#define ISR_MANUAL 0x4 +#define ISR_RXF_OV 0x8 +#define ISR_RFD_UNRUN 0x10 +#define ISR_RRD_OV 0x20 +#define ISR_TXF_UNRUN 0x40 +#define ISR_LINK 0x80 +#define ISR_HOST_RFD_UNRUN 0x100 +#define ISR_HOST_RRD_OV 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_RX_PKT 0x10000 +#define ISR_TX_PKT 0x20000 +#define ISR_TX_DMA 0x40000 +#define ISR_RX_DMA 0x80000 +#define ISR_CMB_RX 0x100000 +#define ISR_CMB_TX 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_DIS_SMB 0x20000000 +#define ISR_DIS_DMA 0x40000000 + +/* Normal Interrupt mask */ +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_GPHY |\ + ISR_PHY_LINKDOWN|\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_CMB_TX |\ + ISR_CMB_RX) + +/* Debug Interrupt Mask (enable all interrupt) */ +#define IMR_DEBUG_MASK (\ + ISR_SMB |\ + ISR_TIMER |\ + ISR_MANUAL |\ + ISR_RXF_OV |\ + ISR_RFD_UNRUN |\ + ISR_RRD_OV |\ + ISR_TXF_UNRUN |\ + ISR_LINK |\ + ISR_CMB_TX |\ + ISR_CMB_RX |\ + ISR_RX_PKT |\ + ISR_TX_PKT |\ + ISR_MAC_RX |\ + ISR_MAC_TX) + +#define MEDIA_TYPE_1000M_FULL 1 +#define MEDIA_TYPE_100M_FULL 2 +#define MEDIA_TYPE_100M_HALF 3 +#define MEDIA_TYPE_10M_FULL 4 +#define MEDIA_TYPE_10M_HALF 5 + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */ + +#define MAX_JUMBO_FRAME_SIZE 10240 + +#define ATL1_EEDUMP_LEN 48 + +/* Statistics counters collected by the MAC */ +struct stats_msg_block { + /* rx */ + u32 rx_ok; /* good RX packets */ + u32 rx_bcast; /* good RX broadcast packets */ + u32 rx_mcast; /* good RX multicast packets */ + u32 rx_pause; /* RX pause frames */ + u32 rx_ctrl; /* RX control packets other than pause frames */ + u32 rx_fcs_err; /* RX packets with bad FCS */ + u32 rx_len_err; /* RX packets with length != actual size */ + u32 rx_byte_cnt; /* good bytes received. 
FCS is NOT included */ + u32 rx_runt; /* RX packets < 64 bytes with good FCS */ + u32 rx_frag; /* RX packets < 64 bytes with bad FCS */ + u32 rx_sz_64; /* 64 byte RX packets */ + u32 rx_sz_65_127; + u32 rx_sz_128_255; + u32 rx_sz_256_511; + u32 rx_sz_512_1023; + u32 rx_sz_1024_1518; + u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */ + u32 rx_sz_ov; /* truncated RX packets > MTU */ + u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */ + u32 rx_rrd_ov; /* frames dropped due to RRD overflow */ + u32 rx_align_err; /* alignment errors */ + u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */ + u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */ + u32 rx_err_addr; /* packets dropped due to address filtering */ + + /* tx */ + u32 tx_ok; /* good TX packets */ + u32 tx_bcast; /* good TX broadcast packets */ + u32 tx_mcast; /* good TX multicast packets */ + u32 tx_pause; /* TX pause frames */ + u32 tx_exc_defer; /* TX packets deferred excessively */ + u32 tx_ctrl; /* TX control frames, excluding pause frames */ + u32 tx_defer; /* TX packets deferred */ + u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */ + u32 tx_sz_64; /* 64 byte TX packets */ + u32 tx_sz_65_127; + u32 tx_sz_128_255; + u32 tx_sz_256_511; + u32 tx_sz_512_1023; + u32 tx_sz_1024_1518; + u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */ + u32 tx_1_col; /* packets TX after a single collision */ + u32 tx_2_col; /* packets TX after multiple collisions */ + u32 tx_late_col; /* TX packets with late collisions */ + u32 tx_abort_col; /* TX packets aborted w/excessive collisions */ + u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u32 tx_rd_eop; /* reads beyond the EOP into the next frame + * when TRD was not written timely */ + u32 tx_len_err; /* TX packets where length != actual size */ + u32 tx_trunc; /* TX packets truncated due to size > MTU */ + u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */ + u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */ + u32 smb_updated; /* 1: SMB Updated. This is used by software to + * indicate the statistics update. Software + * should clear this bit after retrieving the + * statistics information. */ +}; + +/* Coalescing Message Block */ +struct coals_msg_block { + u32 int_stats; /* interrupt status */ + u16 rrd_prod_idx; /* TRD Producer Index. */ + u16 rfd_cons_idx; /* RFD Consumer Index. */ + u16 update; /* Selene sets this bit every time it DMAs the + * CMB to host memory. Software should clear + * this bit when CMB info is processed. */ + u16 tpd_cons_idx; /* TPD Consumer Index. 
*/ +}; + +/* RRD descriptor */ +struct rx_return_desc { + u8 num_buf; /* Number of RFD buffers used by the received packet */ + u8 resved; + u16 buf_indx; /* RFD Index of the first buffer */ + union { + u32 valid; + struct { + u16 rx_chksum; + u16 pkt_size; + } xsum_sz; + } xsz; + + u16 pkt_flg; /* Packet flags */ + u16 err_flg; /* Error flags */ + u16 resved2; + u16 vlan_tag; /* VLAN TAG */ +}; + +#define PACKET_FLAG_ETH_TYPE 0x0080 +#define PACKET_FLAG_VLAN_INS 0x0100 +#define PACKET_FLAG_ERR 0x0200 +#define PACKET_FLAG_IPV4 0x0400 +#define PACKET_FLAG_UDP 0x0800 +#define PACKET_FLAG_TCP 0x1000 +#define PACKET_FLAG_BCAST 0x2000 +#define PACKET_FLAG_MCAST 0x4000 +#define PACKET_FLAG_PAUSE 0x8000 + +#define ERR_FLAG_CRC 0x0001 +#define ERR_FLAG_CODE 0x0002 +#define ERR_FLAG_DRIBBLE 0x0004 +#define ERR_FLAG_RUNT 0x0008 +#define ERR_FLAG_OV 0x0010 +#define ERR_FLAG_TRUNC 0x0020 +#define ERR_FLAG_IP_CHKSUM 0x0040 +#define ERR_FLAG_L4_CHKSUM 0x0080 +#define ERR_FLAG_LEN 0x0100 +#define ERR_FLAG_DES_ADDR 0x0200 + +/* RFD descriptor */ +struct rx_free_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buf_len; /* Size of the receive buffer in host memory */ + u16 coalese; /* Update consumer index to host after the + * reception of this frame */ + /* __attribute__ ((packed)) is required */ +} __attribute__ ((packed)); + +/* tsopu defines */ +#define TSO_PARAM_BUFLEN_MASK 0x3FFF +#define TSO_PARAM_BUFLEN_SHIFT 0 +#define TSO_PARAM_DMAINT_MASK 0x0001 +#define TSO_PARAM_DMAINT_SHIFT 14 +#define TSO_PARAM_PKTNT_MASK 0x0001 +#define TSO_PARAM_PKTINT_SHIFT 15 +#define TSO_PARAM_VLANTAG_MASK 0xFFFF +#define TSO_PARAM_VLAN_SHIFT 16 + +/* tsopl defines */ +#define TSO_PARAM_EOP_MASK 0x0001 +#define TSO_PARAM_EOP_SHIFT 0 +#define TSO_PARAM_COALESCE_MASK 0x0001 +#define TSO_PARAM_COALESCE_SHIFT 1 +#define TSO_PARAM_INSVLAG_MASK 0x0001 +#define TSO_PARAM_INSVLAG_SHIFT 2 +#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001 +#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3 +#define TSO_PARAM_SEGMENT_MASK 0x0001 +#define TSO_PARAM_SEGMENT_SHIFT 4 +#define TSO_PARAM_IPCKSUM_MASK 0x0001 +#define TSO_PARAM_IPCKSUM_SHIFT 5 +#define TSO_PARAM_TCPCKSUM_MASK 0x0001 +#define TSO_PARAM_TCPCKSUM_SHIFT 6 +#define TSO_PARAM_UDPCKSUM_MASK 0x0001 +#define TSO_PARAM_UDPCKSUM_SHIFT 7 +#define TSO_PARAM_VLANTAGGED_MASK 0x0001 +#define TSO_PARAM_VLANTAGGED_SHIFT 8 +#define TSO_PARAM_ETHTYPE_MASK 0x0001 +#define TSO_PARAM_ETHTYPE_SHIFT 9 +#define TSO_PARAM_IPHL_MASK 0x000F +#define TSO_PARAM_IPHL_SHIFT 10 +#define TSO_PARAM_TCPHDRLEN_MASK 0x000F +#define TSO_PARAM_TCPHDRLEN_SHIFT 14 +#define TSO_PARAM_HDRFLAG_MASK 0x0001 +#define TSO_PARAM_HDRFLAG_SHIFT 18 +#define TSO_PARAM_MSS_MASK 0x1FFF +#define TSO_PARAM_MSS_SHIFT 19 + +/* csumpu defines */ +#define CSUM_PARAM_BUFLEN_MASK 0x3FFF +#define CSUM_PARAM_BUFLEN_SHIFT 0 +#define CSUM_PARAM_DMAINT_MASK 0x0001 +#define CSUM_PARAM_DMAINT_SHIFT 14 +#define CSUM_PARAM_PKTINT_MASK 0x0001 +#define CSUM_PARAM_PKTINT_SHIFT 15 +#define CSUM_PARAM_VALANTAG_MASK 0xFFFF +#define CSUM_PARAM_VALAN_SHIFT 16 + +/* csumpl defines*/ +#define CSUM_PARAM_EOP_MASK 0x0001 +#define CSUM_PARAM_EOP_SHIFT 0 +#define CSUM_PARAM_COALESCE_MASK 0x0001 +#define CSUM_PARAM_COALESCE_SHIFT 1 +#define CSUM_PARAM_INSVLAG_MASK 0x0001 +#define CSUM_PARAM_INSVLAG_SHIFT 2 +#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001 +#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3 +#define CSUM_PARAM_SEGMENT_MASK 0x0001 +#define CSUM_PARAM_SEGMENT_SHIFT 4 +#define CSUM_PARAM_IPCKSUM_MASK 0x0001 +#define CSUM_PARAM_IPCKSUM_SHIFT 5 
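The rx_free_desc declaration above insists on __attribute__((packed)) because the descriptor is an 8-byte DMA address followed by two 16-bit fields; without packed, most 64-bit ABIs would pad the struct to 16 bytes and the ring stride would no longer be the 12-byte layout the comment asks for. A quick demonstration of the size difference (not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* same member layout as rx_free_desc, but without the packed attribute */
struct rfd_unpacked {
        uint64_t buffer_addr;
        uint16_t buf_len;
        uint16_t coalese;
};

/* and with it, as the driver declares the descriptor */
struct rfd_packed {
        uint64_t buffer_addr;
        uint16_t buf_len;
        uint16_t coalese;
} __attribute__((packed));

int main(void)
{
        /* on a typical 64-bit ABI this prints "unpacked=16 packed=12":
         * without packed the compiler pads the tail to keep 8-byte
         * alignment for the uint64_t member */
        printf("unpacked=%zu packed=%zu\n",
               sizeof(struct rfd_unpacked), sizeof(struct rfd_packed));
        return 0;
}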
+#define CSUM_PARAM_TCPCKSUM_MASK 0x0001 +#define CSUM_PARAM_TCPCKSUM_SHIFT 6 +#define CSUM_PARAM_UDPCKSUM_MASK 0x0001 +#define CSUM_PARAM_UDPCKSUM_SHIFT 7 +#define CSUM_PARAM_VLANTAGGED_MASK 0x0001 +#define CSUM_PARAM_VLANTAGGED_SHIFT 8 +#define CSUM_PARAM_ETHTYPE_MASK 0x0001 +#define CSUM_PARAM_ETHTYPE_SHIFT 9 +#define CSUM_PARAM_IPHL_MASK 0x000F +#define CSUM_PARAM_IPHL_SHIFT 10 +#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF +#define CSUM_PARAM_PLOADOFFSET_SHIFT 16 +#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF +#define CSUM_PARAM_XSUMOFFSET_SHIFT 24 + +/* TPD descriptor */ +struct tso_param { + /* The order of these declarations is important -- don't change it */ + u32 tsopu; /* tso_param upper word */ + u32 tsopl; /* tso_param lower word */ +}; + +struct csum_param { + /* The order of these declarations is important -- don't change it */ + u32 csumpu; /* csum_param upper word */ + u32 csumpl; /* csum_param lower word */ +}; + +union tpd_descr { + u64 data; + struct csum_param csum; + struct tso_param tso; +}; + +struct tx_packet_desc { + __le64 buffer_addr; + union tpd_descr desc; +}; + +/* DMA Order Settings */ +enum atl1_dma_order { + atl1_dma_ord_in = 1, + atl1_dma_ord_enh = 2, + atl1_dma_ord_out = 4 +}; + +enum atl1_dma_rcb { + atl1_rcb_64 = 0, + atl1_rcb_128 = 1 +}; + +enum atl1_dma_req_block { + atl1_dma_req_128 = 0, + atl1_dma_req_256 = 1, + atl1_dma_req_512 = 2, + atl1_dma_req_1024 = 3, + atl1_dma_req_2048 = 4, + atl1_dma_req_4096 = 5 +}; #define ATL1_MAX_INTR 3 #define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ @@ -58,19 +591,6 @@ struct atl1_adapter; #define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) /* - * This detached comment is preserved for documentation purposes only. - * It was originally attached to some code that got deleted, but seems - * important enough to keep around... - * - * - * Some workarounds require millisecond delays and are run during interrupt - * context. Most notably, when establishing link, the phy may need tweaking - * but cannot process phy register reads/writes faster than millisecond - * intervals...and we establish link due to a "link status change" interrupt. - * - */ - -/* * atl1_ring_header represents a single, contiguous block of DMA space * mapped for the three descriptor rings (tpd, rfd, rrd) and the two * message blocks (cmb, smb) described below @@ -156,20 +676,15 @@ struct atl1_sft_stats { u64 tx_aborted_errors; u64 tx_window_errors; u64 tx_carrier_errors; - u64 tx_pause; /* num pause packets transmitted. */ - u64 excecol; /* num tx packets w/ excessive collisions. */ - u64 deffer; /* num tx packets deferred */ - u64 scc; /* num packets subsequently transmitted - * successfully w/ single prior collision. */ - u64 mcc; /* num packets subsequently transmitted - * successfully w/ multiple prior collisions. */ - u64 latecol; /* num tx packets w/ late collisions. */ - u64 tx_underun; /* num tx packets aborted due to transmit - * FIFO underrun, or TRD FIFO underrun */ - u64 tx_trunc; /* num tx packets truncated due to size - * exceeding MTU, regardless whether truncated - * by the chip or not. (The name doesn't really - * reflect the meaning in this case.) 
*/ + u64 tx_pause; /* TX pause frames */ + u64 excecol; /* TX packets w/ excessive collisions */ + u64 deffer; /* TX packets deferred */ + u64 scc; /* packets TX after a single collision */ + u64 mcc; /* packets TX after multiple collisions */ + u64 latecol; /* TX packets w/ late collisions */ + u64 tx_underun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u64 tx_trunc; /* TX packets truncated due to size > MTU */ u64 rx_pause; /* num Pause packets received. */ u64 rx_rrd_ov; u64 rx_trunc; @@ -184,8 +699,7 @@ struct atl1_hw { enum atl1_dma_req_block dmar_block; enum atl1_dma_req_block dmaw_block; u8 preamble_len; - u8 max_retry; /* Retransmission maximum, after which the - * packet will be discarded */ + u8 max_retry; u8 jam_ipg; /* IPG to start JAM for collision based flow * control in half-duplex mode. In units of * 8-bit time */ @@ -271,16 +785,15 @@ struct atl1_adapter { u64 hw_csum_err; u64 hw_csum_good; - u16 imt; /* interrupt moderator timer (2us resolution */ - u16 ict; /* interrupt clear timer (2us resolution */ - struct mii_if_info mii; /* MII interface info */ + u16 imt; /* interrupt moderator timer (2us resolution) */ + u16 ict; /* interrupt clear timer (2us resolution */ + struct mii_if_info mii; /* MII interface info */ - /* structs defined in atl1_hw.h */ - u32 bd_number; /* board number */ + u32 bd_number; /* board number */ bool pci_using_64; struct atl1_hw hw; struct atl1_smb smb; struct atl1_cmb cmb; }; -#endif /* _ATL1_H_ */ +#endif /* ATL1_H */ diff --git a/drivers/net/atlx/atl1_ethtool.c b/drivers/net/atlx/atl1_ethtool.c deleted file mode 100644 index 68a83be..0000000 --- a/drivers/net/atlx/atl1_ethtool.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook - * Copyright(c) 2006 Jay Cliburn - * - * Derived from Intel e1000 driver - * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- */ - -#include -#include -#include -#include -#include -#include - -#include "atl1.h" - -struct atl1_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \ - offsetof(struct atl1_adapter, m) - -static struct atl1_stats atl1_gstrings_stats[] = { - {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, - {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, - {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, - {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, - {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, - {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, - {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, - {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, - {"multicast", ATL1_STAT(soft_stats.multicast)}, - {"collisions", ATL1_STAT(soft_stats.collisions)}, - {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, - {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, - {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, - {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, - {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, - {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, - {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, - {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, - {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, - {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, - {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, - {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, - {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, - {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, - {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, - {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, - {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, - {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, - {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, - {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, - {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} -}; - -static void atl1_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int i; - char *p; - - for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { - p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; - data[i] = (atl1_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - -} - -static int atl1_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(atl1_gstrings_stats); - default: - return -EOPNOTSUPP; - } -} - -static int atl1_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - ecmd->advertising |= ADVERTISED_Autoneg; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->advertising |= - (ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full); - } - else - ecmd->advertising |= (ADVERTISED_1000baseT_Full); - } - ecmd->port = PORT_TP; - ecmd->phy_address = 0; - ecmd->transceiver = XCVR_INTERNAL; - - if (netif_carrier_ok(adapter->netdev)) { - u16 link_speed, link_duplex; - atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); - ecmd->speed = link_speed; - if (link_duplex == FULL_DUPLEX) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } else { - ecmd->speed = -1; - ecmd->duplex = -1; - } - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) - ecmd->autoneg = AUTONEG_ENABLE; - else - ecmd->autoneg = AUTONEG_DISABLE; - - return 0; -} - -static int atl1_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - u16 phy_data; - int ret_val = 0; - u16 old_media_type = hw->media_type; - - if (netif_running(adapter->netdev)) { - dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n"); - atl1_down(adapter); - } - - if (ecmd->autoneg == AUTONEG_ENABLE) - hw->media_type = MEDIA_TYPE_AUTO_SENSOR; - else { - if (ecmd->speed == SPEED_1000) { - if (ecmd->duplex != DUPLEX_FULL) { - dev_warn(&adapter->pdev->dev, - "can't force to 1000M half duplex\n"); - ret_val = -EINVAL; - goto exit_sset; - } - hw->media_type = MEDIA_TYPE_1000M_FULL; - } else if (ecmd->speed == SPEED_100) { - if (ecmd->duplex == DUPLEX_FULL) { - hw->media_type = MEDIA_TYPE_100M_FULL; - } else - hw->media_type = MEDIA_TYPE_100M_HALF; - } else { - if (ecmd->duplex == DUPLEX_FULL) - hw->media_type = MEDIA_TYPE_10M_FULL; - else - hw->media_type = MEDIA_TYPE_10M_HALF; - } - } - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - ecmd->advertising = - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - case MEDIA_TYPE_1000M_FULL: - ecmd->advertising = - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - default: - ecmd->advertising = 0; - break; - } - if (atl1_phy_setup_autoneg_adv(hw)) { - ret_val = -EINVAL; - dev_warn(&adapter->pdev->dev, - "invalid ethtool speed/duplex setting\n"); - goto exit_sset; - } - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; - else { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = 
- MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | - MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: /* MEDIA_TYPE_10M_HALF: */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - break; - } - } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); -exit_sset: - if (ret_val) - hw->media_type = old_media_type; - - if (netif_running(adapter->netdev)) { - dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n"); - atl1_up(adapter); - } else if (!ret_val) { - dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n"); - atl1_reset(adapter); - } - return ret_val; -} - -static void atl1_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - - strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver)); - strncpy(drvinfo->version, atl1_driver_version, - sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->eedump_len = ATL1_EEDUMP_LEN; -} - -static void atl1_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - - wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = 0; - if (adapter->wol & ATL1_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & ATL1_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & ATL1_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & ATL1_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; - return; -} - -static int atl1_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - adapter->wol = 0; - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= ATL1_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= ATL1_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= ATL1_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= ATL1_WUFC_MAG; - return 0; -} - -static void atl1_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_tpd_ring *txdr = &adapter->tpd_ring; - struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; - - ring->rx_max_pending = ATL1_MAX_RFD; - ring->tx_max_pending = ATL1_MAX_TPD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rxdr->count; - ring->tx_pending = txdr->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int atl1_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; - struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; - struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; - - struct atl1_tpd_ring tpd_old, tpd_new; - struct atl1_rfd_ring rfd_old, rfd_new; - struct atl1_rrd_ring rrd_old, rrd_new; - struct atl1_ring_header rhdr_old, rhdr_new; - int err; - - tpd_old = adapter->tpd_ring; - rfd_old = adapter->rfd_ring; - rrd_old = adapter->rrd_ring; - rhdr_old = adapter->ring_header; - - if (netif_running(adapter->netdev)) - atl1_down(adapter); - - rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); - rfdr->count = 
rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : - rfdr->count; - rfdr->count = (rfdr->count + 3) & ~3; - rrdr->count = rfdr->count; - - tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); - tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : - tpdr->count; - tpdr->count = (tpdr->count + 3) & ~3; - - if (netif_running(adapter->netdev)) { - /* try to get new resources before deleting old */ - err = atl1_setup_ring_resources(adapter); - if (err) - goto err_setup_ring; - - /* - * save the new, restore the old in order to free it, - * then restore the new back again - */ - - rfd_new = adapter->rfd_ring; - rrd_new = adapter->rrd_ring; - tpd_new = adapter->tpd_ring; - rhdr_new = adapter->ring_header; - adapter->rfd_ring = rfd_old; - adapter->rrd_ring = rrd_old; - adapter->tpd_ring = tpd_old; - adapter->ring_header = rhdr_old; - atl1_free_ring_resources(adapter); - adapter->rfd_ring = rfd_new; - adapter->rrd_ring = rrd_new; - adapter->tpd_ring = tpd_new; - adapter->ring_header = rhdr_new; - - err = atl1_up(adapter); - if (err) - return err; - } - return 0; - -err_setup_ring: - adapter->rfd_ring = rfd_old; - adapter->rrd_ring = rrd_old; - adapter->tpd_ring = tpd_old; - adapter->ring_header = rhdr_old; - atl1_up(adapter); - return err; -} - -static void atl1_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *epause) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - epause->autoneg = AUTONEG_ENABLE; - } else { - epause->autoneg = AUTONEG_DISABLE; - } - epause->rx_pause = 1; - epause->tx_pause = 1; -} - -static int atl1_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *epause) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - epause->autoneg = AUTONEG_ENABLE; - } else { - epause->autoneg = AUTONEG_DISABLE; - } - - epause->rx_pause = 1; - epause->tx_pause = 1; - - return 0; -} - -static u32 atl1_get_rx_csum(struct net_device *netdev) -{ - return 1; -} - -static void atl1_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { - memcpy(p, atl1_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int atl1_nway_reset(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - - if (netif_running(netdev)) { - u16 phy_data; - atl1_down(adapter); - - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; - } else { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = MII_CR_FULL_DUPLEX | - MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = MII_CR_FULL_DUPLEX | - MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: /* MEDIA_TYPE_10M_HALF */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - } - } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); - atl1_up(adapter); - } - return 0; -} - -const struct ethtool_ops atl1_ethtool_ops = { - .get_settings = atl1_get_settings, - .set_settings = atl1_set_settings, - 
.get_drvinfo = atl1_get_drvinfo, - .get_wol = atl1_get_wol, - .set_wol = atl1_set_wol, - .get_ringparam = atl1_get_ringparam, - .set_ringparam = atl1_set_ringparam, - .get_pauseparam = atl1_get_pauseparam, - .set_pauseparam = atl1_set_pauseparam, - .get_rx_csum = atl1_get_rx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, - .get_link = ethtool_op_get_link, - .set_sg = ethtool_op_set_sg, - .get_strings = atl1_get_strings, - .nway_reset = atl1_nway_reset, - .get_ethtool_stats = atl1_get_ethtool_stats, - .get_sset_count = atl1_get_sset_count, - .set_tso = ethtool_op_set_tso, -}; diff --git a/drivers/net/atlx/atl1_hw.c b/drivers/net/atlx/atl1_hw.c deleted file mode 100644 index 9d3bd22..0000000 --- a/drivers/net/atlx/atl1_hw.c +++ /dev/null @@ -1,720 +0,0 @@ -/* - * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook - * Copyright(c) 2006 Jay Cliburn - * - * Derived from Intel e1000 driver - * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "atl1.h" - -/* - * Reset the transmit and receive units; mask and clear all interrupts. - * hw - Struct containing variables accessed by shared code - * return : ATL1_SUCCESS or idle status (if error) - */ -s32 atl1_reset_hw(struct atl1_hw *hw) -{ - struct pci_dev *pdev = hw->back->pdev; - u32 icr; - int i; - - /* - * Clear Interrupt mask to stop board from generating - * interrupts & Clear any pending interrupt events - */ - /* - * iowrite32(0, hw->hw_addr + REG_IMR); - * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); - */ - - /* - * Issue Soft Reset to the MAC. This will reset the chip's - * transmit, receive, DMA. It will not effect - * the current PCI configuration. The global reset bit is self- - * clearing, and should clear within a microsecond. - */ - iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); - ioread32(hw->hw_addr + REG_MASTER_CTRL); - - iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE); - ioread16(hw->hw_addr + REG_GPHY_ENABLE); - - msleep(1); /* delay about 1ms */ - - /* Wait at least 10ms for All module to be Idle */ - for (i = 0; i < 10; i++) { - icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); - if (!icr) - break; - msleep(1); /* delay 1 ms */ - cpu_relax(); /* FIXME: is this still the right way to do this? 
*/ - } - - if (icr) { - dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); - return icr; - } - - return ATL1_SUCCESS; -} - -/* function about EEPROM - * - * check_eeprom_exist - * return 0 if eeprom exist - */ -static int atl1_check_eeprom_exist(struct atl1_hw *hw) -{ - u32 value; - value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); - if (value & SPI_FLASH_CTRL_EN_VPD) { - value &= ~SPI_FLASH_CTRL_EN_VPD; - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); - } - - value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); - return ((value & 0xFF00) == 0x6C00) ? 0 : 1; -} - -static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) -{ - int i; - u32 control; - - if (offset & 3) - return false; /* address do not align */ - - iowrite32(0, hw->hw_addr + REG_VPD_DATA); - control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; - iowrite32(control, hw->hw_addr + REG_VPD_CAP); - ioread32(hw->hw_addr + REG_VPD_CAP); - - for (i = 0; i < 10; i++) { - msleep(2); - control = ioread32(hw->hw_addr + REG_VPD_CAP); - if (control & VPD_CAP_VPD_FLAG) - break; - } - if (control & VPD_CAP_VPD_FLAG) { - *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); - return true; - } - return false; /* timeout */ -} - -/* - * Reads the value from a PHY register - * hw - Struct containing variables accessed by shared code - * reg_addr - address of the PHY register to read - */ -s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) -{ - u32 val; - int i; - - val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | - MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << - MDIO_CLK_SEL_SHIFT; - iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); - ioread32(hw->hw_addr + REG_MDIO_CTRL); - - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; - } - if (!(val & (MDIO_START | MDIO_BUSY))) { - *phy_data = (u16) val; - return ATL1_SUCCESS; - } - return ATL1_ERR_PHY; -} - -#define CUSTOM_SPI_CS_SETUP 2 -#define CUSTOM_SPI_CLK_HI 2 -#define CUSTOM_SPI_CLK_LO 2 -#define CUSTOM_SPI_CS_HOLD 2 -#define CUSTOM_SPI_CS_HI 3 - -static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) -{ - int i; - u32 value; - - iowrite32(0, hw->hw_addr + REG_SPI_DATA); - iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); - - value = SPI_FLASH_CTRL_WAIT_READY | - (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << - SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & - SPI_FLASH_CTRL_CLK_HI_MASK) << - SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & - SPI_FLASH_CTRL_CLK_LO_MASK) << - SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & - SPI_FLASH_CTRL_CS_HOLD_MASK) << - SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & - SPI_FLASH_CTRL_CS_HI_MASK) << - SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << - SPI_FLASH_CTRL_INS_SHIFT; - - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); - - value |= SPI_FLASH_CTRL_START; - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); - ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); - - for (i = 0; i < 10; i++) { - msleep(1); /* 1ms */ - value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); - if (!(value & SPI_FLASH_CTRL_START)) - break; - } - - if (value & SPI_FLASH_CTRL_START) - return false; - - *buf = ioread32(hw->hw_addr + REG_SPI_DATA); - - return true; -} - -/* - * get_permanent_address - * return 0 if get valid mac address, - */ -static int atl1_get_permanent_address(struct atl1_hw *hw) -{ - u32 addr[2]; - u32 i, control; - u16 reg; - u8 eth_addr[ETH_ALEN]; 
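/*
 * Layout note for the scan below: the EEPROM (starting at offset 0x100) and
 * the SPI flash (starting at offset 0x1f000) hold a sequence of 32-bit
 * key/value words.  A word whose low byte is 0x5A is a key carrying a
 * register offset in its upper 16 bits; the next word read is the value for
 * that register.  The values found for REG_MAC_STA_ADDR and
 * REG_MAC_STA_ADDR + 4 are byte-swapped into eth_addr and then checked with
 * is_valid_ether_addr().
 */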
- bool key_valid; - - if (is_valid_ether_addr(hw->perm_mac_addr)) - return 0; - - /* init */ - addr[0] = addr[1] = 0; - - if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */ - reg = 0; - key_valid = false; - /* Read out all EEPROM content */ - i = 0; - while (1) { - if (atl1_read_eeprom(hw, i + 0x100, &control)) { - if (key_valid) { - if (reg == REG_MAC_STA_ADDR) - addr[0] = control; - else if (reg == (REG_MAC_STA_ADDR + 4)) - addr[1] = control; - key_valid = false; - } else if ((control & 0xff) == 0x5A) { - key_valid = true; - reg = (u16) (control >> 16); - } else - break; /* assume data end while encount an invalid KEYWORD */ - } else - break; /* read error */ - i += 4; - } - - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } - return 1; - } - - /* see if SPI FLAGS exist ? */ - addr[0] = addr[1] = 0; - reg = 0; - key_valid = false; - i = 0; - while (1) { - if (atl1_spi_read(hw, i + 0x1f000, &control)) { - if (key_valid) { - if (reg == REG_MAC_STA_ADDR) - addr[0] = control; - else if (reg == (REG_MAC_STA_ADDR + 4)) - addr[1] = control; - key_valid = false; - } else if ((control & 0xff) == 0x5A) { - key_valid = true; - reg = (u16) (control >> 16); - } else - break; /* data end */ - } else - break; /* read error */ - i += 4; - } - - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } - - /* - * On some motherboards, the MAC address is written by the - * BIOS directly to the MAC register during POST, and is - * not stored in eeprom. If all else thus far has failed - * to fetch the permanent MAC address, try reading it directly. - */ - addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); - addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } - - return 1; -} - -/* - * Reads the adapter's MAC address from the EEPROM - * hw - Struct containing variables accessed by shared code - */ -s32 atl1_read_mac_addr(struct atl1_hw *hw) -{ - u16 i; - - if (atl1_get_permanent_address(hw)) - random_ether_addr(hw->perm_mac_addr); - - for (i = 0; i < ETH_ALEN; i++) - hw->mac_addr[i] = hw->perm_mac_addr[i]; - return ATL1_SUCCESS; -} - -/* - * Hashes an address to determine its location in the multicast table - * hw - Struct containing variables accessed by shared code - * mc_addr - the multicast address to hash - * - * atl1_hash_mc_addr - * purpose - * set hash value for a multicast address - * hash calcu processing : - * 1. calcu 32bit CRC for multicast address - * 2. reverse crc with MSB to LSB - */ -u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) -{ - u32 crc32, value = 0; - int i; - - crc32 = ether_crc_le(6, mc_addr); - for (i = 0; i < 32; i++) - value |= (((crc32 >> i) & 1) << (31 - i)); - - return value; -} - -/* - * Sets the bit in the multicast table corresponding to the hash value. - * hw - Struct containing variables accessed by shared code - * hash_value - Multicast address hash value - */ -void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) -{ - u32 hash_bit, hash_reg; - u32 mta; - - /* - * The HASH Table is a register array of 2 32-bit registers. - * It is treated like an array of 64 bits. 
We want to set - * bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The register is determined by the - * upper 7 bits of the hash value and the bit within that - * register are determined by the lower 5 bits of the value. - */ - hash_reg = (hash_value >> 31) & 0x1; - hash_bit = (hash_value >> 26) & 0x1F; - mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); - mta |= (1 << hash_bit); - iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); -} - -/* - * Writes a value to a PHY register - * hw - Struct containing variables accessed by shared code - * reg_addr - address of the PHY register to write - * data - data to write to the PHY - */ -s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) -{ - int i; - u32 val; - - val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | - (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | - MDIO_SUP_PREAMBLE | - MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; - iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); - ioread32(hw->hw_addr + REG_MDIO_CTRL); - - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; - } - - if (!(val & (MDIO_START | MDIO_BUSY))) - return ATL1_SUCCESS; - - return ATL1_ERR_PHY; -} - -/* - * Make L001's PHY out of Power Saving State (bug) - * hw - Struct containing variables accessed by shared code - * when power on, L001's PHY always on Power saving State - * (Gigabit Link forbidden) - */ -static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) -{ - s32 ret; - ret = atl1_write_phy_reg(hw, 29, 0x0029); - if (ret) - return ret; - return atl1_write_phy_reg(hw, 30, 0); -} - -/* - *TODO: do something or get rid of this - */ -s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) -{ -/* s32 ret_val; - * u16 phy_data; - */ - -/* - ret_val = atl1_write_phy_reg(hw, ...); - ret_val = atl1_write_phy_reg(hw, ...); - .... -*/ - return ATL1_SUCCESS; -} - -/* - * Resets the PHY and make all config validate - * hw - Struct containing variables accessed by shared code - * - * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) - */ -static s32 atl1_phy_reset(struct atl1_hw *hw) -{ - struct pci_dev *pdev = hw->back->pdev; - s32 ret_val; - u16 phy_data; - - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; - else { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | - MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: /* MEDIA_TYPE_10M_HALF: */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - break; - } - } - - ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); - if (ret_val) { - u32 val; - int i; - /* pcie serdes link may be down! 
*/ - dev_dbg(&pdev->dev, "pcie phy link down\n"); - - for (i = 0; i < 25; i++) { - msleep(1); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; - } - - if ((val & (MDIO_START | MDIO_BUSY)) != 0) { - dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); - return ret_val; - } - } - return ATL1_SUCCESS; -} - -/* - * Configures PHY autoneg and flow control advertisement settings - * hw - Struct containing variables accessed by shared code - */ -s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) -{ - s32 ret_val; - s16 mii_autoneg_adv_reg; - s16 mii_1000t_ctrl_reg; - - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; - - /* Read the MII 1000Base-T Control Register (Address 9). */ - mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; - - /* - * First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). - */ - mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; - mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; - - /* - * Need to parse media_type and set up - * the appropriate PHY registers. - */ - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | - MII_AR_10T_FD_CAPS | - MII_AR_100TX_HD_CAPS | - MII_AR_100TX_FD_CAPS); - mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; - break; - - case MEDIA_TYPE_1000M_FULL: - mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; - break; - - case MEDIA_TYPE_100M_FULL: - mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; - break; - - case MEDIA_TYPE_100M_HALF: - mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; - break; - - case MEDIA_TYPE_10M_FULL: - mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; - break; - - default: - mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; - break; - } - - /* flow control fixed to enable all */ - mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); - - hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; - hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; - - ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); - if (ret_val) - return ret_val; - - ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; - - return ATL1_SUCCESS; -} - -/* - * Configures link settings. - * hw - Struct containing variables accessed by shared code - * Assumes the hardware has previously been reset and the - * transmitter and receiver are not enabled. - */ -static s32 atl1_setup_link(struct atl1_hw *hw) -{ - struct pci_dev *pdev = hw->back->pdev; - s32 ret_val; - - /* - * Options: - * PHY will advertise value(s) parsed from - * autoneg_advertised and fc - * no matter what autoneg is , We will not wait link result. 
- */ - ret_val = atl1_phy_setup_autoneg_adv(hw); - if (ret_val) { - dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); - return ret_val; - } - /* SW.Reset , En-Auto-Neg if needed */ - ret_val = atl1_phy_reset(hw); - if (ret_val) { - dev_dbg(&pdev->dev, "error resetting phy\n"); - return ret_val; - } - hw->phy_configured = true; - return ret_val; -} - -static struct atl1_spi_flash_dev flash_table[] = { -/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ - {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, - {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, - {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, -}; - -static void atl1_init_flash_opcode(struct atl1_hw *hw) -{ - if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) - hw->flash_vendor = 0; /* ATMEL */ - - /* Init OP table */ - iowrite8(flash_table[hw->flash_vendor].cmd_program, - hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); - iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, - hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); - iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, - hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); - iowrite8(flash_table[hw->flash_vendor].cmd_rdid, - hw->hw_addr + REG_SPI_FLASH_OP_RDID); - iowrite8(flash_table[hw->flash_vendor].cmd_wren, - hw->hw_addr + REG_SPI_FLASH_OP_WREN); - iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, - hw->hw_addr + REG_SPI_FLASH_OP_RDSR); - iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, - hw->hw_addr + REG_SPI_FLASH_OP_WRSR); - iowrite8(flash_table[hw->flash_vendor].cmd_read, - hw->hw_addr + REG_SPI_FLASH_OP_READ); -} - -/* - * Performs basic configuration of the adapter. - * hw - Struct containing variables accessed by shared code - * Assumes that the controller has previously been reset and is in a - * post-reset uninitialized state. Initializes multicast table, - * and Calls routines to setup link - * Leaves the transmit and receive units disabled and uninitialized. - */ -s32 atl1_init_hw(struct atl1_hw *hw) -{ - u32 ret_val = 0; - - /* Zero out the Multicast HASH table */ - iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); - /* clear the old settings from the multicast hash table */ - iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); - - atl1_init_flash_opcode(hw); - - if (!hw->phy_configured) { - /* enable GPHY LinkChange Interrrupt */ - ret_val = atl1_write_phy_reg(hw, 18, 0xC00); - if (ret_val) - return ret_val; - /* make PHY out of power-saving state */ - ret_val = atl1_phy_leave_power_saving(hw); - if (ret_val) - return ret_val; - /* Call a subroutine to configure the link */ - ret_val = atl1_setup_link(hw); - } - return ret_val; -} - -/* - * Detects the current speed and duplex settings of the hardware. 
- * hw - Struct containing variables accessed by shared code - * speed - Speed of the connection - * duplex - Duplex setting of the connection - */ -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) -{ - struct pci_dev *pdev = hw->back->pdev; - s32 ret_val; - u16 phy_data; - - /* ; --- Read PHY Specific Status Register (17) */ - ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); - if (ret_val) - return ret_val; - - if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) - return ATL1_ERR_PHY_RES; - - switch (phy_data & MII_AT001_PSSR_SPEED) { - case MII_AT001_PSSR_1000MBS: - *speed = SPEED_1000; - break; - case MII_AT001_PSSR_100MBS: - *speed = SPEED_100; - break; - case MII_AT001_PSSR_10MBS: - *speed = SPEED_10; - break; - default: - dev_dbg(&pdev->dev, "error getting speed\n"); - return ATL1_ERR_PHY_SPEED; - break; - } - if (phy_data & MII_AT001_PSSR_DPLX) - *duplex = FULL_DUPLEX; - else - *duplex = HALF_DUPLEX; - - return ATL1_SUCCESS; -} - -void atl1_set_mac_addr(struct atl1_hw *hw) -{ - u32 value; - /* - * 00-0B-6A-F6-00-DC - * 0: 6AF600DC 1: 000B - * low dword - */ - value = (((u32) hw->mac_addr[2]) << 24) | - (((u32) hw->mac_addr[3]) << 16) | - (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); - iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); - /* high dword */ - value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); - iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); -} diff --git a/drivers/net/atlx/atl1_hw.h b/drivers/net/atlx/atl1_hw.h deleted file mode 100644 index 939aa0f..0000000 --- a/drivers/net/atlx/atl1_hw.h +++ /dev/null @@ -1,946 +0,0 @@ -/* - * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook - * Copyright(c) 2006 Jay Cliburn - * - * Derived from Intel e1000 driver - * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * There are a lot of defines in here that are unused and/or have cryptic - * names. Please leave them alone, as they're the closest thing we have - * to a spec from Attansic at present. 
*ahem* -- CHS - */ - -#ifndef _ATL1_HW_H_ -#define _ATL1_HW_H_ - -#include -#include - -struct atl1_adapter; -struct atl1_hw; - -/* function prototypes needed by multiple files */ -s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw); -s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data); -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); -s32 atl1_read_mac_addr(struct atl1_hw *hw); -s32 atl1_init_hw(struct atl1_hw *hw); -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); -s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex); -u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); -void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); -s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); -void atl1_set_mac_addr(struct atl1_hw *hw); -s32 atl1_phy_enter_power_saving(struct atl1_hw *hw); -s32 atl1_reset_hw(struct atl1_hw *hw); -void atl1_check_options(struct atl1_adapter *adapter); - -/* register definitions */ -#define REG_PCIE_CAP_LIST 0x58 - -#define REG_VPD_CAP 0x6C -#define VPD_CAP_ID_MASK 0xff -#define VPD_CAP_ID_SHIFT 0 -#define VPD_CAP_NEXT_PTR_MASK 0xFF -#define VPD_CAP_NEXT_PTR_SHIFT 8 -#define VPD_CAP_VPD_ADDR_MASK 0x7FFF -#define VPD_CAP_VPD_ADDR_SHIFT 16 -#define VPD_CAP_VPD_FLAG 0x80000000 - -#define REG_VPD_DATA 0x70 - -#define REG_SPI_FLASH_CTRL 0x200 -#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 -#define SPI_FLASH_CTRL_STS_WEN 0x2 -#define SPI_FLASH_CTRL_STS_WPEN 0x80 -#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF -#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 -#define SPI_FLASH_CTRL_INS_MASK 0x7 -#define SPI_FLASH_CTRL_INS_SHIFT 8 -#define SPI_FLASH_CTRL_START 0x800 -#define SPI_FLASH_CTRL_EN_VPD 0x2000 -#define SPI_FLASH_CTRL_LDSTART 0x8000 -#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 -#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 -#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 -#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 -#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 -#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 -#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 -#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 -#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 -#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 -#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 -#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 -#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 - -#define REG_SPI_ADDR 0x204 - -#define REG_SPI_DATA 0x208 - -#define REG_SPI_FLASH_CONFIG 0x20C -#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF -#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 -#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 -#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 -#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 - -#define REG_SPI_FLASH_OP_PROGRAM 0x210 -#define REG_SPI_FLASH_OP_SC_ERASE 0x211 -#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 -#define REG_SPI_FLASH_OP_RDID 0x213 -#define REG_SPI_FLASH_OP_WREN 0x214 -#define REG_SPI_FLASH_OP_RDSR 0x215 -#define REG_SPI_FLASH_OP_WRSR 0x216 -#define REG_SPI_FLASH_OP_READ 0x217 - -#define REG_TWSI_CTRL 0x218 -#define TWSI_CTRL_LD_OFFSET_MASK 0xFF -#define TWSI_CTRL_LD_OFFSET_SHIFT 0 -#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 -#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 -#define TWSI_CTRL_SW_LDSTART 0x800 -#define TWSI_CTRL_HW_LDSTART 0x1000 -#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F -#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 -#define TWSI_CTRL_LD_EXIST 0x400000 -#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 -#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 -#define TWSI_CTRL_FREQ_SEL_100K 0 -#define TWSI_CTRL_FREQ_SEL_200K 1 -#define TWSI_CTRL_FREQ_SEL_300K 2 -#define TWSI_CTRL_FREQ_SEL_400K 3 
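/*
 * Illustrative sketch: the mask/shift pairs in this header compose into full
 * register words in the same way atl1_read_phy_reg() builds its command word
 * earlier in this patch.  The standalone user-space program below mirrors
 * that pattern for the MDIO control register; the DEMO_-prefixed constants
 * are local copies of the MDIO definitions in this header, and the value it
 * prints is for demonstration only, not something written to hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MDIO_DATA_MASK		0xffffu
#define DEMO_MDIO_REG_ADDR_MASK		0x1fu
#define DEMO_MDIO_REG_ADDR_SHIFT	16
#define DEMO_MDIO_RW			0x200000u
#define DEMO_MDIO_SUP_PREAMBLE		0x400000u
#define DEMO_MDIO_START			0x800000u
#define DEMO_MDIO_CLK_SEL_SHIFT		24
#define DEMO_MDIO_CLK_25_4		0u

/* Build the command word for an MDIO read of one PHY register. */
static uint32_t demo_mdio_read_cmd(uint16_t reg_addr)
{
	return ((uint32_t)(reg_addr & DEMO_MDIO_REG_ADDR_MASK) <<
		DEMO_MDIO_REG_ADDR_SHIFT) |
		DEMO_MDIO_START | DEMO_MDIO_SUP_PREAMBLE | DEMO_MDIO_RW |
		(DEMO_MDIO_CLK_25_4 << DEMO_MDIO_CLK_SEL_SHIFT);
}

int main(void)
{
	/* Read of PHY register 1 (the MII status register). */
	uint32_t cmd = demo_mdio_read_cmd(0x01);

	/* Once the hardware clears the START/BUSY bits, the PHY data sits in
	 * the low 16 bits of the same register (DEMO_MDIO_DATA_MASK). */
	printf("MDIO read command word: 0x%08x\n", cmd);
	return 0;
}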
-#define TWSI_CTRL_SMB_SLV_ADDR -#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 -#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 - -#define REG_PCIE_DEV_MISC_CTRL 0x21C -#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 -#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 -#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 -#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 -#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 - -/* Selene Master Control Register */ -#define REG_MASTER_CTRL 0x1400 -#define MASTER_CTRL_SOFT_RST 0x1 -#define MASTER_CTRL_MTIMER_EN 0x2 -#define MASTER_CTRL_ITIMER_EN 0x4 -#define MASTER_CTRL_MANUAL_INT 0x8 -#define MASTER_CTRL_REV_NUM_SHIFT 16 -#define MASTER_CTRL_REV_NUM_MASK 0xff -#define MASTER_CTRL_DEV_ID_SHIFT 24 -#define MASTER_CTRL_DEV_ID_MASK 0xff - -/* Timer Initial Value Register */ -#define REG_MANUAL_TIMER_INIT 0x1404 - -/* IRQ ModeratorTimer Initial Value Register */ -#define REG_IRQ_MODU_TIMER_INIT 0x1408 - -#define REG_GPHY_ENABLE 0x140C - -/* IRQ Anti-Lost Timer Initial Value Register */ -#define REG_CMBDISDMA_TIMER 0x140E - -/* Block IDLE Status Register */ -#define REG_IDLE_STATUS 0x1410 -#define IDLE_STATUS_RXMAC 1 -#define IDLE_STATUS_TXMAC 2 -#define IDLE_STATUS_RXQ 4 -#define IDLE_STATUS_TXQ 8 -#define IDLE_STATUS_DMAR 0x10 -#define IDLE_STATUS_DMAW 0x20 -#define IDLE_STATUS_SMB 0x40 -#define IDLE_STATUS_CMB 0x80 - -/* MDIO Control Register */ -#define REG_MDIO_CTRL 0x1414 -#define MDIO_DATA_MASK 0xffff -#define MDIO_DATA_SHIFT 0 -#define MDIO_REG_ADDR_MASK 0x1f -#define MDIO_REG_ADDR_SHIFT 16 -#define MDIO_RW 0x200000 -#define MDIO_SUP_PREAMBLE 0x400000 -#define MDIO_START 0x800000 -#define MDIO_CLK_SEL_SHIFT 24 -#define MDIO_CLK_25_4 0 -#define MDIO_CLK_25_6 2 -#define MDIO_CLK_25_8 3 -#define MDIO_CLK_25_10 4 -#define MDIO_CLK_25_14 5 -#define MDIO_CLK_25_20 6 -#define MDIO_CLK_25_28 7 -#define MDIO_BUSY 0x8000000 -#define MDIO_WAIT_TIMES 30 - -/* MII PHY Status Register */ -#define REG_PHY_STATUS 0x1418 - -/* BIST Control and Status Register0 (for the Packet Memory) */ -#define REG_BIST0_CTRL 0x141c -#define BIST0_NOW 0x1 -#define BIST0_SRAM_FAIL 0x2 -#define BIST0_FUSE_FLAG 0x4 -#define REG_BIST1_CTRL 0x1420 -#define BIST1_NOW 0x1 -#define BIST1_SRAM_FAIL 0x2 -#define BIST1_FUSE_FLAG 0x4 - -/* MAC Control Register */ -#define REG_MAC_CTRL 0x1480 -#define MAC_CTRL_TX_EN 1 -#define MAC_CTRL_RX_EN 2 -#define MAC_CTRL_TX_FLOW 4 -#define MAC_CTRL_RX_FLOW 8 -#define MAC_CTRL_LOOPBACK 0x10 -#define MAC_CTRL_DUPLX 0x20 -#define MAC_CTRL_ADD_CRC 0x40 -#define MAC_CTRL_PAD 0x80 -#define MAC_CTRL_LENCHK 0x100 -#define MAC_CTRL_HUGE_EN 0x200 -#define MAC_CTRL_PRMLEN_SHIFT 10 -#define MAC_CTRL_PRMLEN_MASK 0xf -#define MAC_CTRL_RMV_VLAN 0x4000 -#define MAC_CTRL_PROMIS_EN 0x8000 -#define MAC_CTRL_TX_PAUSE 0x10000 -#define MAC_CTRL_SCNT 0x20000 -#define MAC_CTRL_SRST_TX 0x40000 -#define MAC_CTRL_TX_SIMURST 0x80000 -#define MAC_CTRL_SPEED_SHIFT 20 -#define MAC_CTRL_SPEED_MASK 0x300000 -#define MAC_CTRL_SPEED_1000 2 -#define MAC_CTRL_SPEED_10_100 1 -#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 -#define MAC_CTRL_TX_HUGE 0x800000 -#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 -#define MAC_CTRL_MC_ALL_EN 0x2000000 -#define MAC_CTRL_BC_EN 0x4000000 -#define MAC_CTRL_DBG 0x8000000 - -/* MAC IPG/IFG Control Register */ -#define REG_MAC_IPG_IFG 0x1484 -#define MAC_IPG_IFG_IPGT_SHIFT 0 -#define MAC_IPG_IFG_IPGT_MASK 0x7f -#define MAC_IPG_IFG_MIFG_SHIFT 8 -#define MAC_IPG_IFG_MIFG_MASK 0xff -#define MAC_IPG_IFG_IPGR1_SHIFT 16 -#define MAC_IPG_IFG_IPGR1_MASK 0x7f -#define MAC_IPG_IFG_IPGR2_SHIFT 24 -#define 
MAC_IPG_IFG_IPGR2_MASK 0x7f - -/* MAC STATION ADDRESS */ -#define REG_MAC_STA_ADDR 0x1488 - -/* Hash table for multicast address */ -#define REG_RX_HASH_TABLE 0x1490 - -/* MAC Half-Duplex Control Register */ -#define REG_MAC_HALF_DUPLX_CTRL 0x1498 -#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 -#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff -#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 -#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf -#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 -#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 -#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 -#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 -#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 -#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf -#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 -#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf - -/* Maximum Frame Length Control Register */ -#define REG_MTU 0x149c - -/* Wake-On-Lan control register */ -#define REG_WOL_CTRL 0x14a0 -#define WOL_PATTERN_EN 0x00000001 -#define WOL_PATTERN_PME_EN 0x00000002 -#define WOL_MAGIC_EN 0x00000004 -#define WOL_MAGIC_PME_EN 0x00000008 -#define WOL_LINK_CHG_EN 0x00000010 -#define WOL_LINK_CHG_PME_EN 0x00000020 -#define WOL_PATTERN_ST 0x00000100 -#define WOL_MAGIC_ST 0x00000200 -#define WOL_LINKCHG_ST 0x00000400 -#define WOL_CLK_SWITCH_EN 0x00008000 -#define WOL_PT0_EN 0x00010000 -#define WOL_PT1_EN 0x00020000 -#define WOL_PT2_EN 0x00040000 -#define WOL_PT3_EN 0x00080000 -#define WOL_PT4_EN 0x00100000 -#define WOL_PT5_EN 0x00200000 -#define WOL_PT6_EN 0x00400000 - -/* WOL Length ( 2 DWORD ) */ -#define REG_WOL_PATTERN_LEN 0x14a4 -#define WOL_PT_LEN_MASK 0x7f -#define WOL_PT0_LEN_SHIFT 0 -#define WOL_PT1_LEN_SHIFT 8 -#define WOL_PT2_LEN_SHIFT 16 -#define WOL_PT3_LEN_SHIFT 24 -#define WOL_PT4_LEN_SHIFT 0 -#define WOL_PT5_LEN_SHIFT 8 -#define WOL_PT6_LEN_SHIFT 16 - -/* Internal SRAM Partition Register */ -#define REG_SRAM_RFD_ADDR 0x1500 -#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4) -#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8) -#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12) -#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16) -#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20) -#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24) -#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28) -#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32) -#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36) -#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40) -#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44) -#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48) -#define SRAM_TCPH_ADDR_MASK 0x0fff -#define SRAM_TCPH_ADDR_SHIFT 0 -#define SRAM_PATH_ADDR_MASK 0x0fff -#define SRAM_PATH_ADDR_SHIFT 16 - -/* Load Ptr Register */ -#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52) - -/* Descriptor Control register */ -#define REG_DESC_BASE_ADDR_HI 0x1540 -#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4) -#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8) -#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12) -#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16) -#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20) -#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24) -#define DESC_RFD_RING_SIZE_MASK 0x7ff -#define DESC_RFD_RING_SIZE_SHIFT 0 -#define DESC_RRD_RING_SIZE_MASK 0x7ff -#define DESC_RRD_RING_SIZE_SHIFT 16 -#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28) -#define DESC_TPD_RING_SIZE_MASK 0x3ff -#define DESC_TPD_RING_SIZE_SHIFT 0 - -/* TXQ Control Register */ -#define REG_TXQ_CTRL 0x1580 -#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 -#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f -#define 
TXQ_CTRL_EN 0x20 -#define TXQ_CTRL_ENH_MODE 0x40 -#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 -#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f -#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 -#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff - -/* Jumbo packet Threshold for task offload */ -#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 -#define TX_JUMBO_TASK_TH_MASK 0x7ff -#define TX_JUMBO_TASK_TH_SHIFT 0 -#define TX_TPD_MIN_IPG_MASK 0x1f -#define TX_TPD_MIN_IPG_SHIFT 16 - -/* RXQ Control Register */ -#define REG_RXQ_CTRL 0x15a0 -#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 -#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff -#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 -#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff -#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 -#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f -#define RXQ_CTRL_CUT_THRU_EN 0x40000000 -#define RXQ_CTRL_EN 0x80000000 - -/* Rx jumbo packet threshold and rrd retirement timer */ -#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4) -#define RXQ_JMBOSZ_TH_MASK 0x7ff -#define RXQ_JMBOSZ_TH_SHIFT 0 -#define RXQ_JMBO_LKAH_MASK 0xf -#define RXQ_JMBO_LKAH_SHIFT 11 -#define RXQ_RRD_TIMER_MASK 0xffff -#define RXQ_RRD_TIMER_SHIFT 16 - -/* RFD flow control register */ -#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8) -#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 -#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff -#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 -#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff - -/* RRD flow control register */ -#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12) -#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 -#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff -#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 -#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff - -/* DMA Engine Control Register */ -#define REG_DMA_CTRL 0x15c0 -#define DMA_CTRL_DMAR_IN_ORDER 0x1 -#define DMA_CTRL_DMAR_ENH_ORDER 0x2 -#define DMA_CTRL_DMAR_OUT_ORDER 0x4 -#define DMA_CTRL_RCB_VALUE 0x8 -#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 -#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 -#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 -#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 -#define DMA_CTRL_DMAR_EN 0x400 -#define DMA_CTRL_DMAW_EN 0x800 - -/* CMB/SMB Control Register */ -#define REG_CSMB_CTRL 0x15d0 -#define CSMB_CTRL_CMB_NOW 1 -#define CSMB_CTRL_SMB_NOW 2 -#define CSMB_CTRL_CMB_EN 4 -#define CSMB_CTRL_SMB_EN 8 - -/* CMB DMA Write Threshold Register */ -#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4) -#define CMB_RRD_TH_SHIFT 0 -#define CMB_RRD_TH_MASK 0x7ff -#define CMB_TPD_TH_SHIFT 16 -#define CMB_TPD_TH_MASK 0x7ff - -/* RX/TX count-down timer to trigger CMB-write. 2us resolution. 
*/ -#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8) -#define CMB_RX_TM_SHIFT 0 -#define CMB_RX_TM_MASK 0xffff -#define CMB_TX_TM_SHIFT 16 -#define CMB_TX_TM_MASK 0xffff - -/* Number of packet received since last CMB write */ -#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12) - -/* Number of packet transmitted since last CMB write */ -#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16) - -/* SMB auto DMA timer register */ -#define REG_SMB_TIMER (REG_CSMB_CTRL+20) - -/* Mailbox Register */ -#define REG_MAILBOX 0x15f0 -#define MB_RFD_PROD_INDX_SHIFT 0 -#define MB_RFD_PROD_INDX_MASK 0x7ff -#define MB_RRD_CONS_INDX_SHIFT 11 -#define MB_RRD_CONS_INDX_MASK 0x7ff -#define MB_TPD_PROD_INDX_SHIFT 22 -#define MB_TPD_PROD_INDX_MASK 0x3ff - -/* Interrupt Status Register */ -#define REG_ISR 0x1600 -#define ISR_SMB 1 -#define ISR_TIMER 2 -#define ISR_MANUAL 4 -#define ISR_RXF_OV 8 -#define ISR_RFD_UNRUN 0x10 -#define ISR_RRD_OV 0x20 -#define ISR_TXF_UNRUN 0x40 -#define ISR_LINK 0x80 -#define ISR_HOST_RFD_UNRUN 0x100 -#define ISR_HOST_RRD_OV 0x200 -#define ISR_DMAR_TO_RST 0x400 -#define ISR_DMAW_TO_RST 0x800 -#define ISR_GPHY 0x1000 -#define ISR_RX_PKT 0x10000 -#define ISR_TX_PKT 0x20000 -#define ISR_TX_DMA 0x40000 -#define ISR_RX_DMA 0x80000 -#define ISR_CMB_RX 0x100000 -#define ISR_CMB_TX 0x200000 -#define ISR_MAC_RX 0x400000 -#define ISR_MAC_TX 0x800000 -#define ISR_UR_DETECTED 0x1000000 -#define ISR_FERR_DETECTED 0x2000000 -#define ISR_NFERR_DETECTED 0x4000000 -#define ISR_CERR_DETECTED 0x8000000 -#define ISR_PHY_LINKDOWN 0x10000000 -#define ISR_DIS_SMB 0x20000000 -#define ISR_DIS_DMA 0x40000000 -#define ISR_DIS_INT 0x80000000 - -/* Interrupt Mask Register */ -#define REG_IMR 0x1604 - -/* Normal Interrupt mask */ -#define IMR_NORMAL_MASK (\ - ISR_SMB |\ - ISR_GPHY |\ - ISR_PHY_LINKDOWN|\ - ISR_DMAR_TO_RST |\ - ISR_DMAW_TO_RST |\ - ISR_CMB_TX |\ - ISR_CMB_RX ) - -/* Debug Interrupt Mask (enable all interrupt) */ -#define IMR_DEBUG_MASK (\ - ISR_SMB |\ - ISR_TIMER |\ - ISR_MANUAL |\ - ISR_RXF_OV |\ - ISR_RFD_UNRUN |\ - ISR_RRD_OV |\ - ISR_TXF_UNRUN |\ - ISR_LINK |\ - ISR_CMB_TX |\ - ISR_CMB_RX |\ - ISR_RX_PKT |\ - ISR_TX_PKT |\ - ISR_MAC_RX |\ - ISR_MAC_TX ) - -/* Interrupt Status Register */ -#define REG_RFD_RRD_IDX 0x1800 -#define REG_TPD_IDX 0x1804 - -/* MII definition */ -/* PHY Common Register */ -#define MII_AT001_CR 0x09 -#define MII_AT001_SR 0x0A -#define MII_AT001_ESR 0x0F -#define MII_AT001_PSCR 0x10 -#define MII_AT001_PSSR 0x11 - -/* PHY Control Register */ -#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_MASK 0x2040 -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 - -/* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable 
*/ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - -/* Link partner ability register. */ -#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ -#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ -#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ -#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ -#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ -#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ -#define MII_LPA_PAUSE 0x0400 /* PAUSE */ -#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ -#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ -#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ -#define MII_LPA_NPAGE 0x8000 /* Next page bit */ - -/* Autoneg Advertisement Register */ -#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ -#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ -#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ -#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ -#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ -#define MII_AR_SPEED_MASK 0x01E0 -#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 - -/* 1000BASE-T Control Register */ -#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ -#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */ -#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */ -#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */ -#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ -#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ -#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ -#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ -#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ -#define MII_AT001_CR_1000T_SPEED_MASK 0x0300 -#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 - -/* 1000BASE-T Status Register */ -#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ -#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ -#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -#define 
MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ -#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ -#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 -#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 - -/* Extended Status Register */ -#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ -#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ -#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ -#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ - -/* AT001 PHY Specific Control Register */ -#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ -#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ -#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ -#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 -#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */ -#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */ -#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ -#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ -#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */ -#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */ -#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ -#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ -#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ -#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ -#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 -#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 -#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 - -/* AT001 PHY Specific Status Register */ -#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ -#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ -#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ -#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ -#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -/* PCI Command Register Bit Definitions */ -#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ -#define CMD_IO_SPACE 0x0001 -#define CMD_MEMORY_SPACE 0x0002 -#define CMD_BUS_MASTER 0x0004 - -/* Wake Up Filter Control */ -#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ -#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ - -/* Error Codes */ -#define ATL1_SUCCESS 0 -#define ATL1_ERR_EEPROM 1 -#define ATL1_ERR_PHY 2 -#define ATL1_ERR_CONFIG 3 -#define ATL1_ERR_PARAM 4 -#define ATL1_ERR_MAC_TYPE 5 -#define ATL1_ERR_PHY_TYPE 6 -#define ATL1_ERR_PHY_SPEED 7 -#define ATL1_ERR_PHY_RES 8 - -#define SPEED_0 0xffff -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - -#define MEDIA_TYPE_AUTO_SENSOR 0 -#define MEDIA_TYPE_1000M_FULL 1 -#define MEDIA_TYPE_100M_FULL 2 -#define MEDIA_TYPE_100M_HALF 3 -#define MEDIA_TYPE_10M_FULL 4 -#define 
MEDIA_TYPE_10M_HALF 5 - -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 -#define ADVERTISE_1000_FULL 0x0020 -#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ -#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ -#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ - -#define MAX_JUMBO_FRAME_SIZE 0x2800 - -#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ -#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ - -/* For checksumming , the sum of all words in the EEPROM should equal 0xBABA */ -#define EEPROM_SUM 0xBABA - -#define ATL1_EEDUMP_LEN 48 - -/* Statistics counters collected by the MAC */ -struct stats_msg_block { - /* rx */ - u32 rx_ok; /* The number of good packet received. */ - u32 rx_bcast; /* The number of good broadcast packet received. */ - u32 rx_mcast; /* The number of good multicast packet received. */ - u32 rx_pause; /* The number of Pause packet received. */ - u32 rx_ctrl; /* The number of Control packet received other than Pause frame. */ - u32 rx_fcs_err; /* The number of packets with bad FCS. */ - u32 rx_len_err; /* The number of packets with mismatch of length field and actual size. */ - u32 rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ - u32 rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ - u32 rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ - u32 rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ - u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ - u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ - u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ - u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ - u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ - u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ - u32 rx_sz_ov; /* The number of good and bad packets received that are more than MTU size Å¡C truncated by Selene. */ - u32 rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ - u32 rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ - u32 rx_align_err; /* Alignment Error */ - u32 rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ - u32 rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ - u32 rx_err_addr; /* The number of packets dropped due to address filtering. */ - - /* tx */ - u32 tx_ok; /* The number of good packet transmitted. */ - u32 tx_bcast; /* The number of good broadcast packet transmitted. */ - u32 tx_mcast; /* The number of good multicast packet transmitted. */ - u32 tx_pause; /* The number of Pause packet transmitted. */ - u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ - u32 tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ - u32 tx_defer; /* The number of packets transmitted that is deferred. */ - u32 tx_byte_cnt; /* The number of bytes of data transmitted. 
FCS is NOT included. */ - u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ - u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ - u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ - u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ - u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ - u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ - u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ - u32 tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ - u32 tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ - u32 tx_late_col; /* The number of packets transmitted with late collisions. */ - u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ - u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ - u32 tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ - u32 tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ - u32 tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ - u32 tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ - u32 tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ - u32 smb_updated; /* 1: SMB Updated. This is used by software as the indication of the statistics update. - * Software should clear this bit as soon as retrieving the statistics information. */ -}; - -/* Coalescing Message Block */ -struct coals_msg_block { - u32 int_stats; /* interrupt status */ - u16 rrd_prod_idx; /* TRD Producer Index. */ - u16 rfd_cons_idx; /* RFD Consumer Index. */ - u16 update; /* Selene sets this bit every time it DMA the CMB to host memory. - * Software supposes to clear this bit when CMB information is processed. */ - u16 tpd_cons_idx; /* TPD Consumer Index. 
*/ -}; - -/* RRD descriptor */ -struct rx_return_desc { - u8 num_buf; /* Number of RFD buffers used by the received packet */ - u8 resved; - u16 buf_indx; /* RFD Index of the first buffer */ - union { - u32 valid; - struct { - u16 rx_chksum; - u16 pkt_size; - } xsum_sz; - } xsz; - - u16 pkt_flg; /* Packet flags */ - u16 err_flg; /* Error flags */ - u16 resved2; - u16 vlan_tag; /* VLAN TAG */ -}; - -#define PACKET_FLAG_ETH_TYPE 0x0080 -#define PACKET_FLAG_VLAN_INS 0x0100 -#define PACKET_FLAG_ERR 0x0200 -#define PACKET_FLAG_IPV4 0x0400 -#define PACKET_FLAG_UDP 0x0800 -#define PACKET_FLAG_TCP 0x1000 -#define PACKET_FLAG_BCAST 0x2000 -#define PACKET_FLAG_MCAST 0x4000 -#define PACKET_FLAG_PAUSE 0x8000 - -#define ERR_FLAG_CRC 0x0001 -#define ERR_FLAG_CODE 0x0002 -#define ERR_FLAG_DRIBBLE 0x0004 -#define ERR_FLAG_RUNT 0x0008 -#define ERR_FLAG_OV 0x0010 -#define ERR_FLAG_TRUNC 0x0020 -#define ERR_FLAG_IP_CHKSUM 0x0040 -#define ERR_FLAG_L4_CHKSUM 0x0080 -#define ERR_FLAG_LEN 0x0100 -#define ERR_FLAG_DES_ADDR 0x0200 - -/* RFD descriptor */ -struct rx_free_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - __le16 buf_len; /* Size of the receive buffer in host memory, in byte */ - u16 coalese; /* Update consumer index to host after the reception of this frame */ - /* __attribute__ ((packed)) is required */ -} __attribute__ ((packed)); - -/* tsopu defines */ -#define TSO_PARAM_BUFLEN_MASK 0x3FFF -#define TSO_PARAM_BUFLEN_SHIFT 0 -#define TSO_PARAM_DMAINT_MASK 0x0001 -#define TSO_PARAM_DMAINT_SHIFT 14 -#define TSO_PARAM_PKTNT_MASK 0x0001 -#define TSO_PARAM_PKTINT_SHIFT 15 -#define TSO_PARAM_VLANTAG_MASK 0xFFFF -#define TSO_PARAM_VLAN_SHIFT 16 - -/* tsopl defines */ -#define TSO_PARAM_EOP_MASK 0x0001 -#define TSO_PARAM_EOP_SHIFT 0 -#define TSO_PARAM_COALESCE_MASK 0x0001 -#define TSO_PARAM_COALESCE_SHIFT 1 -#define TSO_PARAM_INSVLAG_MASK 0x0001 -#define TSO_PARAM_INSVLAG_SHIFT 2 -#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001 -#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3 -#define TSO_PARAM_SEGMENT_MASK 0x0001 -#define TSO_PARAM_SEGMENT_SHIFT 4 -#define TSO_PARAM_IPCKSUM_MASK 0x0001 -#define TSO_PARAM_IPCKSUM_SHIFT 5 -#define TSO_PARAM_TCPCKSUM_MASK 0x0001 -#define TSO_PARAM_TCPCKSUM_SHIFT 6 -#define TSO_PARAM_UDPCKSUM_MASK 0x0001 -#define TSO_PARAM_UDPCKSUM_SHIFT 7 -#define TSO_PARAM_VLANTAGGED_MASK 0x0001 -#define TSO_PARAM_VLANTAGGED_SHIFT 8 -#define TSO_PARAM_ETHTYPE_MASK 0x0001 -#define TSO_PARAM_ETHTYPE_SHIFT 9 -#define TSO_PARAM_IPHL_MASK 0x000F -#define TSO_PARAM_IPHL_SHIFT 10 -#define TSO_PARAM_TCPHDRLEN_MASK 0x000F -#define TSO_PARAM_TCPHDRLEN_SHIFT 14 -#define TSO_PARAM_HDRFLAG_MASK 0x0001 -#define TSO_PARAM_HDRFLAG_SHIFT 18 -#define TSO_PARAM_MSS_MASK 0x1FFF -#define TSO_PARAM_MSS_SHIFT 19 - -/* csumpu defines */ -#define CSUM_PARAM_BUFLEN_MASK 0x3FFF -#define CSUM_PARAM_BUFLEN_SHIFT 0 -#define CSUM_PARAM_DMAINT_MASK 0x0001 -#define CSUM_PARAM_DMAINT_SHIFT 14 -#define CSUM_PARAM_PKTINT_MASK 0x0001 -#define CSUM_PARAM_PKTINT_SHIFT 15 -#define CSUM_PARAM_VALANTAG_MASK 0xFFFF -#define CSUM_PARAM_VALAN_SHIFT 16 - -/* csumpl defines*/ -#define CSUM_PARAM_EOP_MASK 0x0001 -#define CSUM_PARAM_EOP_SHIFT 0 -#define CSUM_PARAM_COALESCE_MASK 0x0001 -#define CSUM_PARAM_COALESCE_SHIFT 1 -#define CSUM_PARAM_INSVLAG_MASK 0x0001 -#define CSUM_PARAM_INSVLAG_SHIFT 2 -#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001 -#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3 -#define CSUM_PARAM_SEGMENT_MASK 0x0001 -#define CSUM_PARAM_SEGMENT_SHIFT 4 -#define CSUM_PARAM_IPCKSUM_MASK 0x0001 -#define CSUM_PARAM_IPCKSUM_SHIFT 5 
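/*
 * Illustrative sketch: the tsopl mask/shift definitions above describe how a
 * TSO request is packed into the lower tso_param word of a TPD descriptor.
 * The standalone user-space program below packs one such word; the
 * DEMO_-prefixed constants are local copies of the values above, the field
 * values are made-up examples, and treating IPHL/TCPHDRLEN as header lengths
 * in 32-bit words is an assumption, not something this header states.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TSO_EOP_SHIFT		0
#define DEMO_TSO_SEGMENT_SHIFT		4
#define DEMO_TSO_IPCKSUM_SHIFT		5
#define DEMO_TSO_TCPCKSUM_SHIFT		6
#define DEMO_TSO_IPHL_MASK		0x000fu
#define DEMO_TSO_IPHL_SHIFT		10
#define DEMO_TSO_TCPHDRLEN_MASK		0x000fu
#define DEMO_TSO_TCPHDRLEN_SHIFT	14
#define DEMO_TSO_MSS_MASK		0x1fffu
#define DEMO_TSO_MSS_SHIFT		19

int main(void)
{
	uint32_t tsopl = 0;
	/* Example values: minimal IP and TCP headers, MSS of 1460 bytes. */
	uint32_t iphl = 5, tcphl = 5, mss = 1460;

	/* Each single-bit field above has a 0x0001 mask, so a shifted 1 suffices. */
	tsopl |= 1u << DEMO_TSO_EOP_SHIFT;	/* last descriptor of the packet */
	tsopl |= 1u << DEMO_TSO_SEGMENT_SHIFT;	/* request segmentation */
	tsopl |= 1u << DEMO_TSO_IPCKSUM_SHIFT;	/* offload IP checksum */
	tsopl |= 1u << DEMO_TSO_TCPCKSUM_SHIFT;	/* offload TCP checksum */
	tsopl |= (iphl & DEMO_TSO_IPHL_MASK) << DEMO_TSO_IPHL_SHIFT;
	tsopl |= (tcphl & DEMO_TSO_TCPHDRLEN_MASK) << DEMO_TSO_TCPHDRLEN_SHIFT;
	tsopl |= (mss & DEMO_TSO_MSS_MASK) << DEMO_TSO_MSS_SHIFT;

	printf("tsopl = 0x%08x\n", tsopl);
	return 0;
}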
-#define CSUM_PARAM_TCPCKSUM_MASK 0x0001 -#define CSUM_PARAM_TCPCKSUM_SHIFT 6 -#define CSUM_PARAM_UDPCKSUM_MASK 0x0001 -#define CSUM_PARAM_UDPCKSUM_SHIFT 7 -#define CSUM_PARAM_VLANTAGGED_MASK 0x0001 -#define CSUM_PARAM_VLANTAGGED_SHIFT 8 -#define CSUM_PARAM_ETHTYPE_MASK 0x0001 -#define CSUM_PARAM_ETHTYPE_SHIFT 9 -#define CSUM_PARAM_IPHL_MASK 0x000F -#define CSUM_PARAM_IPHL_SHIFT 10 -#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF -#define CSUM_PARAM_PLOADOFFSET_SHIFT 16 -#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF -#define CSUM_PARAM_XSUMOFFSET_SHIFT 24 - -/* TPD descriptor */ -struct tso_param { - /* The order of these declarations is important -- don't change it */ - u32 tsopu; /* tso_param upper word */ - u32 tsopl; /* tso_param lower word */ -}; - -struct csum_param { - /* The order of these declarations is important -- don't change it */ - u32 csumpu; /* csum_param upper word */ - u32 csumpl; /* csum_param lower word */ -}; - -union tpd_descr { - u64 data; - struct csum_param csum; - struct tso_param tso; -}; - -struct tx_packet_desc { - __le64 buffer_addr; - union tpd_descr desc; -}; - -/* DMA Order Settings */ -enum atl1_dma_order { - atl1_dma_ord_in = 1, - atl1_dma_ord_enh = 2, - atl1_dma_ord_out = 4 -}; - -enum atl1_dma_rcb { - atl1_rcb_64 = 0, - atl1_rcb_128 = 1 -}; - -enum atl1_dma_req_block { - atl1_dma_req_128 = 0, - atl1_dma_req_256 = 1, - atl1_dma_req_512 = 2, - atl1_dma_req_1024 = 3, - atl1_dma_req_2048 = 4, - atl1_dma_req_4096 = 5 -}; - -struct atl1_spi_flash_dev { - const char *manu_name; /* manufacturer id */ - /* op-code */ - u8 cmd_wrsr; - u8 cmd_read; - u8 cmd_program; - u8 cmd_wren; - u8 cmd_wrdi; - u8 cmd_rdsr; - u8 cmd_rdid; - u8 cmd_sector_erase; - u8 cmd_chip_erase; -}; - -#endif /* _ATL1_HW_H_ */ diff --git a/drivers/net/atlx/atl1_main.c b/drivers/net/atlx/atl1_main.c deleted file mode 100644 index 9200ee5..0000000 --- a/drivers/net/atlx/atl1_main.c +++ /dev/null @@ -1,2453 +0,0 @@ -/* - * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook - * Copyright(c) 2006 Jay Cliburn - * - * Derived from Intel e1000 driver - * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Xiong Huang - * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei, - * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA - * - * Chris Snook - * Jay Cliburn - * - * This version is adapted from the Attansic reference driver for - * inclusion in the Linux kernel. It is currently under heavy development. - * A very incomplete list of things that need to be dealt with: - * - * TODO: - * Fix TSO; tx performance is horrible with TSO enabled. - * Wake on LAN. - * Add more ethtool functions. 
- * Fix abstruse irq enable/disable condition described here: - * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 - * - * NEEDS TESTING: - * VLAN - * multicast - * promiscuous mode - * interrupt coalescing - * SMP torture testing - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "atl1.h" - -#define DRIVER_VERSION "2.0.7" - -char atl1_driver_name[] = "atl1"; -static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver"; -static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation."; -char atl1_driver_version[] = DRIVER_VERSION; - -MODULE_AUTHOR - ("Attansic Corporation , Chris Snook , Jay Cliburn "); -MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRIVER_VERSION); - -/* - * atl1_pci_tbl - PCI Device ID Table - */ -static const struct pci_device_id atl1_pci_tbl[] = { - {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); - -/* - * atl1_sw_init - Initialize general software structures (struct atl1_adapter) - * @adapter: board private structure to initialize - * - * atl1_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - */ -static int __devinit atl1_sw_init(struct atl1_adapter *adapter) -{ - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - - hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - adapter->wol = 0; - adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; - adapter->ict = 50000; /* 100ms */ - adapter->link_speed = SPEED_0; /* hardware init */ - adapter->link_duplex = FULL_DUPLEX; - - hw->phy_configured = false; - hw->preamble_len = 7; - hw->ipgt = 0x60; - hw->min_ifg = 0x50; - hw->ipgr1 = 0x40; - hw->ipgr2 = 0x60; - hw->max_retry = 0xf; - hw->lcol = 0x37; - hw->jam_ipg = 7; - hw->rfd_burst = 8; - hw->rrd_burst = 8; - hw->rfd_fetch_gap = 1; - hw->rx_jumbo_th = adapter->rx_buffer_len / 8; - hw->rx_jumbo_lkah = 1; - hw->rrd_ret_timer = 16; - hw->tpd_burst = 4; - hw->tpd_fetch_th = 16; - hw->txf_burst = 0x100; - hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; - hw->tpd_fetch_gap = 1; - hw->rcb_value = atl1_rcb_64; - hw->dma_ord = atl1_dma_ord_enh; - hw->dmar_block = atl1_dma_req_256; - hw->dmaw_block = atl1_dma_req_256; - hw->cmb_rrd = 4; - hw->cmb_tpd = 4; - hw->cmb_rx_timer = 1; /* about 2us */ - hw->cmb_tx_timer = 1; /* about 2us */ - hw->smb_timer = 100000; /* about 200ms */ - - spin_lock_init(&adapter->lock); - spin_lock_init(&adapter->mb_lock); - - return 0; -} - -static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - u16 result; - - atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); - - return result; -} - -static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, - int val) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - - atl1_write_phy_reg(&adapter->hw, reg_num, val); -} - -/* - * atl1_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ -static int atl1_mii_ioctl(struct net_device 
*netdev, struct ifreq *ifr, int cmd) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - unsigned long flags; - int retval; - - if (!netif_running(netdev)) - return -EINVAL; - - spin_lock_irqsave(&adapter->lock, flags); - retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); - spin_unlock_irqrestore(&adapter->lock, flags); - - return retval; -} - -/* - * atl1_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ -static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return atl1_mii_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -/* - * atl1_setup_mem_resources - allocate Tx / RX descriptor resources - * @adapter: board private structure - * - * Return 0 on success, negative on failure - */ -s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) -{ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_ring_header *ring_header = &adapter->ring_header; - struct pci_dev *pdev = adapter->pdev; - int size; - u8 offset = 0; - - size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); - tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); - if (unlikely(!tpd_ring->buffer_info)) { - dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); - goto err_nomem; - } - rfd_ring->buffer_info = - (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); - - /* real ring DMA buffer - * each ring/block may need up to 8 bytes for alignment, hence the - * additional 40 bytes tacked onto the end. - */ - ring_header->size = size = - sizeof(struct tx_packet_desc) * tpd_ring->count - + sizeof(struct rx_free_desc) * rfd_ring->count - + sizeof(struct rx_return_desc) * rrd_ring->count - + sizeof(struct coals_msg_block) - + sizeof(struct stats_msg_block) - + 40; - - ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, - &ring_header->dma); - if (unlikely(!ring_header->desc)) { - dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); - goto err_nomem; - } - - memset(ring_header->desc, 0, ring_header->size); - - /* init TPD ring */ - tpd_ring->dma = ring_header->dma; - offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; - tpd_ring->dma += offset; - tpd_ring->desc = (u8 *) ring_header->desc + offset; - tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; - - /* init RFD ring */ - rfd_ring->dma = tpd_ring->dma + tpd_ring->size; - offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; - rfd_ring->dma += offset; - rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); - rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; - - - /* init RRD ring */ - rrd_ring->dma = rfd_ring->dma + rfd_ring->size; - offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; - rrd_ring->dma += offset; - rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); - rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; - - - /* init CMB */ - adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; - offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; - adapter->cmb.dma += offset; - adapter->cmb.cmb = (struct coals_msg_block *) - ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); - - /* init SMB */ - adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); - offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; - adapter->smb.dma += offset; - adapter->smb.smb = (struct stats_msg_block *) - ((u8 *) adapter->cmb.cmb + - (sizeof(struct coals_msg_block) + offset)); - - return ATL1_SUCCESS; - -err_nomem: - kfree(tpd_ring->buffer_info); - return -ENOMEM; -} - -static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) -{ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - - atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); - - rfd_ring->next_to_clean = 0; - atomic_set(&rfd_ring->next_to_use, 0); - - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); -} - -/* - * atl1_clean_rx_ring - Free RFD Buffers - * @adapter: board private structure - */ -static void atl1_clean_rx_ring(struct atl1_adapter *adapter) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rfd_ring->count; i++) { - buffer_info = &rfd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - } - - size = sizeof(struct atl1_buffer) * rfd_ring->count; - memset(rfd_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rfd_ring->desc, 0, rfd_ring->size); - - rfd_ring->next_to_clean = 0; - atomic_set(&rfd_ring->next_to_use, 0); - - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); -} - -/* - * atl1_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure - */ -static void atl1_clean_tx_ring(struct atl1_adapter *adapter) -{ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); - buffer_info->dma = 0; - } - } - - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - } - - size = sizeof(struct atl1_buffer) * tpd_ring->count; - memset(tpd_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(tpd_ring->desc, 0, tpd_ring->size); - - atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); -} - -/* - * atl1_free_ring_resources - Free Tx / RX descriptor Resources - * @adapter: board private structure - * - * Free all transmit software resources - */ -void atl1_free_ring_resources(struct atl1_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_ring_header *ring_header = &adapter->ring_header; - - atl1_clean_tx_ring(adapter); - atl1_clean_rx_ring(adapter); - - kfree(tpd_ring->buffer_info); - pci_free_consistent(pdev, ring_header->size, ring_header->desc, - ring_header->dma); - 
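The ring setup above places the TPD, RFD and RRD rings plus the CMB and SMB back to back in a single DMA allocation, nudging each block up to the next 8-byte boundary; that is why 40 extra bytes (five blocks, at most 8 bytes of padding each) are added to the total size. The per-block adjustment repeated in that code is just an align-up, shown here in isolation (helper name is illustrative):

	/* Padding needed to move addr up to the next 8-byte boundary (0 if
	 * already aligned); the driver adds this amount to both the DMA
	 * address and the CPU pointer of each sub-block. */
	static unsigned int pad_to_8(unsigned long long addr)
	{
		return (addr & 0x7) ? 8u - (unsigned int)(addr & 0x7) : 0u;
	}
	/* pad_to_8(0x1000) == 0, pad_to_8(0x1003) == 5 */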
- tpd_ring->buffer_info = NULL; - tpd_ring->desc = NULL; - tpd_ring->dma = 0; - - rfd_ring->buffer_info = NULL; - rfd_ring->desc = NULL; - rfd_ring->dma = 0; - - rrd_ring->desc = NULL; - rrd_ring->dma = 0; -} - -static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) -{ - u32 value; - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - /* Config MAC CTRL Register */ - value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; - /* duplex */ - if (FULL_DUPLEX == adapter->link_duplex) - value |= MAC_CTRL_DUPLX; - /* speed */ - value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? - MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << - MAC_CTRL_SPEED_SHIFT); - /* flow control */ - value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); - /* PAD & CRC */ - value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); - /* preamble length */ - value |= (((u32) adapter->hw.preamble_len - & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); - /* vlan */ - if (adapter->vlgrp) - value |= MAC_CTRL_RMV_VLAN; - /* rx checksum - if (adapter->rx_csum) - value |= MAC_CTRL_RX_CHKSUM_EN; - */ - /* filter mode */ - value |= MAC_CTRL_BC_EN; - if (netdev->flags & IFF_PROMISC) - value |= MAC_CTRL_PROMIS_EN; - else if (netdev->flags & IFF_ALLMULTI) - value |= MAC_CTRL_MC_ALL_EN; - /* value |= MAC_CTRL_LOOPBACK; */ - iowrite32(value, hw->hw_addr + REG_MAC_CTRL); -} - -/* - * atl1_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - */ -static int atl1_set_mac(struct net_device *netdev, void *p) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (netif_running(netdev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); - - atl1_set_mac_addr(&adapter->hw); - return 0; -} - -static u32 atl1_check_link(struct atl1_adapter *adapter) -{ - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 ret_val; - u16 speed, duplex, phy_data; - int reconfig = 0; - - /* MII_BMSR must read twice */ - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); - if (!(phy_data & BMSR_LSTATUS)) { /* link down */ - if (netif_carrier_ok(netdev)) { /* old link state: Up */ - dev_info(&adapter->pdev->dev, "link is down\n"); - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - return ATL1_SUCCESS; - } - - /* Link Up */ - ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) - return ret_val; - - switch (hw->media_type) { - case MEDIA_TYPE_1000M_FULL: - if (speed != SPEED_1000 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_100M_FULL: - if (speed != SPEED_100 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_100M_HALF: - if (speed != SPEED_100 || duplex != HALF_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_10M_FULL: - if (speed != SPEED_10 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_10M_HALF: - if (speed != SPEED_10 || duplex != HALF_DUPLEX) - reconfig = 1; - break; - } - - /* link result is our setting */ - if (!reconfig) { - if (adapter->link_speed != speed - || adapter->link_duplex != duplex) { - adapter->link_speed = speed; - adapter->link_duplex = duplex; - atl1_setup_mac_ctrl(adapter); - dev_info(&adapter->pdev->dev, 
- "%s link is up %d Mbps %s\n", - netdev->name, adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? - "full duplex" : "half duplex"); - } - if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - return ATL1_SUCCESS; - } - - /* change orignal link status */ - if (netif_carrier_ok(netdev)) { - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - - if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && - hw->media_type != MEDIA_TYPE_1000M_FULL) { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | - MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: /* MEDIA_TYPE_10M_HALF: */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - break; - } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); - return ATL1_SUCCESS; - } - - /* auto-neg, insert timer to re-config phy */ - if (!adapter->phy_timer_pending) { - adapter->phy_timer_pending = true; - mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); - } - - return ATL1_SUCCESS; -} - -static void atl1_check_for_link(struct atl1_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - u16 phy_data = 0; - - spin_lock(&adapter->lock); - adapter->phy_timer_pending = false; - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); - spin_unlock(&adapter->lock); - - /* notify upper layer link down ASAP */ - if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ - if (netif_carrier_ok(netdev)) { /* old link state: Up */ - dev_info(&adapter->pdev->dev, "%s link is down\n", - netdev->name); - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - } - schedule_work(&adapter->link_chg_task); -} - -/* - * atl1_set_multi - Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. 
- */ -static void atl1_set_multi(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; - u32 rctl; - u32 hash_value; - - /* Check for Promiscuous and All Multicast modes */ - rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); - if (netdev->flags & IFF_PROMISC) - rctl |= MAC_CTRL_PROMIS_EN; - else if (netdev->flags & IFF_ALLMULTI) { - rctl |= MAC_CTRL_MC_ALL_EN; - rctl &= ~MAC_CTRL_PROMIS_EN; - } else - rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); - - iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); - - /* clear the old settings from the multicast hash table */ - iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); - iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); - - /* compute mc addresses' hash value ,and put it into hash table */ - for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { - hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); - atl1_hash_set(hw, hash_value); - } -} - -/* - * atl1_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - */ -static int atl1_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int old_mtu = netdev->mtu; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); - return -EINVAL; - } - - adapter->hw.max_frame_size = max_frame; - adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; - adapter->rx_buffer_len = (max_frame + 7) & ~7; - adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; - - netdev->mtu = new_mtu; - if ((old_mtu != new_mtu) && netif_running(netdev)) { - atl1_down(adapter); - atl1_up(adapter); - } - - return 0; -} - -static void set_flow_ctrl_old(struct atl1_adapter *adapter) -{ - u32 hi, lo, value; - - /* RFD Flow Control */ - value = adapter->rfd_ring.count; - hi = value / 16; - if (hi < 2) - hi = 2; - lo = value * 7 / 8; - - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); - - /* RRD Flow Control */ - value = adapter->rrd_ring.count; - lo = value / 16; - hi = value * 7 / 8; - if (lo < 2) - lo = 2; - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); -} - -static void set_flow_ctrl_new(struct atl1_hw *hw) -{ - u32 hi, lo, value; - - /* RXF Flow Control */ - value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); - lo = value / 16; - if (lo < 192) - lo = 192; - hi = value * 7 / 8; - if (hi < lo) - hi = lo + 16; - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); - - /* RRD Flow Control */ - value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); - lo = value / 8; - hi = value * 7 / 8; - if (lo < 2) - lo = 2; - if (hi < lo) - hi = lo + 3; - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); -} - -/* 
- * atl1_configure - Configure Transmit&Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Tx /Rx unit of the MAC after a reset. - */ -static u32 atl1_configure(struct atl1_adapter *adapter) -{ - struct atl1_hw *hw = &adapter->hw; - u32 value; - - /* clear interrupt status */ - iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); - - /* set MAC Address */ - value = (((u32) hw->mac_addr[2]) << 24) | - (((u32) hw->mac_addr[3]) << 16) | - (((u32) hw->mac_addr[4]) << 8) | - (((u32) hw->mac_addr[5])); - iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); - value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); - iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); - - /* tx / rx ring */ - - /* HI base address */ - iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), - hw->hw_addr + REG_DESC_BASE_ADDR_HI); - /* LO base address */ - iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_RFD_ADDR_LO); - iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_RRD_ADDR_LO); - iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_TPD_ADDR_LO); - iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_CMB_ADDR_LO); - iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_SMB_ADDR_LO); - - /* element count */ - value = adapter->rrd_ring.count; - value <<= 16; - value += adapter->rfd_ring.count; - iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); - iowrite32(adapter->tpd_ring.count, hw->hw_addr + - REG_DESC_TPD_RING_SIZE); - - /* Load Ptr */ - iowrite32(1, hw->hw_addr + REG_LOAD_PTR); - - /* config Mailbox */ - value = ((atomic_read(&adapter->tpd_ring.next_to_use) - & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | - ((atomic_read(&adapter->rrd_ring.next_to_clean) - & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | - ((atomic_read(&adapter->rfd_ring.next_to_use) - & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAILBOX); - - /* config IPG/IFG */ - value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) - << MAC_IPG_IFG_IPGT_SHIFT) | - (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) - << MAC_IPG_IFG_MIFG_SHIFT) | - (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) - << MAC_IPG_IFG_IPGR1_SHIFT) | - (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) - << MAC_IPG_IFG_IPGR2_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); - - /* config Half-Duplex Control */ - value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | - (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) - << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | - MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | - (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | - (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) - << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); - - /* set Interrupt Moderator Timer */ - iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); - iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); - - /* set Interrupt Clear Timer */ - iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); - - /* set max frame size hw will accept */ - iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); - - /* jumbo size & rrd retirement timer */ - value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) - << RXQ_JMBOSZ_TH_SHIFT) | - (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) - << RXQ_JMBO_LKAH_SHIFT) | - (((u32) 
hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) - << RXQ_RRD_TIMER_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); - - /* Flow Control */ - switch (hw->dev_rev) { - case 0x8001: - case 0x9001: - case 0x9002: - case 0x9003: - set_flow_ctrl_old(adapter); - break; - default: - set_flow_ctrl_new(hw); - break; - } - - /* config TXQ */ - value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) - << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | - (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) - << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | - (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) - << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | - TXQ_CTRL_EN; - iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); - - /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ - value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) - << TX_JUMBO_TASK_TH_SHIFT) | - (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) - << TX_TPD_MIN_IPG_SHIFT); - iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); - - /* config RXQ */ - value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) - << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | - (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) - << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | - (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) - << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | - RXQ_CTRL_EN; - iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); - - /* config DMA Engine */ - value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) - << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | - ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) - << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | - DMA_CTRL_DMAW_EN; - value |= (u32) hw->dma_ord; - if (atl1_rcb_128 == hw->rcb_value) - value |= DMA_CTRL_RCB_VALUE; - iowrite32(value, hw->hw_addr + REG_DMA_CTRL); - - /* config CMB / SMB */ - value = (hw->cmb_tpd > adapter->tpd_ring.count) ? - hw->cmb_tpd : adapter->tpd_ring.count; - value <<= 16; - value |= hw->cmb_rrd; - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); - value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); - iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); - - /* --- enable CMB / SMB */ - value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; - iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); - - value = ioread32(adapter->hw.hw_addr + REG_ISR); - if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) - value = 1; /* config failed */ - else - value = 0; - - /* clear all interrupt status */ - iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); - iowrite32(0, adapter->hw.hw_addr + REG_ISR); - return value; -} - -/* - * atl1_pcie_patch - Patch for PCIE module - */ -static void atl1_pcie_patch(struct atl1_adapter *adapter) -{ - u32 value; - - /* much vendor magic here */ - value = 0x6500; - iowrite32(value, adapter->hw.hw_addr + 0x12FC); - /* pcie flow control mode change */ - value = ioread32(adapter->hw.hw_addr + 0x1008); - value |= 0x8000; - iowrite32(value, adapter->hw.hw_addr + 0x1008); -} - -/* - * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 - * on PCI Command register is disable. - * The function enable this bit. 
- * Brackett, 2006/03/15 - */ -static void atl1_via_workaround(struct atl1_adapter *adapter) -{ - unsigned long value; - - value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); - if (value & PCI_COMMAND_INTX_DISABLE) - value &= ~PCI_COMMAND_INTX_DISABLE; - iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); -} - -/* - * atl1_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - */ -static void atl1_irq_enable(struct atl1_adapter *adapter) -{ - iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); - ioread32(adapter->hw.hw_addr + REG_IMR); -} - -/* - * atl1_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - */ -static void atl1_irq_disable(struct atl1_adapter *adapter) -{ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - ioread32(adapter->hw.hw_addr + REG_IMR); - synchronize_irq(adapter->pdev->irq); -} - -static void atl1_clear_phy_int(struct atl1_adapter *adapter) -{ - u16 phy_data; - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - atl1_read_phy_reg(&adapter->hw, 19, &phy_data); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static void atl1_inc_smb(struct atl1_adapter *adapter) -{ - struct stats_msg_block *smb = adapter->smb.smb; - - /* Fill out the OS statistics structure */ - adapter->soft_stats.rx_packets += smb->rx_ok; - adapter->soft_stats.tx_packets += smb->tx_ok; - adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; - adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; - adapter->soft_stats.multicast += smb->rx_mcast; - adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + - smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); - - /* Rx Errors */ - adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + - smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + - smb->rx_rrd_ov + smb->rx_align_err); - adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; - adapter->soft_stats.rx_length_errors += smb->rx_len_err; - adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; - adapter->soft_stats.rx_frame_errors += smb->rx_align_err; - adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + - smb->rx_rxf_ov); - - adapter->soft_stats.rx_pause += smb->rx_pause; - adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; - adapter->soft_stats.rx_trunc += smb->rx_sz_ov; - - /* Tx Errors */ - adapter->soft_stats.tx_errors += (smb->tx_late_col + - smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); - adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; - adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; - adapter->soft_stats.tx_window_errors += smb->tx_late_col; - - adapter->soft_stats.excecol += smb->tx_abort_col; - adapter->soft_stats.deffer += smb->tx_defer; - adapter->soft_stats.scc += smb->tx_1_col; - adapter->soft_stats.mcc += smb->tx_2_col; - adapter->soft_stats.latecol += smb->tx_late_col; - adapter->soft_stats.tx_underun += smb->tx_underrun; - adapter->soft_stats.tx_trunc += smb->tx_trunc; - adapter->soft_stats.tx_pause += smb->tx_pause; - - adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; - adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; - adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; - adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; - adapter->net_stats.multicast = adapter->soft_stats.multicast; - adapter->net_stats.collisions = adapter->soft_stats.collisions; - adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; - adapter->net_stats.rx_over_errors = - 
adapter->soft_stats.rx_missed_errors; - adapter->net_stats.rx_length_errors = - adapter->soft_stats.rx_length_errors; - adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; - adapter->net_stats.rx_frame_errors = - adapter->soft_stats.rx_frame_errors; - adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; - adapter->net_stats.rx_missed_errors = - adapter->soft_stats.rx_missed_errors; - adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; - adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; - adapter->net_stats.tx_aborted_errors = - adapter->soft_stats.tx_aborted_errors; - adapter->net_stats.tx_window_errors = - adapter->soft_stats.tx_window_errors; - adapter->net_stats.tx_carrier_errors = - adapter->soft_stats.tx_carrier_errors; -} - -/* - * atl1_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. - */ -static struct net_device_stats *atl1_get_stats(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - return &adapter->net_stats; -} - -static void atl1_update_mailbox(struct atl1_adapter *adapter) -{ - unsigned long flags; - u32 tpd_next_to_use; - u32 rfd_next_to_use; - u32 rrd_next_to_clean; - u32 value; - - spin_lock_irqsave(&adapter->mb_lock, flags); - - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); - - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - - spin_unlock_irqrestore(&adapter->mb_lock, flags); -} - -static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, - struct rx_return_desc *rrd, u16 offset) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - - while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { - rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; - if (++rfd_ring->next_to_clean == rfd_ring->count) { - rfd_ring->next_to_clean = 0; - } - } -} - -static void atl1_update_rfd_index(struct atl1_adapter *adapter, - struct rx_return_desc *rrd) -{ - u16 num_buf; - - num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / - adapter->rx_buffer_len; - if (rrd->num_buf == num_buf) - /* clean alloc flag for bad rrd */ - atl1_clean_alloc_flag(adapter, rrd, num_buf); -} - -static void atl1_rx_checksum(struct atl1_adapter *adapter, - struct rx_return_desc *rrd, struct sk_buff *skb) -{ - struct pci_dev *pdev = adapter->pdev; - - skb->ip_summed = CHECKSUM_NONE; - - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | - ERR_FLAG_CODE | ERR_FLAG_OV)) { - adapter->hw_csum_err++; - dev_printk(KERN_DEBUG, &pdev->dev, - "rx checksum error\n"); - return; - } - } - - /* not IPv4 */ - if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) - /* checksum is invalid, but it's not an IPv4 pkt, so ok */ - return; - - /* IPv4 packet */ - if (likely(!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_good++; - return; - } - - /* IPv4, but hardware thinks its checksum is wrong */ - dev_printk(KERN_DEBUG, 
&pdev->dev, - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", - rrd->pkt_flg, rrd->err_flg); - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); - adapter->hw_csum_err++; - return; -} - -/* - * atl1_alloc_rx_buffers - Replace used receive buffers - * @adapter: address of board private structure - */ -static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct pci_dev *pdev = adapter->pdev; - struct page *page; - unsigned long offset; - struct atl1_buffer *buffer_info, *next_info; - struct sk_buff *skb; - u16 num_alloc = 0; - u16 rfd_next_to_use, next_next; - struct rx_free_desc *rfd_desc; - - next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); - if (++next_next == rfd_ring->count) - next_next = 0; - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; - - while (!buffer_info->alloced && !next_info->alloced) { - if (buffer_info->skb) { - buffer_info->alloced = 1; - goto next; - } - - rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); - - skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); - if (unlikely(!skb)) { /* Better luck next round */ - adapter->net_stats.rx_dropped++; - break; - } - - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - - buffer_info->alloced = 1; - buffer_info->skb = skb; - buffer_info->length = (u16) adapter->rx_buffer_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(pdev, page, offset, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); - rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); - rfd_desc->coalese = 0; - -next: - rfd_next_to_use = next_next; - if (unlikely(++next_next == rfd_ring->count)) - next_next = 0; - - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; - num_alloc++; - } - - if (num_alloc) { - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
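Stripped of the diagnostics, the receive-checksum decision in atl1_rx_checksum above comes down to a few flag tests on the return descriptor. A slightly simplified sketch, reusing the PACKET_FLAG_* and ERR_FLAG_* values defined earlier in this patch (the helper name is made up):

	#define PACKET_FLAG_ERR		0x0200
	#define PACKET_FLAG_IPV4	0x0400
	#define ERR_FLAG_IP_CHKSUM	0x0040
	#define ERR_FLAG_L4_CHKSUM	0x0080

	/* Nonzero if the hardware checksum for this frame can be trusted:
	 * IPv4 frame, no error flagged, IP and L4 checksums both clean. */
	static int rrd_csum_ok(unsigned short pkt_flg, unsigned short err_flg)
	{
		if (!(pkt_flg & PACKET_FLAG_IPV4))
			return 0;
		if (pkt_flg & PACKET_FLAG_ERR)
			return 0;
		return !(err_flg & (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM));
	}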
- */ - wmb(); - atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); - } - return num_alloc; -} - -static void atl1_intr_rx(struct atl1_adapter *adapter) -{ - int i, count; - u16 length; - u16 rrd_next_to_clean; - u32 value; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_buffer *buffer_info; - struct rx_return_desc *rrd; - struct sk_buff *skb; - - count = 0; - - rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); - - while (1) { - rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); - i = 1; - if (likely(rrd->xsz.valid)) { /* packet valid */ -chk_rrd: - /* check rrd status */ - if (likely(rrd->num_buf == 1)) - goto rrd_ok; - - /* rrd seems to be bad */ - if (unlikely(i-- > 0)) { - /* rrd may not be DMAed completely */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "incomplete RRD DMA transfer\n"); - udelay(1); - goto chk_rrd; - } - /* bad rrd */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "bad RRD\n"); - /* see if update RFD index */ - if (rrd->num_buf > 1) - atl1_update_rfd_index(adapter, rrd); - - /* update rrd */ - rrd->xsz.valid = 0; - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - continue; - } else { /* current rrd still not be updated */ - - break; - } -rrd_ok: - /* clean alloc flag for bad rrd */ - atl1_clean_alloc_flag(adapter, rrd, 0); - - buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; - if (++rfd_ring->next_to_clean == rfd_ring->count) - rfd_ring->next_to_clean = 0; - - /* update rrd next to clean */ - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM - | ERR_FLAG_LEN))) { - /* packet error, don't need upstream */ - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - continue; - } - } - - /* Good Receive */ - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_FROMDEVICE); - skb = buffer_info->skb; - length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); - - skb_put(skb, length - ETH_FCS_LEN); - - /* Receive Checksum Offload */ - atl1_rx_checksum(adapter, rrd, skb); - skb->protocol = eth_type_trans(skb, adapter->netdev); - - if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { - u16 vlan_tag = (rrd->vlan_tag >> 4) | - ((rrd->vlan_tag & 7) << 13) | - ((rrd->vlan_tag & 8) << 9); - vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); - } else - netif_rx(skb); - - /* let protocol layer free skb */ - buffer_info->skb = NULL; - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - - adapter->netdev->last_rx = jiffies; - } - - atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); - - atl1_alloc_rx_buffers(adapter); - - /* update mailbox ? 
*/ - if (count) { - u32 tpd_next_to_use; - u32 rfd_next_to_use; - - spin_lock(&adapter->mb_lock); - - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = - atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = - atomic_read(&adapter->rrd_ring.next_to_clean); - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - spin_unlock(&adapter->mb_lock); - } -} - -static void atl1_intr_tx(struct atl1_adapter *adapter) -{ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - u16 sw_tpd_next_to_clean; - u16 cmb_tpd_next_to_clean; - - sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); - cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); - - while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { - struct tx_packet_desc *tpd; - - tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); - buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; - if (buffer_info->dma) { - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); - buffer_info->dma = 0; - } - - if (buffer_info->skb) { - dev_kfree_skb_irq(buffer_info->skb); - buffer_info->skb = NULL; - } - tpd->buffer_addr = 0; - tpd->desc.data = 0; - - if (++sw_tpd_next_to_clean == tpd_ring->count) - sw_tpd_next_to_clean = 0; - } - atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); - - if (netif_queue_stopped(adapter->netdev) - && netif_carrier_ok(adapter->netdev)) - netif_wake_queue(adapter->netdev); -} - -static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) -{ - u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); - u16 next_to_use = atomic_read(&tpd_ring->next_to_use); - return ((next_to_clean > next_to_use) ? - next_to_clean - next_to_use - 1 : - tpd_ring->count + next_to_clean - next_to_use - 1); -} - -static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tso_param *tso) -{ - /* We enter this function holding a spinlock. 
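atl1_tpd_avail above is the usual free-slot count for a producer/consumer ring that always leaves one slot unused, so that next_to_use == next_to_clean can only mean "empty". The same arithmetic as a standalone helper (illustrative name):

	/* Free descriptors in a circular ring of 'count' slots, keeping one
	 * slot unused so a full ring is distinguishable from an empty one. */
	static unsigned int ring_space(unsigned int next_to_clean,
				       unsigned int next_to_use,
				       unsigned int count)
	{
		if (next_to_clean > next_to_use)
			return next_to_clean - next_to_use - 1;
		return count + next_to_clean - next_to_use - 1;
	}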
*/ - u8 ipofst; - int err; - - if (skb_shinfo(skb)->gso_size) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (unlikely(err)) - return err; - } - - if (skb->protocol == ntohs(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, IPPROTO_TCP, 0); - ipofst = skb_network_offset(skb); - if (ipofst != ETH_HLEN) /* 802.3 frame */ - tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; - - tso->tsopl |= (iph->ihl & - CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; - tso->tsopl |= (tcp_hdrlen(skb) & - TSO_PARAM_TCPHDRLEN_MASK) << - TSO_PARAM_TCPHDRLEN_SHIFT; - tso->tsopl |= (skb_shinfo(skb)->gso_size & - TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; - return true; - } - } - return false; -} - -static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, - struct csum_param *csum) -{ - u8 css, cso; - - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - cso = skb_transport_offset(skb); - css = cso + skb->csum_offset; - if (unlikely(cso & 0x1)) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "payload offset not an even number\n"); - return -1; - } - csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << - CSUM_PARAM_PLOADOFFSET_SHIFT; - csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << - CSUM_PARAM_XSUMOFFSET_SHIFT; - csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; - return true; - } - - return true; -} - -static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, - bool tcp_seg) -{ - /* We enter this function holding a spinlock. */ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - struct page *page; - int first_buf_len = skb->len; - unsigned long offset; - unsigned int nr_frags; - unsigned int f; - u16 tpd_next_to_use; - u16 proto_hdr_len; - u16 len12; - - first_buf_len -= skb->data_len; - nr_frags = skb_shinfo(skb)->nr_frags; - tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; - if (unlikely(buffer_info->skb)) - BUG(); - buffer_info->skb = NULL; /* put skb in last TPD */ - - if (tcp_seg) { - /* TSO/GSO */ - proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - buffer_info->length = proto_hdr_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, proto_hdr_len, - PCI_DMA_TODEVICE); - - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; - - if (first_buf_len > proto_hdr_len) { - int i, m; - - len12 = first_buf_len - proto_hdr_len; - m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < m; i++) { - buffer_info = - &tpd_ring->buffer_info[tpd_next_to_use]; - buffer_info->skb = NULL; - buffer_info->length = - (ATL1_MAX_TX_BUF_LEN >= - len12) ? 
ATL1_MAX_TX_BUF_LEN : len12; - len12 -= buffer_info->length; - page = virt_to_page(skb->data + - (proto_hdr_len + - i * ATL1_MAX_TX_BUF_LEN)); - offset = (unsigned long)(skb->data + - (proto_hdr_len + - i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, - page, offset, buffer_info->length, - PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; - } - } - } else { - /* not TSO/GSO */ - buffer_info->length = first_buf_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, first_buf_len, PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - u16 lenf, i, m; - - frag = &skb_shinfo(skb)->frags[f]; - lenf = frag->size; - - m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < m; i++) { - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; - if (unlikely(buffer_info->skb)) - BUG(); - buffer_info->skb = NULL; - buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? - ATL1_MAX_TX_BUF_LEN : lenf; - lenf -= buffer_info->length; - buffer_info->dma = pci_map_page(adapter->pdev, - frag->page, - frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), - buffer_info->length, PCI_DMA_TODEVICE); - - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; - } - } - - /* last tpd's buffer-info */ - buffer_info->skb = skb; -} - -static void atl1_tx_queue(struct atl1_adapter *adapter, int count, - union tpd_descr *descr) -{ - /* We enter this function holding a spinlock. */ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - int j; - u32 val; - struct atl1_buffer *buffer_info; - struct tx_packet_desc *tpd; - u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); - - for (j = 0; j < count; j++) { - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; - tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); - tpd->desc.csum.csumpu = descr->csum.csumpu; - tpd->desc.csum.csumpl = descr->csum.csumpl; - tpd->desc.tso.tsopu = descr->tso.tsopu; - tpd->desc.tso.tsopl = descr->tso.tsopl; - tpd->buffer_addr = cpu_to_le64(buffer_info->dma); - tpd->desc.data = descr->data; - tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & - CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT; - - val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & - TSO_PARAM_SEGMENT_MASK; - if (val && !j) - tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; - - if (j == (count - 1)) - tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; - - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; - } - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
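Both the TSO header split and the fragment loop in atl1_tx_map above size their descriptor chains with the same ceiling division, because a single TPD can map at most ATL1_MAX_TX_BUF_LEN bytes. In isolation (the limit is passed as a parameter here purely for illustration):

	/* Number of descriptors needed to cover 'len' bytes when each
	 * descriptor can map at most 'max_buf_len' bytes. */
	static unsigned int descs_for_len(unsigned int len,
					  unsigned int max_buf_len)
	{
		return (len + max_buf_len - 1) / max_buf_len;
	}
	/* descs_for_len(5000, 2048) == 3 */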
- */ - wmb(); - - atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); -} - -static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int len = skb->len; - int tso; - int count = 1; - int ret_val; - u32 val; - union tpd_descr param; - u16 frag_size; - u16 vlan_tag; - unsigned long flags; - unsigned int nr_frags = 0; - unsigned int mss = 0; - unsigned int f; - unsigned int proto_hdr_len; - - len -= skb->data_len; - - if (unlikely(skb->len == 0)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - param.data = 0; - param.tso.tsopu = 0; - param.tso.tsopl = 0; - param.csum.csumpu = 0; - param.csum.csumpl = 0; - - /* nr_frags will be nonzero if we're doing scatter/gather (SG) */ - nr_frags = skb_shinfo(skb)->nr_frags; - for (f = 0; f < nr_frags; f++) { - frag_size = skb_shinfo(skb)->frags[f].size; - if (frag_size) - count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; - } - - /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ - mss = skb_shinfo(skb)->gso_size; - if (mss) { - if (skb->protocol == htons(ETH_P_IP)) { - proto_hdr_len = (skb_transport_offset(skb) + - tcp_hdrlen(skb)); - if (unlikely(proto_hdr_len > len)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* need additional TPD ? */ - if (proto_hdr_len != len) - count += (len - proto_hdr_len + - ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; - } - } - - if (!spin_trylock_irqsave(&adapter->lock, flags)) { - /* Can't get lock - tell upper layer to requeue */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); - return NETDEV_TX_LOCKED; - } - - if (atl1_tpd_avail(&adapter->tpd_ring) < count) { - /* not enough descriptors */ - netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->lock, flags); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); - return NETDEV_TX_BUSY; - } - - param.data = 0; - - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { - vlan_tag = vlan_tx_tag_get(skb); - vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | - ((vlan_tag >> 9) & 0x8); - param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; - param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << - CSUM_PARAM_VALAN_SHIFT; - } - - tso = atl1_tso(adapter, skb, ¶m.tso); - if (tso < 0) { - spin_unlock_irqrestore(&adapter->lock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (!tso) { - ret_val = atl1_tx_csum(adapter, skb, ¶m.csum); - if (ret_val < 0) { - spin_unlock_irqrestore(&adapter->lock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } - - val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & - CSUM_PARAM_SEGMENT_MASK; - atl1_tx_map(adapter, skb, 1 == val); - atl1_tx_queue(adapter, count, ¶m); - netdev->trans_start = jiffies; - spin_unlock_irqrestore(&adapter->lock, flags); - atl1_update_mailbox(adapter); - return NETDEV_TX_OK; -} - -/* - * atl1_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure - */ -static irqreturn_t atl1_intr(int irq, void *data) -{ - struct atl1_adapter *adapter = netdev_priv(data); - u32 status; - u8 update_rx; - int max_ints = 10; - - status = adapter->cmb.cmb->int_stats; - if (!status) - return IRQ_NONE; - - update_rx = 0; - - do { - /* clear CMB interrupt status at once */ - adapter->cmb.cmb->int_stats = 0; - - if (status & ISR_GPHY) /* clear phy status */ - atl1_clear_phy_int(adapter); - - /* clear ISR status, and Enable CMB DMA/Disable Interrupt 
*/ - iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); - - /* check if SMB intr */ - if (status & ISR_SMB) - atl1_inc_smb(adapter); - - /* check if PCIE PHY Link down */ - if (status & ISR_PHY_LINKDOWN) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie phy link down %x\n", status); - if (netif_running(adapter->netdev)) { /* reset MAC */ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } - } - - /* check if DMA read/write error ? */ - if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie DMA r/w error (status = 0x%x)\n", - status); - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } - - /* link event */ - if (status & ISR_GPHY) { - adapter->soft_stats.tx_carrier_errors++; - atl1_check_for_link(adapter); - } - - /* transmit event */ - if (status & ISR_CMB_TX) - atl1_intr_tx(adapter); - - /* rx exception */ - if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV | ISR_CMB_RX))) { - if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "rx exception, ISR = 0x%x\n", status); - atl1_intr_rx(adapter); - } - - if (--max_ints < 0) - break; - - } while ((status = adapter->cmb.cmb->int_stats)); - - /* re-enable Interrupt */ - iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); - return IRQ_HANDLED; -} - -/* - * atl1_watchdog - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - */ -static void atl1_watchdog(unsigned long data) -{ - struct atl1_adapter *adapter = (struct atl1_adapter *)data; - - /* Reset the timer */ - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); -} - -/* - * atl1_phy_config - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - */ -static void atl1_phy_config(unsigned long data) -{ - struct atl1_adapter *adapter = (struct atl1_adapter *)data; - struct atl1_hw *hw = &adapter->hw; - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - adapter->phy_timer_pending = false; - atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); - atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); - atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -/* - * atl1_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - */ -static void atl1_tx_timeout(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - /* Do the reset outside of interrupt context */ - schedule_work(&adapter->tx_timeout_task); -} - -/* - * Orphaned vendor comment left intact here: - * - * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT - * will assert. We do soft reset <0x1400=1> according - * with the SPEC. BUT, it seemes that PCIE or DMA - * state-machine will not be reset. DMAR_TO_INT will - * assert again and again. 
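The transmit path in atl1_xmit_frame and the receive path in atl1_intr_rx above shuffle the 802.1Q tag between the stack's tag layout and the one the hardware uses (the 12-bit VID in the upper bits, priority/CFI in the low nibble). A sketch showing that the two swizzles are inverses of each other (helper names are illustrative):

	#include <stdint.h>

	static uint16_t vlan_tag_to_hw(uint16_t tci)
	{
		return (uint16_t)((tci << 4) | (tci >> 13) | ((tci >> 9) & 0x8));
	}

	static uint16_t vlan_tag_from_hw(uint16_t hw)
	{
		return (uint16_t)((hw >> 4) | ((hw & 7) << 13) | ((hw & 8) << 9));
	}
	/* vlan_tag_from_hw(vlan_tag_to_hw(x)) == x for every 16-bit x. */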
- * - */ -static void atl1_tx_timeout_task(struct work_struct *work) -{ - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - atl1_down(adapter); - atl1_up(adapter); - netif_device_attach(netdev); -} - -/* - * atl1_link_chg_task - deal with link change event Out of interrupt context - */ -static void atl1_link_chg_task(struct work_struct *work) -{ - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, link_chg_task); - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - atl1_check_link(adapter); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static void atl1_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - unsigned long flags; - u32 ctrl; - - spin_lock_irqsave(&adapter->lock, flags); - /* atl1_irq_disable(adapter); */ - adapter->vlgrp = grp; - - if (grp) { - /* enable VLAN tag insert/strip */ - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); - ctrl |= MAC_CTRL_RMV_VLAN; - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); - } else { - /* disable VLAN tag insert/strip */ - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_RMV_VLAN; - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); - } - - /* atl1_irq_enable(adapter); */ - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static void atl1_restore_vlan(struct atl1_adapter *adapter) -{ - atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); -} - -int atl1_reset(struct atl1_adapter *adapter) -{ - int ret; - - ret = atl1_reset_hw(&adapter->hw); - if (ret != ATL1_SUCCESS) - return ret; - return atl1_init_hw(&adapter->hw); -} - -s32 atl1_up(struct atl1_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - int irq_flags = IRQF_SAMPLE_RANDOM; - - /* hardware has been reset, we need to reload some things */ - atl1_set_multi(netdev); - atl1_init_ring_ptrs(adapter); - atl1_restore_vlan(adapter); - err = atl1_alloc_rx_buffers(adapter); - if (unlikely(!err)) /* no RX BUFFER allocated */ - return -ENOMEM; - - if (unlikely(atl1_configure(adapter))) { - err = -EIO; - goto err_up; - } - - err = pci_enable_msi(adapter->pdev); - if (err) { - dev_info(&adapter->pdev->dev, - "Unable to enable MSI: %d\n", err); - irq_flags |= IRQF_SHARED; - } - - err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, - netdev->name, netdev); - if (unlikely(err)) - goto err_up; - - mod_timer(&adapter->watchdog_timer, jiffies); - atl1_irq_enable(adapter); - atl1_check_link(adapter); - return 0; - -err_up: - pci_disable_msi(adapter->pdev); - /* free rx_buffers */ - atl1_clean_rx_ring(adapter); - return err; -} - -void atl1_down(struct atl1_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_config_timer); - adapter->phy_timer_pending = false; - - atl1_irq_disable(adapter); - free_irq(adapter->pdev->irq, netdev); - pci_disable_msi(adapter->pdev); - atl1_reset_hw(&adapter->hw); - adapter->cmb.cmb->int_stats = 0; - - adapter->link_speed = SPEED_0; - adapter->link_duplex = -1; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - - atl1_clean_tx_ring(adapter); - atl1_clean_rx_ring(adapter); -} - -/* - * atl1_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The 
open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - */ -static int atl1_open(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int err; - - /* allocate transmit descriptors */ - err = atl1_setup_ring_resources(adapter); - if (err) - return err; - - err = atl1_up(adapter); - if (err) - goto err_up; - - return 0; - -err_up: - atl1_reset(adapter); - return err; -} - -/* - * atl1_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - */ -static int atl1_close(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - atl1_down(adapter); - atl1_free_ring_resources(adapter); - return 0; -} - -#ifdef CONFIG_PM -static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - u32 ctrl = 0; - u32 wufc = adapter->wol; - - netif_device_detach(netdev); - if (netif_running(netdev)) - atl1_down(adapter); - - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - if (ctrl & BMSR_LSTATUS) - wufc &= ~ATL1_WUFC_LNKC; - - /* reduce speed to 10/100M */ - if (wufc) { - atl1_phy_enter_power_saving(hw); - /* if resume, let driver to re- setup link */ - hw->phy_configured = false; - atl1_set_mac_addr(hw); - atl1_set_multi(netdev); - - ctrl = 0; - /* turn on magic packet wol */ - if (wufc & ATL1_WUFC_MAG) - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - - /* turn on Link change WOL */ - if (wufc & ATL1_WUFC_LNKC) - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); - iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); - - /* turn on all-multi mode if wake on multicast is enabled */ - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_DBG; - ctrl &= ~MAC_CTRL_PROMIS_EN; - if (wufc & ATL1_WUFC_MC) - ctrl |= MAC_CTRL_MC_ALL_EN; - else - ctrl &= ~MAC_CTRL_MC_ALL_EN; - - /* turn on broadcast mode if wake on-BC is enabled */ - if (wufc & ATL1_WUFC_BC) - ctrl |= MAC_CTRL_BC_EN; - else - ctrl &= ~MAC_CTRL_BC_EN; - - /* enable RX */ - ctrl |= MAC_CTRL_RX_EN; - iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - } - - pci_save_state(pdev); - pci_disable_device(pdev); - - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -static int atl1_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - u32 ret_val; - - pci_set_power_state(pdev, 0); - pci_restore_state(pdev); - - ret_val = pci_enable_device(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - atl1_reset(adapter); - 
- if (netif_running(netdev)) - atl1_up(adapter); - netif_device_attach(netdev); - - atl1_via_workaround(adapter); - - return 0; -} -#else -#define atl1_suspend NULL -#define atl1_resume NULL -#endif - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void atl1_poll_controller(struct net_device *netdev) -{ - disable_irq(netdev->irq); - atl1_intr(netdev->irq, netdev); - enable_irq(netdev->irq); -} -#endif - -/* - * atl1_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in atl1_pci_tbl - * - * Returns 0 on success, negative on failure - * - * atl1_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - */ -static int __devinit atl1_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct atl1_adapter *adapter; - static int cards_found = 0; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - /* - * The atl1 chip can DMA to 64-bit addresses, but it uses a single - * shared register for the high 32 bits, so only a single, aligned, - * 4 GB physical address range can be used at a time. - * - * Supporting 64-bit DMA on this hardware is more trouble than it's - * worth. It is far easier to limit to 32-bit DMA than update - * various kernel subsystems to support the mechanics required by a - * fixed-high-32-bit system. - */ - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (err) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_dma; - } - /* Mark all PCI regions associated with PCI device - * pdev as being reserved by owner atl1_driver_name - */ - err = pci_request_regions(pdev, atl1_driver_name); - if (err) - goto err_request_regions; - - /* Enables bus-mastering on the device and calls - * pcibios_set_master to do the needed arch specific settings - */ - pci_set_master(pdev); - - netdev = alloc_etherdev(sizeof(struct atl1_adapter)); - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->hw.back = adapter; - - adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); - if (!adapter->hw.hw_addr) { - err = -EIO; - goto err_pci_iomap; - } - /* get device revision number */ - adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + - (REG_MASTER_CTRL + 2)); - dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); - - /* set default ring resource counts */ - adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; - adapter->tpd_ring.count = ATL1_DEFAULT_TPD; - - adapter->mii.dev = netdev; - adapter->mii.mdio_read = mdio_read; - adapter->mii.mdio_write = mdio_write; - adapter->mii.phy_id_mask = 0x1f; - adapter->mii.reg_num_mask = 0x1f; - - netdev->open = &atl1_open; - netdev->stop = &atl1_close; - netdev->hard_start_xmit = &atl1_xmit_frame; - netdev->get_stats = &atl1_get_stats; - netdev->set_multicast_list = &atl1_set_multi; - netdev->set_mac_address = &atl1_set_mac; - netdev->change_mtu = &atl1_change_mtu; - netdev->do_ioctl = &atl1_ioctl; - netdev->tx_timeout = &atl1_tx_timeout; - netdev->watchdog_timeo = 5 * HZ; -#ifdef CONFIG_NET_POLL_CONTROLLER - netdev->poll_controller = atl1_poll_controller; -#endif - netdev->vlan_rx_register = atl1_vlan_rx_register; - - netdev->ethtool_ops = &atl1_ethtool_ops; - adapter->bd_number = cards_found; - - /* setup the private structure */ - err = 
atl1_sw_init(adapter); - if (err) - goto err_common; - - netdev->features = NETIF_F_HW_CSUM; - netdev->features |= NETIF_F_SG; - netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); - - /* - * FIXME - Until tso performance gets fixed, disable the feature. - * Enable it with ethtool -K if desired. - */ - /* netdev->features |= NETIF_F_TSO; */ - - netdev->features |= NETIF_F_LLTX; - - /* - * patch for some L1 of old version, - * the final version of L1 may not need these - * patches - */ - /* atl1_pcie_patch(adapter); */ - - /* really reset GPHY core */ - iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); - - /* - * reset the controller to - * put the device in a known good starting state - */ - if (atl1_reset_hw(&adapter->hw)) { - err = -EIO; - goto err_common; - } - - /* copy the MAC address out of the EEPROM */ - atl1_read_mac_addr(&adapter->hw); - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->dev_addr)) { - err = -EIO; - goto err_common; - } - - atl1_check_options(adapter); - - /* pre-init the MAC, and setup link */ - err = atl1_init_hw(&adapter->hw); - if (err) { - err = -EIO; - goto err_common; - } - - atl1_pcie_patch(adapter); - /* assume we have no link for now */ - netif_carrier_off(netdev); - netif_stop_queue(netdev); - - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = &atl1_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; - - init_timer(&adapter->phy_config_timer); - adapter->phy_config_timer.function = &atl1_phy_config; - adapter->phy_config_timer.data = (unsigned long)adapter; - adapter->phy_timer_pending = false; - - INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); - - INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task); - - INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); - - err = register_netdev(netdev); - if (err) - goto err_common; - - cards_found++; - atl1_via_workaround(adapter); - return 0; - -err_common: - pci_iounmap(pdev, adapter->hw.hw_addr); -err_pci_iomap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_regions(pdev); -err_dma: -err_request_regions: - pci_disable_device(pdev); - return err; -} - -/* - * atl1_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * atl1_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - */ -static void __devexit atl1_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter; - /* Device not available. Return. */ - if (!netdev) - return; - - adapter = netdev_priv(netdev); - - /* Some atl1 boards lack persistent storage for their MAC, and get it - * from the BIOS during POST. If we've been messing with the MAC - * address, we need to save the permanent one. 
- */ - if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { - memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, - ETH_ALEN); - atl1_set_mac_addr(&adapter->hw); - } - - iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); - unregister_netdev(netdev); - pci_iounmap(pdev, adapter->hw.hw_addr); - pci_release_regions(pdev); - free_netdev(netdev); - pci_disable_device(pdev); -} - -static struct pci_driver atl1_driver = { - .name = atl1_driver_name, - .id_table = atl1_pci_tbl, - .probe = atl1_probe, - .remove = __devexit_p(atl1_remove), - .suspend = atl1_suspend, - .resume = atl1_resume -}; - -/* - * atl1_exit_module - Driver Exit Cleanup Routine - * - * atl1_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1_exit_module(void) -{ - pci_unregister_driver(&atl1_driver); -} - -/* - * atl1_init_module - Driver Registration Routine - * - * atl1_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1_init_module(void) -{ - return pci_register_driver(&atl1_driver); -} - -module_init(atl1_init_module); -module_exit(atl1_exit_module); diff --git a/drivers/net/atlx/atl1_param.c b/drivers/net/atlx/atl1_param.c deleted file mode 100644 index 4246bb9..0000000 --- a/drivers/net/atlx/atl1_param.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. - * Copyright(c) 2006 Chris Snook - * Copyright(c) 2006 Jay Cliburn - * - * Derived from Intel e1000 driver - * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include -#include -#include -#include "atl1.h" - -/* - * This is the only thing that needs to be changed to adjust the - * maximum number of ports that the driver can manage. - */ -#define ATL1_MAX_NIC 4 - -#define OPTION_UNSET -1 -#define OPTION_DISABLED 0 -#define OPTION_ENABLED 1 - -#define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } - -/* - * Interrupt Moderate Timer in units of 2 us - * - * Valid Range: 10-65535 - * - * Default Value: 100 (200us) - */ -static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; -static int num_int_mod_timer = 0; -module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); -MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); - -/* - * flash_vendor - * - * Valid Range: 0-2 - * - * 0 - Atmel - * 1 - SST - * 2 - ST - * - * Default Value: 0 - */ -static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; -static int num_flash_vendor = 0; -module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0); -MODULE_PARM_DESC(flash_vendor, "SPI flash vendor"); - -#define DEFAULT_INT_MOD_CNT 100 /* 200us */ -#define MAX_INT_MOD_CNT 65000 -#define MIN_INT_MOD_CNT 50 - -#define FLASH_VENDOR_DEFAULT 0 -#define FLASH_VENDOR_MIN 0 -#define FLASH_VENDOR_MAX 2 - -struct atl1_option { - enum { enable_option, range_option, list_option } type; - char *name; - char *err; - int def; - union { - struct { /* range_option info */ - int min; - int max; - } r; - struct { /* list_option info */ - int nr; - struct atl1_opt_list { - int i; - char *str; - } *p; - } l; - } arg; -}; - -static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev) -{ - if (*value == OPTION_UNSET) { - *value = opt->def; - return 0; - } - - switch (opt->type) { - case enable_option: - switch (*value) { - case OPTION_ENABLED: - dev_info(&pdev->dev, "%s enabled\n", opt->name); - return 0; - case OPTION_DISABLED: - dev_info(&pdev->dev, "%s disabled\n", opt->name); - return 0; - } - break; - case range_option: - if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - dev_info(&pdev->dev, "%s set to %i\n", opt->name, - *value); - return 0; - } - break; - case list_option:{ - int i; - struct atl1_opt_list *ent; - - for (i = 0; i < opt->arg.l.nr; i++) { - ent = &opt->arg.l.p[i]; - if (*value == ent->i) { - if (ent->str[0] != '\0') - dev_info(&pdev->dev, "%s\n", - ent->str); - return 0; - } - } - } - break; - - default: - break; - } - - dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", - opt->name, *value, opt->err); - *value = opt->def; - return -1; -} - -/* - * atl1_check_options - Range Checking for Command Line Parameters - * @adapter: board private structure - * - * This routine checks all command line parameters for valid user - * input. If an invalid value is given, or if no user specified - * value exists, a default value is used. The final value is stored - * in a variable in the adapter structure. 
- */ -void __devinit atl1_check_options(struct atl1_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int bd = adapter->bd_number; - if (bd >= ATL1_MAX_NIC) { - dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); - dev_notice(&pdev->dev, "using defaults for all values\n"); - } - { /* Interrupt Moderate Timer */ - struct atl1_option opt = { - .type = range_option, - .name = "Interrupt Moderator Timer", - .err = "using default of " - __MODULE_STRING(DEFAULT_INT_MOD_CNT), - .def = DEFAULT_INT_MOD_CNT, - .arg = {.r = - {.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}} - }; - int val; - if (num_int_mod_timer > bd) { - val = int_mod_timer[bd]; - atl1_validate_option(&val, &opt, pdev); - adapter->imt = (u16) val; - } else - adapter->imt = (u16) (opt.def); - } - - { /* Flash Vendor */ - struct atl1_option opt = { - .type = range_option, - .name = "SPI Flash Vendor", - .err = "using default of " - __MODULE_STRING(FLASH_VENDOR_DEFAULT), - .def = DEFAULT_INT_MOD_CNT, - .arg = {.r = - {.min = FLASH_VENDOR_MIN,.max = - FLASH_VENDOR_MAX}} - }; - int val; - if (num_flash_vendor > bd) { - val = flash_vendor[bd]; - atl1_validate_option(&val, &opt, pdev); - adapter->hw.flash_vendor = (u8) val; - } else - adapter->hw.flash_vendor = (u8) (opt.def); - } -} diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c new file mode 100644 index 0000000..4186326 --- /dev/null +++ b/drivers/net/atlx/atlx.c @@ -0,0 +1,433 @@ +/* atlx.c -- common functions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* Including this file like a header is a temporary hack, I promise. 
-- CHS */ +#ifndef ATLX_C +#define ATLX_C + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +static struct atlx_spi_flash_dev flash_table[] = { +/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ + {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, + {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, + {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, +}; + +static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atlx_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/* + * atlx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atlx_set_mac(struct net_device *netdev, void *p) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atlx_set_mac_addr(&adapter->hw); + return 0; +} + +static void atlx_check_for_link(struct atlx_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->lock); + adapter->phy_timer_pending = false; + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { + /* Link Down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + dev_info(&adapter->pdev->dev, "%s link is down\n", + netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +/* + * atlx_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void atlx_set_multi(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct atlx_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); + if (netdev->flags & IFF_PROMISC) + rctl |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + + /* clear the old settings from the multicast hash table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + /* compute mc addresses' hash value ,and put it into hash table */ + for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); + atlx_hash_set(hw, hash_value); + } +} + +/* + * atlx_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void atlx_irq_enable(struct atlx_adapter *adapter) +{ + iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + +/* + * atlx_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void atlx_irq_disable(struct atlx_adapter *adapter) +{ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); + synchronize_irq(adapter->pdev->irq); +} + +static void atlx_clear_phy_int(struct atlx_adapter *adapter) +{ + u16 phy_data; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + atlx_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * atlx_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ */ +static struct net_device_stats *atlx_get_stats(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + return &adapter->net_stats; +} + +/* + * atlx_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atlx_tx_timeout(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} + +/* + * atlx_link_chg_task - deal with link change event Out of interrupt context + */ +static void atlx_link_chg_task(struct work_struct *work) +{ + struct atlx_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atlx_adapter, link_chg_task); + + spin_lock_irqsave(&adapter->lock, flags); + atlx_check_link(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atlx_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&adapter->lock, flags); + /* atlx_irq_disable(adapter); FIXME: confirm/remove */ + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl |= MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } else { + /* disable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } + + /* atlx_irq_enable(adapter); FIXME */ + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atlx_restore_vlan(struct atlx_adapter *adapter) +{ + atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} + +/* + * This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL1_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } + +/* + * Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 100 (200us) + */ +static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static int num_int_mod_timer; +module_param_array_named(int_mod_timer, int_mod_timer, int, + &num_int_mod_timer, 0); +MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); + +/* + * flash_vendor + * + * Valid Range: 0-2 + * + * 0 - Atmel + * 1 - SST + * 2 - ST + * + * Default Value: 0 + */ +static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static int num_flash_vendor; +module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0); +MODULE_PARM_DESC(flash_vendor, "SPI flash vendor"); + +#define DEFAULT_INT_MOD_CNT 100 /* 200us */ +#define MAX_INT_MOD_CNT 65000 +#define MIN_INT_MOD_CNT 50 + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl1_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, + struct pci_dev *pdev) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&pdev->dev, "%s enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&pdev->dev, "%s disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&pdev->dev, "%s set to %i\n", opt->name, + *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + + default: + break; + } + + dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/* + * atl1_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
+ */ +void __devinit atl1_check_options(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int bd = adapter->bd_number; + if (bd >= ATL1_MAX_NIC) { + dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); + dev_notice(&pdev->dev, "using defaults for all values\n"); + } + { /* Interrupt Moderate Timer */ + struct atl1_option opt = { + .type = range_option, + .name = "Interrupt Moderator Timer", + .err = "using default of " + __MODULE_STRING(DEFAULT_INT_MOD_CNT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = {.min = MIN_INT_MOD_CNT, + .max = MAX_INT_MOD_CNT} } + }; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->imt = (u16) val; + } else + adapter->imt = (u16) (opt.def); + } + + { /* Flash Vendor */ + struct atl1_option opt = { + .type = range_option, + .name = "SPI Flash Vendor", + .err = "using default of " + __MODULE_STRING(FLASH_VENDOR_DEFAULT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = {.min = FLASH_VENDOR_MIN, + .max = FLASH_VENDOR_MAX} } + }; + int val; + if (num_flash_vendor > bd) { + val = flash_vendor[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->hw.flash_vendor = (u8) val; + } else + adapter->hw.flash_vendor = (u8) (opt.def); + } +} + +#endif /* ATLX_C */ diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h new file mode 100644 index 0000000..3be7c09 --- /dev/null +++ b/drivers/net/atlx/atlx.h @@ -0,0 +1,506 @@ +/* atlx_hw.h -- common hardware definitions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef ATLX_H +#define ATLX_H + +#include +#include + +#define ATLX_DRIVER_VERSION "2.1.1" +MODULE_AUTHOR("Xiong Huang , \ + Chris Snook , Jay Cliburn "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATLX_DRIVER_VERSION); + +#define ATLX_ERR_PHY 2 +#define ATLX_ERR_PHY_SPEED 7 +#define ATLX_ERR_PHY_RES 8 + +#define SPEED_0 0xffff +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define MEDIA_TYPE_AUTO_SENSOR 0 + +/* register definitions */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xFF +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */ +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + 
+#define REG_PCIE_DLL_TX_CTRL1 0x1104 +#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400 +#define PCIE_DLL_TX_CTRL1_DEF 0x568 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0x6500 + +/* Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xFF +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xFF + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ Moderator Timer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 + +#define REG_PHY_ENABLE 0x140C + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xFFFF +#define MDIO_DATA_SHIFT 0 +#define MDIO_REG_ADDR_MASK 0x1F +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 +#define MDIO_SUP_PREAMBLE 0x400000 +#define MDIO_START 0x800000 +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141C +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 +#define BIST0_FUSE_FLAG 0x4 +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 +#define SERDES_LOCK_DETECT_EN 2 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 +#define MAC_CTRL_RX_EN 2 +#define MAC_CTRL_TX_FLOW 4 +#define MAC_CTRL_RX_FLOW 8 +#define MAC_CTRL_LOOPBACK 0x10 +#define MAC_CTRL_DUPLX 0x20 +#define MAC_CTRL_ADD_CRC 0x40 +#define MAC_CTRL_PAD 0x80 +#define MAC_CTRL_LENCHK 0x100 +#define MAC_CTRL_HUGE_EN 0x200 +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_PRMLEN_MASK 0xF +#define MAC_CTRL_RMV_VLAN 0x4000 +#define MAC_CTRL_PROMIS_EN 0x8000 +#define MAC_CTRL_MC_ALL_EN 0x2000000 +#define MAC_CTRL_BC_EN 0x4000000 + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 +#define MAC_IPG_IFG_IPGT_MASK 0x7F +#define MAC_IPG_IFG_MIFG_SHIFT 8 +#define MAC_IPG_IFG_MIFG_MASK 0xFF +#define MAC_IPG_IFG_IPGR1_SHIFT 16 +#define MAC_IPG_IFG_IPGR1_MASK 0x7F +#define MAC_IPG_IFG_IPGR2_SHIFT 24 +#define MAC_IPG_IFG_IPGR2_MASK 0x7F + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149C + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14A0 +#define WOL_PATTERN_EN 0x1 +#define WOL_PATTERN_PME_EN 0x2 +#define WOL_MAGIC_EN 0x4 +#define WOL_MAGIC_PME_EN 0x8 +#define WOL_LINK_CHG_EN 0x10 +#define WOL_LINK_CHG_PME_EN 0x20 +#define WOL_PATTERN_ST 0x100 +#define WOL_MAGIC_ST 0x200 +#define WOL_LINKCHG_ST 0x400 +#define WOL_PT0_EN 0x10000 +#define WOL_PT1_EN 0x20000 +#define WOL_PT2_EN 0x40000 +#define WOL_PT3_EN 0x80000 +#define WOL_PT4_EN 0x100000 +#define WOL_PT0_MATCH 0x1000000 +#define WOL_PT1_MATCH 0x2000000 +#define WOL_PT2_MATCH 0x4000000 +#define WOL_PT3_MATCH 0x8000000 +#define WOL_PT4_MATCH 0x10000000 + +/* Internal SRAM Partition Register, high 32 bits */ +#define REG_SRAM_RFD_ADDR 0x1500 + +/* Descriptor Control register, high 32 bits */ +#define REG_DESC_BASE_ADDR_HI 0x1540 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define REG_RFD_RRD_IDX 0x1800 +#define REG_TPD_IDX 0x1804 + +/* MII definitions */ + +/* PHY Common Register */ +#define MII_ATLX_CR 0x09 +#define MII_ATLX_SR 0x0A +#define MII_ATLX_ESR 0x0F +#define MII_ATLX_PSCR 0x10 +#define MII_ATLX_PSSR 0x11 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register */ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can 
do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + +/* Autoneg Advertisement Register */ +#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */ +#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */ +#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device, + * 0=DTE device */ +#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master, + * 0=Configure PHY as Slave */ +#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config, + * 0=Auto Master/Slave config + */ +#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */ +#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300 +#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master + * 0=Slave + */ +#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config + * fault */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +/* ATLX PHY Specific Control Register */ +#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */ +#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008 +#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low + * 0=CLK125 toggling + */ +#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, + * 
Manual MDI configuration + */ +#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover + * 100BASE-TX/10BASE-T: MDI + * Mode */ +#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended + * 10BASE-T distance + * (Lower 10BASE-T RX + * Threshold) + * 0=Normal 10BASE-T RX + * Threshold + */ +#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in + * 100BASE-TX + * 0=MII interface in + * 100BASE-TX + */ +#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */ +#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* ATLX PHY Specific Status Register */ +#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +/* Wake Up Filter Control */ +#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */ +#define EEPROM_SUM 0xBABA +#define NODE_ADDRESS_SIZE 6 + +struct atlx_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmd_wrsr; + u8 cmd_read; + u8 cmd_program; + u8 cmd_wren; + u8 cmd_wrdi; + u8 cmd_rdsr; + u8 cmd_rdid; + u8 cmd_sector_erase; + u8 cmd_chip_erase; +}; + +#endif /* ATLX_H */ -- cgit v1.1 From 9d90fb1ac9d97da86e24d9ea947bf2a2f333829a Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:05 -0600 Subject: atl1: fix broken TSO The L1 tx packet descriptor expects TCP Header Length to be expressed as a number of 32-bit dwords. The atl1 driver uses tcp_hdrlen() to populate the field, but tcp_hdrlen() returns the header length in bytes, not in dwords. Add a shift to convert tcp_hdrlen() to dwords when we write it to the tpd. Also, some of our bit assignments are made to the wrong tpd words. Change those to the correct words. Finally, since all this fixes TSO, enable TSO by default. 
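As a quick illustration of the byte-to-dword conversion described above, the sketch below packs a TCP header length into a descriptor word the way the fixed driver does. The mask and shift values are placeholders chosen for the example, not the driver's authoritative TSO_PARAM_* definitions, and the program is plain userspace C rather than kernel code.

#include <stdio.h>
#include <stdint.h>

/* Placeholder values for illustration only; the real TSO_PARAM_TCPHDRLEN_*
 * definitions live in the driver headers. */
#define TSO_PARAM_TCPHDRLEN_MASK	0x000F
#define TSO_PARAM_TCPHDRLEN_SHIFT	21

int main(void)
{
	uint32_t tcp_hdrlen_bytes = 20;	/* a TCP header with no options */
	uint32_t tsopl = 0;

	/* The L1 hardware wants the header length in 32-bit dwords,
	 * while tcp_hdrlen() reports bytes, hence the shift by 2. */
	uint32_t tcp_hdrlen_dwords = tcp_hdrlen_bytes >> 2;

	tsopl |= (tcp_hdrlen_dwords & TSO_PARAM_TCPHDRLEN_MASK)
			<< TSO_PARAM_TCPHDRLEN_SHIFT;

	printf("bytes=%u dwords=%u tsopl=0x%08x\n",
	       tcp_hdrlen_bytes, tcp_hdrlen_dwords, tsopl);
	return 0;
}

With a 20-byte header the descriptor field receives 5, the dword count the hardware expects, rather than the raw byte count that the unpatched driver wrote.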
Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index a84c97c..8a26dad 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -36,7 +36,6 @@ * A very incomplete list of things that need to be dealt with: * * TODO: - * Fix TSO; tx performance is horrible with TSO enabled. * Wake on LAN. * Add more ethtool functions. * Fix abstruse irq enable/disable condition described here: @@ -1308,8 +1307,8 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; tso->tsopl |= (iph->ihl & - CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; - tso->tsopl |= (tcp_hdrlen(skb) & + TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT; + tso->tsopl |= ((tcp_hdrlen(skb) >> 2) & TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; tso->tsopl |= (skb_shinfo(skb)->gso_size & @@ -1472,8 +1471,8 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count, tpd->desc.tso.tsopl = descr->tso.tsopl; tpd->buffer_addr = cpu_to_le64(buffer_info->dma); tpd->desc.data = descr->data; - tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & - CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT; + tpd->desc.tso.tsopu |= (cpu_to_le16(buffer_info->length) & + TSO_PARAM_BUFLEN_MASK) << TSO_PARAM_BUFLEN_SHIFT; val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & TSO_PARAM_SEGMENT_MASK; @@ -1481,7 +1480,7 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count, tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; if (j == (count - 1)) - tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; + tpd->desc.tso.tsopl |= 1 << TSO_PARAM_EOP_SHIFT; if (++tpd_next_to_use == tpd_ring->count) tpd_next_to_use = 0; @@ -1574,9 +1573,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) vlan_tag = vlan_tx_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | ((vlan_tag >> 9) & 0x8); - param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; - param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << - CSUM_PARAM_VALAN_SHIFT; + param.tso.tsopl |= 1 << TSO_PARAM_INSVLAG_SHIFT; + param.tso.tsopu |= (vlan_tag & TSO_PARAM_VLANTAG_MASK) << + TSO_PARAM_VLAN_SHIFT; } tso = atl1_tso(adapter, skb, ¶m.tso); @@ -1595,8 +1594,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } } - val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & - CSUM_PARAM_SEGMENT_MASK; + val = (param.tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & + TSO_PARAM_SEGMENT_MASK; atl1_tx_map(adapter, skb, 1 == val); atl1_tx_queue(adapter, count, ¶m); netdev->trans_start = jiffies; @@ -2091,13 +2090,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, netdev->features = NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SG; netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); - - /* - * FIXME - Until tso performance gets fixed, disable the feature. - * Enable it with ethtool -K if desired. - */ - /* netdev->features |= NETIF_F_TSO; */ - + netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_LLTX; /* -- cgit v1.1 From c67c9a2f11d97a545c0e8f56b2ca3e5e36566a94 Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:06 -0600 Subject: atl1: add ethtool register dump Add the ethtool register dump option to the atl1 driver. 
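Since the dump added below is simply one 32-bit word per register offset, mapping a word index in the ethtool buffer back to a register is a matter of multiplying by sizeof(u32). The sketch below shows that mapping; ATL1_REG_COUNT matches the constant introduced by this patch, the example index is chosen to land on REG_MAC_CTRL (0x1480), and the helper name is made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define ATL1_REG_COUNT	1538	/* 32-bit words in the dump, per this patch */

/* Illustrative helper: convert a word index in the ethtool regs buffer
 * back to the MMIO byte offset it was read from. */
static uint32_t atl1_dump_index_to_offset(unsigned int index)
{
	return (uint32_t)(index * sizeof(uint32_t));
}

int main(void)
{
	unsigned int index = 1312;	/* 1312 * 4 = 0x1480, the MAC control register */

	printf("dump word %u -> register offset 0x%04x (%d words total)\n",
	       index, (unsigned int)atl1_dump_index_to_offset(index),
	       ATL1_REG_COUNT);
	return 0;
}

In practice the buffer would be retrieved with ethtool -d on the interface; the reserved regions skipped by the switch statement below read back as zero.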
Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/atlx/atl1.h | 1 + 2 files changed, 54 insertions(+) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 8a26dad..1f564f0 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -2513,6 +2513,57 @@ static int atl1_set_wol(struct net_device *netdev, return 0; } +static int atl1_get_regs_len(struct net_device *netdev) +{ + return ATL1_REG_COUNT * sizeof(u32); +} + +static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + unsigned int i; + u32 *regbuf = p; + + for (i = 0; i < ATL1_REG_COUNT; i++) { + /* + * This switch statement avoids reserved regions + * of register space. + */ + switch (i) { + case 6 ... 9: + case 14: + case 29 ... 31: + case 34 ... 63: + case 75 ... 127: + case 136 ... 1023: + case 1027 ... 1087: + case 1091 ... 1151: + case 1194 ... 1195: + case 1200 ... 1201: + case 1206 ... 1213: + case 1216 ... 1279: + case 1290 ... 1311: + case 1323 ... 1343: + case 1358 ... 1359: + case 1368 ... 1375: + case 1378 ... 1383: + case 1388 ... 1391: + case 1393 ... 1395: + case 1402 ... 1403: + case 1410 ... 1471: + case 1522 ... 1535: + /* reserved region; don't read it */ + regbuf[i] = 0; + break; + default: + /* unreserved region */ + regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); + } + } +} + static void atl1_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { @@ -2703,6 +2754,8 @@ const struct ethtool_ops atl1_ethtool_ops = { .get_drvinfo = atl1_get_drvinfo, .get_wol = atl1_get_wol, .set_wol = atl1_set_wol, + .get_regs_len = atl1_get_regs_len, + .get_regs = atl1_get_regs, .get_ringparam = atl1_get_ringparam, .set_ringparam = atl1_set_ringparam, .get_pauseparam = atl1_get_pauseparam, diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 538948d..30c5a8d 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -584,6 +584,7 @@ enum atl1_dma_req_block { #define ATL1_DEFAULT_RFD 512 #define ATL1_MIN_RFD 128 #define ATL1_MAX_RFD 2048 +#define ATL1_REG_COUNT 1538 #define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) #define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc) -- cgit v1.1 From 401c0aabec4b97320f962a0161a846d230a6f7aa Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:07 -0600 Subject: atl1: simplify tx packet descriptor The transmit packet descriptor consists of four 32-bit words, with word 3 upper bits overloaded depending upon the condition of its bits 3 and 4. The driver currently duplicates all word 2 and some word 3 register bit definitions unnecessarily and also uses a set of nested structures in its definition of the TPD without good cause. This patch adds a lengthy comment describing the TPD, eliminates duplicate TPD bit definitions, and simplifies the TPD structure itself. It also expands the TSO check to correctly handle custom checksum versus TSO processing using the revised TPD definitions. Finally, shorten some variable names in the transmit processing path to reduce line lengths, rename some variables to better describe their purpose (e.g., nseg versus m), and add a comment or two to better describe what the code is doing. 
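The four-word layout described above can be sketched as a plain struct. The field names mirror the buffer_addr/word2/word3 accesses in the hunks that follow, but this is an illustration built with ordinary fixed-width integers rather than the driver's little-endian __le64/__le32 types, and the per-word comments are a summary, not the authoritative bit definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch of the simplified transmit packet descriptor:
 * four 32-bit words, with the buffer DMA address in words 0-1 and the
 * control bits, including the upper bits overloaded by bits 3 and 4
 * (custom checksum versus TSO), in word 3. */
struct tx_packet_desc_sketch {
	uint64_t buffer_addr;	/* words 0-1: buffer DMA address */
	uint32_t word2;		/* word 2: per-buffer fields (length, VLAN tag) */
	uint32_t word3;		/* word 3: EOP, checksum/TSO control bits */
};

int main(void)
{
	printf("descriptor is %zu bytes (four 32-bit words)\n",
	       sizeof(struct tx_packet_desc_sketch));
	return 0;
}

Which interpretation of word 3's upper bits applies at transmit time is what the expanded custom-checksum versus TSO check described above decides.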
Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 265 ++++++++++++++++++++++++++---------------------- drivers/net/atlx/atl1.h | 201 ++++++++++++++++++------------------ 2 files changed, 246 insertions(+), 220 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 1f564f0..f4add3c 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1259,8 +1259,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter) dev_kfree_skb_irq(buffer_info->skb); buffer_info->skb = NULL; } - tpd->buffer_addr = 0; - tpd->desc.data = 0; if (++sw_tpd_next_to_clean == tpd_ring->count) sw_tpd_next_to_clean = 0; @@ -1282,48 +1280,69 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) } static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tso_param *tso) + struct tx_packet_desc *ptpd) { - /* We enter this function holding a spinlock. */ - u8 ipofst; + /* spinlock held */ + u8 hdr_len, ip_off; + u32 real_len; int err; if (skb_shinfo(skb)->gso_size) { if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (unlikely(err)) - return err; + return -1; } if (skb->protocol == ntohs(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; + real_len = (((unsigned char *)iph - skb->data) + + ntohs(iph->tot_len)); + if (real_len < skb->len) + pskb_trim(skb, real_len); + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (skb->len == hdr_len) { + iph->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcp_hdrlen(skb), + IPPROTO_TCP, 0); + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; + ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; + return 1; + } + iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, IPPROTO_TCP, 0); - ipofst = skb_network_offset(skb); - if (ipofst != ETH_HLEN) /* 802.3 frame */ - tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; - - tso->tsopl |= (iph->ihl & - TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT; - tso->tsopl |= ((tcp_hdrlen(skb) >> 2) & - TSO_PARAM_TCPHDRLEN_MASK) << - TSO_PARAM_TCPHDRLEN_SHIFT; - tso->tsopl |= (skb_shinfo(skb)->gso_size & - TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; - tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; - return true; + iph->daddr, 0, IPPROTO_TCP, 0); + ip_off = (unsigned char *)iph - + (unsigned char *) skb_network_header(skb); + if (ip_off == 8) /* 802.3-SNAP frame */ + ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; + else if (ip_off != 0) + return -2; + + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + return 3; } } return false; } static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, - struct csum_param *csum) + struct tx_packet_desc *ptpd) { u8 css, cso; @@ -1335,115 +1354,116 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, "payload offset not an even number\n"); return -1; } - csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << - CSUM_PARAM_PLOADOFFSET_SHIFT; - csum->csumpl |= (css & 
CSUM_PARAM_XSUMOFFSET_MASK) << - CSUM_PARAM_XSUMOFFSET_SHIFT; - csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; + ptpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + ptpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; return true; } - - return true; + return 0; } static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, - bool tcp_seg) + struct tx_packet_desc *ptpd) { - /* We enter this function holding a spinlock. */ + /* spinlock held */ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; + u16 buf_len = skb->len; struct page *page; - int first_buf_len = skb->len; unsigned long offset; unsigned int nr_frags; unsigned int f; - u16 tpd_next_to_use; - u16 proto_hdr_len; - u16 len12; + int retval; + u16 next_to_use; + u16 data_len; + u8 hdr_len; - first_buf_len -= skb->data_len; + buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; - tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[next_to_use]; if (unlikely(buffer_info->skb)) BUG(); /* put skb in last TPD */ buffer_info->skb = NULL; - if (tcp_seg) { - /* TSO/GSO */ - proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - buffer_info->length = proto_hdr_len; + retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (retval) { + /* TSO */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = hdr_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, proto_hdr_len, + offset, hdr_len, PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; - if (first_buf_len > proto_hdr_len) { - int i, m; + if (buf_len > hdr_len) { + int i, nseg; - len12 = first_buf_len - proto_hdr_len; - m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / + data_len = buf_len - hdr_len; + nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < m; i++) { + for (i = 0; i < nseg; i++) { buffer_info = - &tpd_ring->buffer_info[tpd_next_to_use]; + &tpd_ring->buffer_info[next_to_use]; buffer_info->skb = NULL; buffer_info->length = (ATL1_MAX_TX_BUF_LEN >= - len12) ? ATL1_MAX_TX_BUF_LEN : len12; - len12 -= buffer_info->length; + data_len) ? 
ATL1_MAX_TX_BUF_LEN : data_len; + data_len -= buffer_info->length; page = virt_to_page(skb->data + - (proto_hdr_len + - i * ATL1_MAX_TX_BUF_LEN)); + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); offset = (unsigned long)(skb->data + - (proto_hdr_len + - i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & + ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, offset, buffer_info->length, PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; } } } else { - /* not TSO/GSO */ - buffer_info->length = first_buf_len; + /* not TSO */ + buffer_info->length = buf_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, first_buf_len, PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; + offset, buf_len, PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; } for (f = 0; f < nr_frags; f++) { struct skb_frag_struct *frag; - u16 lenf, i, m; + u16 i, nseg; frag = &skb_shinfo(skb)->frags[f]; - lenf = frag->size; + buf_len = frag->size; - m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < m; i++) { - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; + nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; if (unlikely(buffer_info->skb)) BUG(); buffer_info->skb = NULL; - buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? - ATL1_MAX_TX_BUF_LEN : lenf; - lenf -= buffer_info->length; + buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : buf_len; + buf_len -= buffer_info->length; buffer_info->dma = pci_map_page(adapter->pdev, frag->page, frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), buffer_info->length, PCI_DMA_TODEVICE); - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; } } @@ -1451,39 +1471,44 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->skb = skb; } -static void atl1_tx_queue(struct atl1_adapter *adapter, int count, - union tpd_descr *descr) +static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, + struct tx_packet_desc *ptpd) { - /* We enter this function holding a spinlock. 
*/ + /* spinlock held */ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - int j; - u32 val; struct atl1_buffer *buffer_info; struct tx_packet_desc *tpd; - u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); + u16 j; + u32 val; + u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); for (j = 0; j < count; j++) { - buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; - tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); - tpd->desc.csum.csumpu = descr->csum.csumpu; - tpd->desc.csum.csumpl = descr->csum.csumpl; - tpd->desc.tso.tsopu = descr->tso.tsopu; - tpd->desc.tso.tsopl = descr->tso.tsopl; + buffer_info = &tpd_ring->buffer_info[next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); + if (tpd != ptpd) + memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); tpd->buffer_addr = cpu_to_le64(buffer_info->dma); - tpd->desc.data = descr->data; - tpd->desc.tso.tsopu |= (cpu_to_le16(buffer_info->length) & - TSO_PARAM_BUFLEN_MASK) << TSO_PARAM_BUFLEN_SHIFT; + tpd->word2 = (cpu_to_le16(buffer_info->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; - val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & - TSO_PARAM_SEGMENT_MASK; - if (val && !j) - tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; + /* + * if this is the first packet in a TSO chain, set + * TPD_HDRFLAG, otherwise, clear it. + */ + val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & + TPD_SEGMENT_EN_MASK; + if (val) { + if (!j) + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + else + tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); + } if (j == (count - 1)) - tpd->desc.tso.tsopl |= 1 << TSO_PARAM_EOP_SHIFT; + tpd->word3 |= 1 << TPD_EOP_SHIFT; - if (++tpd_next_to_use == tpd_ring->count) - tpd_next_to_use = 0; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; } /* * Force memory writes to complete before letting h/w @@ -1493,18 +1518,18 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count, */ wmb(); - atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); + atomic_set(&tpd_ring->next_to_use, next_to_use); } static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; int len = skb->len; int tso; int count = 1; int ret_val; - u32 val; - union tpd_descr param; + struct tx_packet_desc *ptpd; u16 frag_size; u16 vlan_tag; unsigned long flags; @@ -1515,18 +1540,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) len -= skb->data_len; - if (unlikely(skb->len == 0)) { + if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - param.data = 0; - param.tso.tsopu = 0; - param.tso.tsopl = 0; - param.csum.csumpu = 0; - param.csum.csumpl = 0; - - /* nr_frags will be nonzero if we're doing scatter/gather (SG) */ nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { frag_size = skb_shinfo(skb)->frags[f].size; @@ -1535,10 +1553,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ATL1_MAX_TX_BUF_LEN; } - /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ mss = skb_shinfo(skb)->gso_size; if (mss) { - if (skb->protocol == htons(ETH_P_IP)) { + if (skb->protocol == ntohs(ETH_P_IP)) { proto_hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (unlikely(proto_hdr_len > len)) { @@ -1567,18 +1584,20 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - param.data = 0; + ptpd = ATL1_TPD_DESC(tpd_ring, + (u16) 
atomic_read(&tpd_ring->next_to_use)); + memset(ptpd, 0, sizeof(struct tx_packet_desc)); if (adapter->vlgrp && vlan_tx_tag_present(skb)) { vlan_tag = vlan_tx_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | ((vlan_tag >> 9) & 0x8); - param.tso.tsopl |= 1 << TSO_PARAM_INSVLAG_SHIFT; - param.tso.tsopu |= (vlan_tag & TSO_PARAM_VLANTAG_MASK) << - TSO_PARAM_VLAN_SHIFT; + ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) << + TPD_VL_TAGGED_SHIFT; } - tso = atl1_tso(adapter, skb, ¶m.tso); + tso = atl1_tso(adapter, skb, ptpd); if (tso < 0) { spin_unlock_irqrestore(&adapter->lock, flags); dev_kfree_skb_any(skb); @@ -1586,7 +1605,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } if (!tso) { - ret_val = atl1_tx_csum(adapter, skb, ¶m.csum); + ret_val = atl1_tx_csum(adapter, skb, ptpd); if (ret_val < 0) { spin_unlock_irqrestore(&adapter->lock, flags); dev_kfree_skb_any(skb); @@ -1594,13 +1613,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } } - val = (param.tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & - TSO_PARAM_SEGMENT_MASK; - atl1_tx_map(adapter, skb, 1 == val); - atl1_tx_queue(adapter, count, ¶m); - netdev->trans_start = jiffies; - spin_unlock_irqrestore(&adapter->lock, flags); + atl1_tx_map(adapter, skb, ptpd); + atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); + netdev->trans_start = jiffies; return NETDEV_TX_OK; } @@ -2759,7 +2776,7 @@ const struct ethtool_ops atl1_ethtool_ops = { .get_ringparam = atl1_get_ringparam, .set_ringparam = atl1_set_ringparam, .get_pauseparam = atl1_get_pauseparam, - .set_pauseparam = atl1_set_pauseparam, + .set_pauseparam = atl1_set_pauseparam, .get_rx_csum = atl1_get_rx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, .get_link = ethtool_op_get_link, diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 30c5a8d..4d3d65b 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -452,106 +452,115 @@ struct rx_free_desc { /* __attribute__ ((packed)) is required */ } __attribute__ ((packed)); -/* tsopu defines */ -#define TSO_PARAM_BUFLEN_MASK 0x3FFF -#define TSO_PARAM_BUFLEN_SHIFT 0 -#define TSO_PARAM_DMAINT_MASK 0x0001 -#define TSO_PARAM_DMAINT_SHIFT 14 -#define TSO_PARAM_PKTNT_MASK 0x0001 -#define TSO_PARAM_PKTINT_SHIFT 15 -#define TSO_PARAM_VLANTAG_MASK 0xFFFF -#define TSO_PARAM_VLAN_SHIFT 16 - -/* tsopl defines */ -#define TSO_PARAM_EOP_MASK 0x0001 -#define TSO_PARAM_EOP_SHIFT 0 -#define TSO_PARAM_COALESCE_MASK 0x0001 -#define TSO_PARAM_COALESCE_SHIFT 1 -#define TSO_PARAM_INSVLAG_MASK 0x0001 -#define TSO_PARAM_INSVLAG_SHIFT 2 -#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001 -#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3 -#define TSO_PARAM_SEGMENT_MASK 0x0001 -#define TSO_PARAM_SEGMENT_SHIFT 4 -#define TSO_PARAM_IPCKSUM_MASK 0x0001 -#define TSO_PARAM_IPCKSUM_SHIFT 5 -#define TSO_PARAM_TCPCKSUM_MASK 0x0001 -#define TSO_PARAM_TCPCKSUM_SHIFT 6 -#define TSO_PARAM_UDPCKSUM_MASK 0x0001 -#define TSO_PARAM_UDPCKSUM_SHIFT 7 -#define TSO_PARAM_VLANTAGGED_MASK 0x0001 -#define TSO_PARAM_VLANTAGGED_SHIFT 8 -#define TSO_PARAM_ETHTYPE_MASK 0x0001 -#define TSO_PARAM_ETHTYPE_SHIFT 9 -#define TSO_PARAM_IPHL_MASK 0x000F -#define TSO_PARAM_IPHL_SHIFT 10 -#define TSO_PARAM_TCPHDRLEN_MASK 0x000F -#define TSO_PARAM_TCPHDRLEN_SHIFT 14 -#define TSO_PARAM_HDRFLAG_MASK 0x0001 -#define TSO_PARAM_HDRFLAG_SHIFT 18 -#define TSO_PARAM_MSS_MASK 0x1FFF -#define TSO_PARAM_MSS_SHIFT 19 - -/* csumpu defines */ 
-#define CSUM_PARAM_BUFLEN_MASK 0x3FFF -#define CSUM_PARAM_BUFLEN_SHIFT 0 -#define CSUM_PARAM_DMAINT_MASK 0x0001 -#define CSUM_PARAM_DMAINT_SHIFT 14 -#define CSUM_PARAM_PKTINT_MASK 0x0001 -#define CSUM_PARAM_PKTINT_SHIFT 15 -#define CSUM_PARAM_VALANTAG_MASK 0xFFFF -#define CSUM_PARAM_VALAN_SHIFT 16 - -/* csumpl defines*/ -#define CSUM_PARAM_EOP_MASK 0x0001 -#define CSUM_PARAM_EOP_SHIFT 0 -#define CSUM_PARAM_COALESCE_MASK 0x0001 -#define CSUM_PARAM_COALESCE_SHIFT 1 -#define CSUM_PARAM_INSVLAG_MASK 0x0001 -#define CSUM_PARAM_INSVLAG_SHIFT 2 -#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001 -#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3 -#define CSUM_PARAM_SEGMENT_MASK 0x0001 -#define CSUM_PARAM_SEGMENT_SHIFT 4 -#define CSUM_PARAM_IPCKSUM_MASK 0x0001 -#define CSUM_PARAM_IPCKSUM_SHIFT 5 -#define CSUM_PARAM_TCPCKSUM_MASK 0x0001 -#define CSUM_PARAM_TCPCKSUM_SHIFT 6 -#define CSUM_PARAM_UDPCKSUM_MASK 0x0001 -#define CSUM_PARAM_UDPCKSUM_SHIFT 7 -#define CSUM_PARAM_VLANTAGGED_MASK 0x0001 -#define CSUM_PARAM_VLANTAGGED_SHIFT 8 -#define CSUM_PARAM_ETHTYPE_MASK 0x0001 -#define CSUM_PARAM_ETHTYPE_SHIFT 9 -#define CSUM_PARAM_IPHL_MASK 0x000F -#define CSUM_PARAM_IPHL_SHIFT 10 -#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF -#define CSUM_PARAM_PLOADOFFSET_SHIFT 16 -#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF -#define CSUM_PARAM_XSUMOFFSET_SHIFT 24 - -/* TPD descriptor */ -struct tso_param { - /* The order of these declarations is important -- don't change it */ - u32 tsopu; /* tso_param upper word */ - u32 tsopl; /* tso_param lower word */ -}; - -struct csum_param { - /* The order of these declarations is important -- don't change it */ - u32 csumpu; /* csum_param upper word */ - u32 csumpl; /* csum_param lower word */ -}; +/* + * The L1 transmit packet descriptor is comprised of four 32-bit words. + * + * 31 0 + * +---------------------------------------+ + * | Word 0: Buffer addr lo | + * +---------------------------------------+ + * | Word 1: Buffer addr hi | + * +---------------------------------------+ + * | Word 2 | + * +---------------------------------------+ + * | Word 3 | + * +---------------------------------------+ + * + * Words 0 and 1 combine to form a 64-bit buffer address. + * + * Word 2 is self explanatory in the #define block below. + * + * Word 3 has two forms, depending upon the state of bits 3 and 4. + * If bits 3 and 4 are both zero, then bits 14:31 are unused by the + * hardware. Otherwise, if either bit 3 or 4 is set, the definition + * of bits 14:31 vary according to the following depiction. 
+ * + * 0 End of packet 0 End of packet + * 1 Coalesce 1 Coalesce + * 2 Insert VLAN tag 2 Insert VLAN tag + * 3 Custom csum enable = 0 3 Custom csum enable = 1 + * 4 Segment enable = 1 4 Segment enable = 0 + * 5 Generate IP checksum 5 Generate IP checksum + * 6 Generate TCP checksum 6 Generate TCP checksum + * 7 Generate UDP checksum 7 Generate UDP checksum + * 8 VLAN tagged 8 VLAN tagged + * 9 Ethernet frame type 9 Ethernet frame type + * 10-+ 10-+ + * 11 | IP hdr length (10:13) 11 | IP hdr length (10:13) + * 12 | (num 32-bit words) 12 | (num 32-bit words) + * 13-+ 13-+ + * 14-+ 14 Unused + * 15 | TCP hdr length (14:17) 15 Unused + * 16 | (num 32-bit words) 16-+ + * 17-+ 17 | + * 18 Header TPD flag 18 | + * 19-+ 19 | Payload offset + * 20 | 20 | (16:23) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24-+ + * 25 | MSS (19:31) 25 | + * 26 | 26 | + * 27 | 27 | Custom csum offset + * 28 | 28 | (24:31) + * 29 | 29 | + * 30 | 30 | + * 31-+ 31-+ + */ -union tpd_descr { - u64 data; - struct csum_param csum; - struct tso_param tso; -}; +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLAN_SHIFT 16 + +/* tpd word 3 bits 0:13 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_COALESCE_MASK 0x0001 +#define TPD_COALESCE_SHIFT 1 +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CUST_CSUM_EN_MASK 0x0001 +#define TPD_CUST_CSUM_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 +#define TPD_IPHL_MASK 0x000F +#define TPD_IPHL_SHIFT 10 + +/* tpd word 3 bits 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tpd word 3 bits 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 struct tx_packet_desc { __le64 buffer_addr; - union tpd_descr desc; + __le32 word2; + __le32 word3; }; /* DMA Order Settings */ -- cgit v1.1 From 5ca3bc3041b1cbbb3361a99bd666e907850a1a7a Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:08 -0600 Subject: atl1: use csum_start Use skb->csum_start for tx checksum offload preparation. Also swap the variables css and cso so they hold the intended values of csum start and offset, respectively. 
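[Editor's aside, a sketch rather than part of the patch: after this change the custom-checksum fields programmed into TPD word 3 come straight from the skb, roughly as below. Variable names mirror the diff that follows; css now holds the checksum start offset and cso the offset at which the hardware stores the result.]

	/* sketch of the new derivation in atl1_tx_csum() */
	u8 css = (u8) (skb->csum_start - skb_headroom(skb)); /* checksum start */
	u8 cso = css + (u8) skb->csum_offset;                /* checksum store offset */

	if (unlikely(css & 0x1))
		/* L1 hardware requires an even checksum start offset */
		return -1;

	ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT;
	ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT;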
Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index f4add3c..9929822 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1347,16 +1347,17 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, u8 css, cso; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - cso = skb_transport_offset(skb); - css = cso + skb->csum_offset; - if (unlikely(cso & 0x1)) { + css = (u8) (skb->csum_start - skb_headroom(skb)); + cso = css + (u8) skb->csum_offset; + if (unlikely(css & 0x1)) { + /* L1 hardware requires an even number here */ dev_printk(KERN_DEBUG, &adapter->pdev->dev, "payload offset not an even number\n"); return -1; } - ptpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << + ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT; - ptpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << + ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT; ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; return true; -- cgit v1.1 From 460578bfe4dffbdc6eec9fcbd0fe0bb4f9f82188 Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:09 -0600 Subject: atl1: use netif_msg Use netif_msg_* for console messages emitted by the driver. Add a parameter to allow control of messaging at driver startup, and also add the ability to control it with ethtool. Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 155 ++++++++++++++++++++++++++++++++++-------------- drivers/net/atlx/atl1.h | 2 +- 2 files changed, 111 insertions(+), 46 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 9929822..51eca23 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -100,6 +100,13 @@ static const struct pci_device_id atl1_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); +static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); + /* * atl1_sw_init - Initialize general software structures (struct atl1_adapter) * @adapter: board private structure to initialize @@ -217,7 +224,9 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) { - dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", + size); goto err_nomem; } rfd_ring->buffer_info = @@ -239,7 +248,8 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, &ring_header->dma); if (unlikely(!ring_header->desc)) { - dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); goto err_nomem; } @@ -472,7 +482,8 @@ static u32 atl1_check_link(struct atl1_adapter *adapter) /* link down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ - dev_info(&adapter->pdev->dev, "link is down\n"); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "link is down\n"); 
adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -515,11 +526,12 @@ static u32 atl1_check_link(struct atl1_adapter *adapter) adapter->link_speed = speed; adapter->link_duplex = duplex; atl1_setup_mac_ctrl(adapter); - dev_info(&adapter->pdev->dev, - "%s link is up %d Mbps %s\n", - netdev->name, adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? - "full duplex" : "half duplex"); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, + "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "full duplex" : "half duplex"); } if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ @@ -583,7 +595,8 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); return -EINVAL; } @@ -997,8 +1010,9 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | ERR_FLAG_CODE | ERR_FLAG_OV)) { adapter->hw_csum_err++; - dev_printk(KERN_DEBUG, &pdev->dev, - "rx checksum error\n"); + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); return; } } @@ -1017,9 +1031,10 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, } /* IPv4, but hardware thinks its checksum is wrong */ - dev_printk(KERN_DEBUG, &pdev->dev, - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", - rrd->pkt_flg, rrd->err_flg); + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "hw csum wrong, pkt_flag:%x, err_flag:%x\n", + rrd->pkt_flg, rrd->err_flg); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); adapter->hw_csum_err++; @@ -1133,14 +1148,17 @@ chk_rrd: /* rrd seems to be bad */ if (unlikely(i-- > 0)) { /* rrd may not be DMAed completely */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "incomplete RRD DMA transfer\n"); + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "unexpected RRD count\n"); udelay(1); goto chk_rrd; } /* bad rrd */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "bad RRD\n"); + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); /* see if update RFD index */ if (rrd->num_buf > 1) atl1_update_rfd_index(adapter, rrd); @@ -1351,8 +1369,9 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, cso = css + (u8) skb->csum_offset; if (unlikely(css & 0x1)) { /* L1 hardware requires an even number here */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "payload offset not an even number\n"); + if (netif_msg_tx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); return -1; } ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << @@ -1573,7 +1592,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (!spin_trylock_irqsave(&adapter->lock, flags)) { /* Can't get lock - tell upper layer to requeue */ - dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx locked\n"); return NETDEV_TX_LOCKED; } @@ -1581,7 +1602,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* not enough descriptors */ netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->lock, 
flags); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx busy\n"); return NETDEV_TX_BUSY; } @@ -1657,8 +1680,9 @@ static irqreturn_t atl1_intr(int irq, void *data) /* check if PCIE PHY Link down */ if (status & ISR_PHY_LINKDOWN) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie phy link down %x\n", status); + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); if (netif_running(adapter->netdev)) { /* reset MAC */ iowrite32(0, adapter->hw.hw_addr + REG_IMR); schedule_work(&adapter->pcie_dma_to_rst_task); @@ -1668,9 +1692,10 @@ static irqreturn_t atl1_intr(int irq, void *data) /* check if DMA read/write error ? */ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie DMA r/w error (status = 0x%x)\n", - status); + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); iowrite32(0, adapter->hw.hw_addr + REG_IMR); schedule_work(&adapter->pcie_dma_to_rst_task); return IRQ_HANDLED; @@ -1693,8 +1718,11 @@ static irqreturn_t atl1_intr(int irq, void *data) if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "rx exception, ISR = 0x%x\n", status); + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", + status); atl1_intr_rx(adapter); } @@ -1791,8 +1819,9 @@ s32 atl1_up(struct atl1_adapter *adapter) err = pci_enable_msi(adapter->pdev); if (err) { - dev_info(&adapter->pdev->dev, - "Unable to enable MSI: %d\n", err); + if (netif_msg_ifup(adapter)) + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); irq_flags |= IRQF_SHARED; } @@ -2061,6 +2090,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); if (!adapter->hw.hw_addr) { @@ -2070,7 +2100,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev, /* get device revision number */ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2)); - dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); /* set default ring resource counts */ adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; @@ -2390,7 +2421,9 @@ static int atl1_set_settings(struct net_device *netdev, u16 old_media_type = hw->media_type; if (netif_running(adapter->netdev)) { - dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n"); + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool shutting down adapter\n"); atl1_down(adapter); } @@ -2399,8 +2432,9 @@ static int atl1_set_settings(struct net_device *netdev, else { if (ecmd->speed == SPEED_1000) { if (ecmd->duplex != DUPLEX_FULL) { - dev_warn(&adapter->pdev->dev, - "can't force to 1000M half duplex\n"); + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); ret_val = -EINVAL; goto exit_sset; } @@ -2438,8 +2472,9 @@ static int atl1_set_settings(struct net_device *netdev, } if (atl1_phy_setup_autoneg_adv(hw)) { ret_val = -EINVAL; - dev_warn(&adapter->pdev->dev, - "invalid ethtool speed/duplex setting\n"); + if 
(netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); goto exit_sset; } if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || @@ -2471,10 +2506,14 @@ exit_sset: hw->media_type = old_media_type; if (netif_running(adapter->netdev)) { - dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n"); + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool starting adapter\n"); atl1_up(adapter); } else if (!ret_val) { - dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n"); + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool resetting adapter\n"); atl1_reset(adapter); } return ret_val; @@ -2531,6 +2570,18 @@ static int atl1_set_wol(struct net_device *netdev, return 0; } +static u32 atl1_get_msglevel(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void atl1_set_msglevel(struct net_device *netdev, u32 value) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = value; +} + static int atl1_get_regs_len(struct net_device *netdev) { return ATL1_REG_COUNT * sizeof(u32); @@ -2772,6 +2823,8 @@ const struct ethtool_ops atl1_ethtool_ops = { .get_drvinfo = atl1_get_drvinfo, .get_wol = atl1_get_wol, .set_wol = atl1_set_wol, + .get_msglevel = atl1_get_msglevel, + .set_msglevel = atl1_set_msglevel, .get_regs_len = atl1_get_regs_len, .get_regs = atl1_get_regs, .get_ringparam = atl1_get_ringparam, @@ -2797,6 +2850,7 @@ const struct ethtool_ops atl1_ethtool_ops = { s32 atl1_reset_hw(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; u32 icr; int i; @@ -2836,7 +2890,8 @@ s32 atl1_reset_hw(struct atl1_hw *hw) } if (icr) { - dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); return icr; } @@ -3205,6 +3260,7 @@ s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) static s32 atl1_phy_reset(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; @@ -3237,7 +3293,8 @@ static s32 atl1_phy_reset(struct atl1_hw *hw) u32 val; int i; /* pcie serdes link may be down! 
*/ - dev_dbg(&pdev->dev, "pcie phy link down\n"); + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "pcie phy link down\n"); for (i = 0; i < 25; i++) { msleep(1); @@ -3247,7 +3304,9 @@ static s32 atl1_phy_reset(struct atl1_hw *hw) } if ((val & (MDIO_START | MDIO_BUSY)) != 0) { - dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, + "pcie link down at least 25ms\n"); return ret_val; } } @@ -3338,6 +3397,7 @@ s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) static s32 atl1_setup_link(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; s32 ret_val; /* @@ -3348,13 +3408,16 @@ static s32 atl1_setup_link(struct atl1_hw *hw) */ ret_val = atl1_phy_setup_autoneg_adv(hw); if (ret_val) { - dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, + "error setting up autonegotiation\n"); return ret_val; } /* SW.Reset , En-Auto-Neg if needed */ ret_val = atl1_phy_reset(hw); if (ret_val) { - dev_dbg(&pdev->dev, "error resetting phy\n"); + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, "error resetting phy\n"); return ret_val; } hw->phy_configured = true; @@ -3429,6 +3492,7 @@ s32 atl1_init_hw(struct atl1_hw *hw) s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) { struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; @@ -3451,7 +3515,8 @@ s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) *speed = SPEED_10; break; default: - dev_dbg(&pdev->dev, "error getting speed\n"); + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "error getting speed\n"); return ATLX_ERR_PHY_SPEED; break; } diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 4d3d65b..12ec916 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -794,7 +794,7 @@ struct atl1_adapter { struct atl1_rrd_ring rrd_ring; u64 hw_csum_err; u64 hw_csum_good; - + u32 msg_enable; u16 imt; /* interrupt moderator timer (2us resolution) */ u16 ict; /* interrupt clear timer (2us resolution */ struct mii_if_info mii; /* MII interface info */ -- cgit v1.1 From 235ffa136c09c56db0c6c5fc5b5832749a72f557 Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:10 -0600 Subject: atl1: print debug info if rrd error Add some debug printks if we encounter a potentially bad receive return descriptor. 
Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 51eca23..4e4cb23 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1144,14 +1144,32 @@ chk_rrd: /* check rrd status */ if (likely(rrd->num_buf == 1)) goto rrd_ok; + else if (netif_msg_rx_err(adapter)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "unexpected RRD buffer count\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx_buf_len = %d\n", + adapter->rx_buffer_len); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD num_buf = %d\n", + rrd->num_buf); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_len = %d\n", + rrd->xsz.xsum_sz.pkt_size); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_flg = 0x%08X\n", + rrd->pkt_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD err_flg = 0x%08X\n", + rrd->err_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD vlan_tag = 0x%08X\n", + rrd->vlan_tag); + } /* rrd seems to be bad */ if (unlikely(i-- > 0)) { /* rrd may not be DMAed completely */ - if (netif_msg_rx_err(adapter)) - dev_printk(KERN_DEBUG, - &adapter->pdev->dev, - "unexpected RRD count\n"); udelay(1); goto chk_rrd; } -- cgit v1.1 From 0dde4ef99dcbf221bce6c0a5c3c3e4cdfea0b370 Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:11 -0600 Subject: atl1: make functions static Make needlessly global functions static. In a couple of cases this requires removing forward declarations and reordering functions. Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 101 ++++++++++++++++++++++++------------------------ drivers/net/atlx/atl1.h | 3 -- 2 files changed, 51 insertions(+), 53 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 4e4cb23..6f4a1d5 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -211,7 +211,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) * * Return 0 on success, negative on failure */ -s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; @@ -402,7 +402,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) * * Free all transmit software resources */ -void atl1_free_ring_resources(struct atl1_adapter *adapter) +static void atl1_free_ring_resources(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; @@ -580,40 +580,6 @@ static u32 atl1_check_link(struct atl1_adapter *adapter) return 0; } -/* - * atl1_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - */ -static int atl1_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int old_mtu = netdev->mtu; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - if (netif_msg_link(adapter)) - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); - return 
-EINVAL; - } - - adapter->hw.max_frame_size = max_frame; - adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; - adapter->rx_buffer_len = (max_frame + 7) & ~7; - adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; - - netdev->mtu = new_mtu; - if ((old_mtu != new_mtu) && netif_running(netdev)) { - atl1_down(adapter); - atl1_up(adapter); - } - - return 0; -} - static void set_flow_ctrl_old(struct atl1_adapter *adapter) { u32 hi, lo, value; @@ -1794,19 +1760,8 @@ static void atl1_phy_config(unsigned long data) * assert again and again. * */ -static void atl1_tx_timeout_task(struct work_struct *work) -{ - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); - struct net_device *netdev = adapter->netdev; - netif_device_detach(netdev); - atl1_down(adapter); - atl1_up(adapter); - netif_device_attach(netdev); -} - -int atl1_reset(struct atl1_adapter *adapter) +static int atl1_reset(struct atl1_adapter *adapter) { int ret; ret = atl1_reset_hw(&adapter->hw); @@ -1815,7 +1770,7 @@ int atl1_reset(struct atl1_adapter *adapter) return atl1_init_hw(&adapter->hw); } -s32 atl1_up(struct atl1_adapter *adapter) +static s32 atl1_up(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; @@ -1860,7 +1815,7 @@ err_up: return err; } -void atl1_down(struct atl1_adapter *adapter) +static void atl1_down(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1883,6 +1838,52 @@ void atl1_down(struct atl1_adapter *adapter) atl1_clean_rx_ring(adapter); } +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); +} + +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + /* * atl1_open - Called when a network interface is made active * @netdev: network interface device structure diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 12ec916..6220298 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -73,9 +73,6 @@ void atl1_check_options(struct atl1_adapter *adapter); static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static u32 atl1_check_link(struct atl1_adapter *adapter); -s32 atl1_up(struct atl1_adapter *adapter); -void atl1_down(struct atl1_adapter *adapter); -int atl1_reset(struct atl1_adapter *adapter); extern const struct ethtool_ops atl1_ethtool_ops; -- cgit v1.1 From 
6446a860f8b72b6a7b6722b3e30c4b00d6f99967 Mon Sep 17 00:00:00 2001 From: Jay Cliburn Date: Sat, 2 Feb 2008 19:50:12 -0600 Subject: atl1: reduce forward declarations Rearrange functions to allow removal of some forward declarations. Make certain global functions static along the way. Signed-off-by: Jay Cliburn Acked-by: Chris Snook Signed-off-by: Jeff Garzik --- drivers/net/atlx/atl1.c | 5704 +++++++++++++++++++++++------------------------ drivers/net/atlx/atl1.h | 10 - 2 files changed, 2852 insertions(+), 2862 deletions(-) (limited to 'drivers/net/atlx') diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 6f4a1d5..240db84 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -108,452 +108,436 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); /* - * atl1_sw_init - Initialize general software structures (struct atl1_adapter) - * @adapter: board private structure to initialize - * - * atl1_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) */ -static int __devinit atl1_sw_init(struct atl1_adapter *adapter) +static s32 atl1_reset_hw(struct atl1_hw *hw) { - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - - hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + u32 icr; + int i; - adapter->wol = 0; - adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; - adapter->ict = 50000; /* 100ms */ - adapter->link_speed = SPEED_0; /* hardware init */ - adapter->link_duplex = FULL_DUPLEX; + /* + * Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* + * iowrite32(0, hw->hw_addr + REG_IMR); + * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); + */ - hw->phy_configured = false; - hw->preamble_len = 7; - hw->ipgt = 0x60; - hw->min_ifg = 0x50; - hw->ipgr1 = 0x40; - hw->ipgr2 = 0x60; - hw->max_retry = 0xf; - hw->lcol = 0x37; - hw->jam_ipg = 7; - hw->rfd_burst = 8; - hw->rrd_burst = 8; - hw->rfd_fetch_gap = 1; - hw->rx_jumbo_th = adapter->rx_buffer_len / 8; - hw->rx_jumbo_lkah = 1; - hw->rrd_ret_timer = 16; - hw->tpd_burst = 4; - hw->tpd_fetch_th = 16; - hw->txf_burst = 0x100; - hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; - hw->tpd_fetch_gap = 1; - hw->rcb_value = atl1_rcb_64; - hw->dma_ord = atl1_dma_ord_enh; - hw->dmar_block = atl1_dma_req_256; - hw->dmaw_block = atl1_dma_req_256; - hw->cmb_rrd = 4; - hw->cmb_tpd = 4; - hw->cmb_rx_timer = 1; /* about 2us */ - hw->cmb_tx_timer = 1; /* about 2us */ - hw->smb_timer = 100000; /* about 200ms */ + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. 
+ */ + iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); + ioread32(hw->hw_addr + REG_MASTER_CTRL); - spin_lock_init(&adapter->lock); - spin_lock_init(&adapter->mb_lock); + iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); + ioread16(hw->hw_addr + REG_PHY_ENABLE); - return 0; -} + /* delay about 1ms */ + msleep(1); -static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - u16 result; + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); + if (!icr) + break; + /* delay 1 ms */ + msleep(1); + /* FIXME: still the right way to do this? */ + cpu_relax(); + } - atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + if (icr) { + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + return icr; + } - return result; + return 0; } -static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, - int val) +/* function about EEPROM + * + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl1_check_eeprom_exist(struct atl1_hw *hw) { - struct atl1_adapter *adapter = netdev_priv(netdev); + u32 value; + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + } - atl1_write_phy_reg(&adapter->hw, reg_num, val); + value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; } -/* - * atl1_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ -static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) { - struct atl1_adapter *adapter = netdev_priv(netdev); - unsigned long flags; - int retval; + int i; + u32 control; - if (!netif_running(netdev)) - return -EINVAL; + if (offset & 3) + /* address do not align */ + return false; - spin_lock_irqsave(&adapter->lock, flags); - retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); - spin_unlock_irqrestore(&adapter->lock, flags); + iowrite32(0, hw->hw_addr + REG_VPD_DATA); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + iowrite32(control, hw->hw_addr + REG_VPD_CAP); + ioread32(hw->hw_addr + REG_VPD_CAP); - return retval; + for (i = 0; i < 10; i++) { + msleep(2); + control = ioread32(hw->hw_addr + REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); + return true; + } + /* timeout */ + return false; } /* - * atl1_setup_mem_resources - allocate Tx / RX descriptor resources - * @adapter: board private structure - * - * Return 0 on success, negative on failure + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read */ -static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) { - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_ring_header *ring_header = &adapter->ring_header; - struct pci_dev *pdev = adapter->pdev; - int size; - u8 offset = 0; - - size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); - tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); - if 
(unlikely(!tpd_ring->buffer_info)) { - if (netif_msg_drv(adapter)) - dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", - size); - goto err_nomem; - } - rfd_ring->buffer_info = - (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); + u32 val; + int i; - /* - * real ring DMA buffer - * each ring/block may need up to 8 bytes for alignment, hence the - * additional 40 bytes tacked onto the end. - */ - ring_header->size = size = - sizeof(struct tx_packet_desc) * tpd_ring->count - + sizeof(struct rx_free_desc) * rfd_ring->count - + sizeof(struct rx_return_desc) * rrd_ring->count - + sizeof(struct coals_msg_block) - + sizeof(struct stats_msg_block) - + 40; + val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << + MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); - ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, - &ring_header->dma); - if (unlikely(!ring_header->desc)) { - if (netif_msg_drv(adapter)) - dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); - goto err_nomem; + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16) val; + return 0; } + return ATLX_ERR_PHY; +} - memset(ring_header->desc, 0, ring_header->size); +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 - /* init TPD ring */ - tpd_ring->dma = ring_header->dma; - offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; - tpd_ring->dma += offset; - tpd_ring->desc = (u8 *) ring_header->desc + offset; - tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; +static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; - /* init RFD ring */ - rfd_ring->dma = tpd_ring->dma + tpd_ring->size; - offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; - rfd_ring->dma += offset; - rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); - rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; + iowrite32(0, hw->hw_addr + REG_SPI_DATA); + iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & + SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & + SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & + SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & + SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << + SPI_FLASH_CTRL_INS_SHIFT; - /* init RRD ring */ - rrd_ring->dma = rfd_ring->dma + rfd_ring->size; - offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; - rrd_ring->dma += offset; - rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); - rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; - - - /* init CMB */ - adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; - offset = (adapter->cmb.dma & 0x7) ? 
(8 - (adapter->cmb.dma & 0x7)) : 0; - adapter->cmb.dma += offset; - adapter->cmb.cmb = (struct coals_msg_block *) - ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); - - /* init SMB */ - adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); - offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; - adapter->smb.dma += offset; - adapter->smb.smb = (struct stats_msg_block *) - ((u8 *) adapter->cmb.cmb + - (sizeof(struct coals_msg_block) + offset)); - - return 0; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); -err_nomem: - kfree(tpd_ring->buffer_info); - return -ENOMEM; -} + value |= SPI_FLASH_CTRL_START; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); -static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) -{ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + for (i = 0; i < 10; i++) { + msleep(1); + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } - atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); + if (value & SPI_FLASH_CTRL_START) + return false; - rfd_ring->next_to_clean = 0; - atomic_set(&rfd_ring->next_to_use, 0); + *buf = ioread32(hw->hw_addr + REG_SPI_DATA); - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); + return true; } /* - * atl1_clean_rx_ring - Free RFD Buffers - * @adapter: board private structure + * get_permanent_address + * return 0 if get valid mac address, */ -static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +static int atl1_get_permanent_address(struct atl1_hw *hw) { - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; + u32 addr[2]; + u32 i, control; + u16 reg; + u8 eth_addr[ETH_ALEN]; + bool key_valid; - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rfd_ring->count; i++) { - buffer_info = &rfd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1_check_eeprom_exist(hw)) { + reg = 0; + key_valid = false; + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl1_read_eeprom(hw, i + 0x100, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + break; + } else + /* read error */ + break; + i += 4; } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; } + return 1; } - size = sizeof(struct atl1_buffer) * rfd_ring->count; - memset(rfd_ring->buffer_info, 0, size); + /* see if SPI FLAGS exist ? 
*/ + addr[0] = addr[1] = 0; + reg = 0; + key_valid = false; + i = 0; + while (1) { + if (atl1_spi_read(hw, i + 0x1f000, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + /* data end */ + break; + } else + /* read error */ + break; + i += 4; + } - /* Zero out the descriptor ring */ - memset(rfd_ring->desc, 0, rfd_ring->size); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } - rfd_ring->next_to_clean = 0; - atomic_set(&rfd_ring->next_to_use, 0); + /* + * On some motherboards, the MAC address is written by the + * BIOS directly to the MAC register during POST, and is + * not stored in eeprom. If all else thus far has failed + * to fetch the permanent MAC address, try reading it directly. + */ + addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); + addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); + return 1; } /* - * atl1_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure + * Reads the adapter's MAC address from the EEPROM + * hw - Struct containing variables accessed by shared code */ -static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +s32 atl1_read_mac_addr(struct atl1_hw *hw) { - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); - buffer_info->dma = 0; - } - } - - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - } - - size = sizeof(struct atl1_buffer) * tpd_ring->count; - memset(tpd_ring->buffer_info, 0, size); + u16 i; - /* Zero out the descriptor ring */ - memset(tpd_ring->desc, 0, tpd_ring->size); + if (atl1_get_permanent_address(hw)) + random_ether_addr(hw->perm_mac_addr); - atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); + for (i = 0; i < ETH_ALEN; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return 0; } /* - * atl1_free_ring_resources - Free Tx / RX descriptor Resources - * @adapter: board private structure + * Hashes an address to determine its location in the multicast table + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash * - * Free all transmit software resources + * atl1_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. 
reverse crc with MSB to LSB */ -static void atl1_free_ring_resources(struct atl1_adapter *adapter) +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) { - struct pci_dev *pdev = adapter->pdev; - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_ring_header *ring_header = &adapter->ring_header; - - atl1_clean_tx_ring(adapter); - atl1_clean_rx_ring(adapter); - - kfree(tpd_ring->buffer_info); - pci_free_consistent(pdev, ring_header->size, ring_header->desc, - ring_header->dma); - - tpd_ring->buffer_info = NULL; - tpd_ring->desc = NULL; - tpd_ring->dma = 0; + u32 crc32, value = 0; + int i; - rfd_ring->buffer_info = NULL; - rfd_ring->desc = NULL; - rfd_ring->dma = 0; + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); - rrd_ring->desc = NULL; - rrd_ring->dma = 0; + return value; } -static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) { - u32 value; - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - /* Config MAC CTRL Register */ - value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; - /* duplex */ - if (FULL_DUPLEX == adapter->link_duplex) - value |= MAC_CTRL_DUPLX; - /* speed */ - value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? - MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << - MAC_CTRL_SPEED_SHIFT); - /* flow control */ - value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); - /* PAD & CRC */ - value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); - /* preamble length */ - value |= (((u32) adapter->hw.preamble_len - & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); - /* vlan */ - if (adapter->vlgrp) - value |= MAC_CTRL_RMV_VLAN; - /* rx checksum - if (adapter->rx_csum) - value |= MAC_CTRL_RX_CHKSUM_EN; + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
*/ - /* filter mode */ - value |= MAC_CTRL_BC_EN; - if (netdev->flags & IFF_PROMISC) - value |= MAC_CTRL_PROMIS_EN; - else if (netdev->flags & IFF_ALLMULTI) - value |= MAC_CTRL_MC_ALL_EN; - /* value |= MAC_CTRL_LOOPBACK; */ - iowrite32(value, hw->hw_addr + REG_MAC_CTRL); + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + mta |= (1 << hash_bit); + iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); } -static u32 atl1_check_link(struct atl1_adapter *adapter) +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) { - struct atl1_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 ret_val; - u16 speed, duplex, phy_data; - int reconfig = 0; - - /* MII_BMSR must read twice */ - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); - if (!(phy_data & BMSR_LSTATUS)) { - /* link down */ - if (netif_carrier_ok(netdev)) { - /* old link state: Up */ - if (netif_msg_link(adapter)) - dev_info(&adapter->pdev->dev, "link is down\n"); - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - return 0; - } + int i; + u32 val; - /* Link Up */ - ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) - return ret_val; + val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); - switch (hw->media_type) { - case MEDIA_TYPE_1000M_FULL: - if (speed != SPEED_1000 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_100M_FULL: - if (speed != SPEED_100 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_100M_HALF: - if (speed != SPEED_100 || duplex != HALF_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_10M_FULL: - if (speed != SPEED_10 || duplex != FULL_DUPLEX) - reconfig = 1; - break; - case MEDIA_TYPE_10M_HALF: - if (speed != SPEED_10 || duplex != HALF_DUPLEX) - reconfig = 1; - break; + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; } - /* link result is our setting */ - if (!reconfig) { - if (adapter->link_speed != speed - || adapter->link_duplex != duplex) { - adapter->link_speed = speed; - adapter->link_duplex = duplex; - atl1_setup_mac_ctrl(adapter); - if (netif_msg_link(adapter)) - dev_info(&adapter->pdev->dev, - "%s link is up %d Mbps %s\n", - netdev->name, adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? 
- "full duplex" : "half duplex"); - } - if (!netif_carrier_ok(netdev)) { - /* Link down -> Up */ - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } + if (!(val & (MDIO_START | MDIO_BUSY))) return 0; - } - /* change original link status */ - if (netif_carrier_ok(netdev)) { - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } + return ATLX_ERR_PHY; +} - if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && - hw->media_type != MEDIA_TYPE_1000M_FULL) { +/* + * Make L001's PHY out of Power Saving State (bug) + * hw - Struct containing variables accessed by shared code + * when power on, L001's PHY always on Power saving State + * (Gigabit Link forbidden) + */ +static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +{ + s32 ret; + ret = atl1_write_phy_reg(hw, 29, 0x0029); + if (ret) + return ret; + return atl1_write_phy_reg(hw, 30, 0); +} + +/* + *TODO: do something or get rid of this + */ +static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) +{ +/* s32 ret_val; + * u16 phy_data; + */ + +/* + ret_val = atl1_write_phy_reg(hw, ...); + ret_val = atl1_write_phy_reg(hw, ...); + .... +*/ + return 0; +} + +/* + * Resets the PHY and make all config validate + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl1_phy_reset(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: - phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | - MII_CR_RESET; + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; @@ -567,2722 +551,2654 @@ static u32 atl1_check_link(struct atl1_adapter *adapter) phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); - return 0; } - /* auto-neg, insert timer to re-config phy */ - if (!adapter->phy_timer_pending) { - adapter->phy_timer_pending = true; - mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); - } + ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down! */ + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "pcie phy link down\n"); + + for (i = 0; i < 25; i++) { + msleep(1); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if ((val & (MDIO_START | MDIO_BUSY)) != 0) { + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, + "pcie link down at least 25ms\n"); + return ret_val; + } + } return 0; } -static void set_flow_ctrl_old(struct atl1_adapter *adapter) +/* + * Configures PHY autoneg and flow control advertisement settings + * hw - Struct containing variables accessed by shared code + */ +static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) { - u32 hi, lo, value; + s32 ret_val; + s16 mii_autoneg_adv_reg; + s16 mii_1000t_ctrl_reg; - /* RFD Flow Control */ - value = adapter->rfd_ring.count; - hi = value / 16; - if (hi < 2) - hi = 2; - lo = value * 7 / 8; + /* Read the MII Auto-Neg Advertisement Register (Address 4). 
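/*
 * Illustrative sketch in plain C (not driver code): both the PHY register
 * write and the PHY reset above follow the same pattern -- start the MDIO
 * operation, then poll a START/BUSY mask a bounded number of times and
 * report an error if it never clears.  The fake register below stands in
 * for ioread32(hw->hw_addr + REG_MDIO_CTRL); all names and values are
 * local to this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_MDIO_BUSY_MASK 0x3u	/* stand-in for MDIO_START | MDIO_BUSY */
#define FAKE_WAIT_TRIES     10

/* pretend hardware: the busy bits clear after a few polls */
static uint32_t fake_read_mdio_ctrl(void)
{
	static int polls;

	return (++polls >= 3) ? 0 : FAKE_MDIO_BUSY_MASK;
}

static bool mdio_wait_idle(void)
{
	int i;

	for (i = 0; i < FAKE_WAIT_TRIES; i++) {
		/* the driver delays (udelay/msleep) between polls */
		if (!(fake_read_mdio_ctrl() & FAKE_MDIO_BUSY_MASK))
			return true;
	}
	return false;	/* still busy: the caller maps this to ATLX_ERR_PHY */
}

int main(void)
{
	printf("MDIO idle: %s\n", mdio_wait_idle() ? "yes" : "timed out");
	return 0;
}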
*/ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + /* Read the MII 1000Base-T Control Register (Address 9). */ + mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; - /* RRD Flow Control */ - value = adapter->rrd_ring.count; - lo = value / 16; - hi = value * 7 / 8; - if (lo < 2) - lo = 2; - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); -} - -static void set_flow_ctrl_new(struct atl1_hw *hw) -{ - u32 hi, lo, value; - - /* RXF Flow Control */ - value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); - lo = value / 16; - if (lo < 192) - lo = 192; - hi = value * 7 / 8; - if (hi < lo) - hi = lo + 16; - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); - - /* RRD Flow Control */ - value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); - lo = value / 8; - hi = value * 7 / 8; - if (lo < 2) - lo = 2; - if (hi < lo) - hi = lo + 3; - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); -} - -/* - * atl1_configure - Configure Transmit&Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Tx /Rx unit of the MAC after a reset. - */ -static u32 atl1_configure(struct atl1_adapter *adapter) -{ - struct atl1_hw *hw = &adapter->hw; - u32 value; - - /* clear interrupt status */ - iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); - - /* set MAC Address */ - value = (((u32) hw->mac_addr[2]) << 24) | - (((u32) hw->mac_addr[3]) << 16) | - (((u32) hw->mac_addr[4]) << 8) | - (((u32) hw->mac_addr[5])); - iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); - value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); - iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); - - /* tx / rx ring */ - - /* HI base address */ - iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), - hw->hw_addr + REG_DESC_BASE_ADDR_HI); - /* LO base address */ - iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_RFD_ADDR_LO); - iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_RRD_ADDR_LO); - iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_TPD_ADDR_LO); - iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_CMB_ADDR_LO); - iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), - hw->hw_addr + REG_DESC_SMB_ADDR_LO); - - /* element count */ - value = adapter->rrd_ring.count; - value <<= 16; - value += adapter->rfd_ring.count; - iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); - iowrite32(adapter->tpd_ring.count, hw->hw_addr + - REG_DESC_TPD_RING_SIZE); - - /* Load Ptr */ - iowrite32(1, hw->hw_addr + REG_LOAD_PTR); - - /* config Mailbox */ - value = ((atomic_read(&adapter->tpd_ring.next_to_use) - & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | - ((atomic_read(&adapter->rrd_ring.next_to_clean) - & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | - 
((atomic_read(&adapter->rfd_ring.next_to_use) - & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAILBOX); - - /* config IPG/IFG */ - value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) - << MAC_IPG_IFG_IPGT_SHIFT) | - (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) - << MAC_IPG_IFG_MIFG_SHIFT) | - (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) - << MAC_IPG_IFG_IPGR1_SHIFT) | - (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) - << MAC_IPG_IFG_IPGR2_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); - - /* config Half-Duplex Control */ - value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | - (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) - << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | - MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | - (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | - (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) - << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); - iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; - /* set Interrupt Moderator Timer */ - iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); - iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); + /* + * Need to parse media_type and set up + * the appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; - /* set Interrupt Clear Timer */ - iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); + case MEDIA_TYPE_1000M_FULL: + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; - /* set max frame size hw will accept */ - iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + break; - /* jumbo size & rrd retirement timer */ - value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) - << RXQ_JMBOSZ_TH_SHIFT) | - (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) - << RXQ_JMBO_LKAH_SHIFT) | - (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) - << RXQ_RRD_TIMER_SHIFT); - iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + break; - /* Flow Control */ - switch (hw->dev_rev) { - case 0x8001: - case 0x9001: - case 0x9002: - case 0x9003: - set_flow_ctrl_old(adapter); + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; break; + default: - set_flow_ctrl_new(hw); + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; break; } - /* config TXQ */ - value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) - << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | - (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) - << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | - (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) - << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | - TXQ_CTRL_EN; - iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); - - /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ - value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) - << TX_JUMBO_TASK_TH_SHIFT) | - (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) - << TX_TPD_MIN_IPG_SHIFT); - iowrite32(value, hw->hw_addr + 
REG_TX_JUMBO_TASK_TH_TPD_IPG); - - /* config RXQ */ - value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) - << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | - (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) - << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | - (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) - << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | - RXQ_CTRL_EN; - iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); - - /* config DMA Engine */ - value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) - << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | - ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) - << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | - DMA_CTRL_DMAW_EN; - value |= (u32) hw->dma_ord; - if (atl1_rcb_128 == hw->rcb_value) - value |= DMA_CTRL_RCB_VALUE; - iowrite32(value, hw->hw_addr + REG_DMA_CTRL); + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); - /* config CMB / SMB */ - value = (hw->cmb_tpd > adapter->tpd_ring.count) ? - hw->cmb_tpd : adapter->tpd_ring.count; - value <<= 16; - value |= hw->cmb_rrd; - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); - value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); - iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; - /* --- enable CMB / SMB */ - value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; - iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); + ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; - value = ioread32(adapter->hw.hw_addr + REG_ISR); - if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) - value = 1; /* config failed */ - else - value = 0; + ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; - /* clear all interrupt status */ - iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); - iowrite32(0, adapter->hw.hw_addr + REG_ISR); - return value; + return 0; } /* - * atl1_pcie_patch - Patch for PCIE module + * Configures link settings. + * hw - Struct containing variables accessed by shared code + * Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. */ -static void atl1_pcie_patch(struct atl1_adapter *adapter) +static s32 atl1_setup_link(struct atl1_hw *hw) { - u32 value; + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; - /* much vendor magic here */ - value = 0x6500; - iowrite32(value, adapter->hw.hw_addr + 0x12FC); - /* pcie flow control mode change */ - value = ioread32(adapter->hw.hw_addr + 0x1008); - value |= 0x8000; - iowrite32(value, adapter->hw.hw_addr + 0x1008); -} + /* + * Options: + * PHY will advertise value(s) parsed from + * autoneg_advertised and fc + * no matter what autoneg is , We will not wait link result. + */ + ret_val = atl1_phy_setup_autoneg_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, + "error setting up autonegotiation\n"); + return ret_val; + } + /* SW.Reset , En-Auto-Neg if needed */ + ret_val = atl1_phy_reset(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, "error resetting phy\n"); + return ret_val; + } + hw->phy_configured = true; + return ret_val; +} -/* - * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 - * on PCI Command register is disable. - * The function enable this bit. 
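/*
 * Illustrative sketch in plain C (not driver code): the VIA workaround
 * described here is a read-modify-write that clears the Interrupt Disable
 * bit (0x400) in the PCI command word when the BIOS has left it set.  The
 * fake variable below stands in for the device's PCI_COMMAND access; all
 * names and the sample command value are local to this example.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PCI_COMMAND_INTX_DISABLE 0x400u

/* sample command word with the INTx-disable bit set */
static uint16_t fake_pci_command = 0x0007 | FAKE_PCI_COMMAND_INTX_DISABLE;

static void enable_intx(void)
{
	uint16_t value = fake_pci_command;			/* read */

	if (value & FAKE_PCI_COMMAND_INTX_DISABLE) {
		value &= (uint16_t)~FAKE_PCI_COMMAND_INTX_DISABLE; /* modify */
		fake_pci_command = value;			/* write back */
	}
}

int main(void)
{
	enable_intx();
	printf("PCI command after workaround: 0x%04x\n", fake_pci_command);
	return 0;
}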
- * Brackett, 2006/03/15 - */ -static void atl1_via_workaround(struct atl1_adapter *adapter) +static void atl1_init_flash_opcode(struct atl1_hw *hw) { - unsigned long value; + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + /* Atmel */ + hw->flash_vendor = 0; - value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); - if (value & PCI_COMMAND_INTX_DISABLE) - value &= ~PCI_COMMAND_INTX_DISABLE; - iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); + /* Init OP table */ + iowrite8(flash_table[hw->flash_vendor].cmd_program, + hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); + iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, + hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, + hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_rdid, + hw->hw_addr + REG_SPI_FLASH_OP_RDID); + iowrite8(flash_table[hw->flash_vendor].cmd_wren, + hw->hw_addr + REG_SPI_FLASH_OP_WREN); + iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, + hw->hw_addr + REG_SPI_FLASH_OP_RDSR); + iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, + hw->hw_addr + REG_SPI_FLASH_OP_WRSR); + iowrite8(flash_table[hw->flash_vendor].cmd_read, + hw->hw_addr + REG_SPI_FLASH_OP_READ); } -static void atl1_inc_smb(struct atl1_adapter *adapter) +/* + * Performs basic configuration of the adapter. + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +static s32 atl1_init_hw(struct atl1_hw *hw) { - struct stats_msg_block *smb = adapter->smb.smb; - - /* Fill out the OS statistics structure */ - adapter->soft_stats.rx_packets += smb->rx_ok; - adapter->soft_stats.tx_packets += smb->tx_ok; - adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; - adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; - adapter->soft_stats.multicast += smb->rx_mcast; - adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + - smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); - - /* Rx Errors */ - adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + - smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + - smb->rx_rrd_ov + smb->rx_align_err); - adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; - adapter->soft_stats.rx_length_errors += smb->rx_len_err; - adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; - adapter->soft_stats.rx_frame_errors += smb->rx_align_err; - adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + - smb->rx_rxf_ov); - - adapter->soft_stats.rx_pause += smb->rx_pause; - adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; - adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + u32 ret_val = 0; - /* Tx Errors */ - adapter->soft_stats.tx_errors += (smb->tx_late_col + - smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); - adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; - adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; - adapter->soft_stats.tx_window_errors += smb->tx_late_col; + /* Zero out the Multicast HASH table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + /* clear the old settings from the multicast hash table */ + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); - adapter->soft_stats.excecol += smb->tx_abort_col; - adapter->soft_stats.deffer += smb->tx_defer; - adapter->soft_stats.scc += smb->tx_1_col; - adapter->soft_stats.mcc += smb->tx_2_col; 
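/*
 * Illustrative sketch in plain C (not driver code): atl1_init_flash_opcode()
 * above is table-driven setup -- an out-of-range vendor index falls back to
 * entry 0 (Atmel), and each per-vendor SPI opcode is then programmed into
 * its own register.  The table contents below are made-up placeholders;
 * only the clamp-then-copy pattern is the point.
 */
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct flash_ops {
	const char *vendor;
	uint8_t cmd_read;
	uint8_t cmd_program;
};

static const struct flash_ops flash_table[] = {
	{ "Atmel",   0x03, 0x02 },	/* placeholder opcodes */
	{ "Vendor1", 0x0b, 0x02 },
};

static void init_flash_opcode(unsigned int vendor)
{
	if (vendor >= ARRAY_SIZE(flash_table))
		vendor = 0;	/* unknown vendor: default to entry 0 */

	/* the driver iowrite8()s each opcode into its REG_SPI_FLASH_OP_*
	 * slot; here we only show the bounds-checked lookup */
	printf("%s: read=0x%02x program=0x%02x\n",
	       flash_table[vendor].vendor,
	       (unsigned)flash_table[vendor].cmd_read,
	       (unsigned)flash_table[vendor].cmd_program);
}

int main(void)
{
	init_flash_opcode(7);	/* out of range -> falls back to Atmel */
	return 0;
}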
- adapter->soft_stats.latecol += smb->tx_late_col; - adapter->soft_stats.tx_underun += smb->tx_underrun; - adapter->soft_stats.tx_trunc += smb->tx_trunc; - adapter->soft_stats.tx_pause += smb->tx_pause; + atl1_init_flash_opcode(hw); - adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; - adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; - adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; - adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; - adapter->net_stats.multicast = adapter->soft_stats.multicast; - adapter->net_stats.collisions = adapter->soft_stats.collisions; - adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; - adapter->net_stats.rx_over_errors = - adapter->soft_stats.rx_missed_errors; - adapter->net_stats.rx_length_errors = - adapter->soft_stats.rx_length_errors; - adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; - adapter->net_stats.rx_frame_errors = - adapter->soft_stats.rx_frame_errors; - adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; - adapter->net_stats.rx_missed_errors = - adapter->soft_stats.rx_missed_errors; - adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; - adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; - adapter->net_stats.tx_aborted_errors = - adapter->soft_stats.tx_aborted_errors; - adapter->net_stats.tx_window_errors = - adapter->soft_stats.tx_window_errors; - adapter->net_stats.tx_carrier_errors = - adapter->soft_stats.tx_carrier_errors; + if (!hw->phy_configured) { + /* enable GPHY LinkChange Interrrupt */ + ret_val = atl1_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + /* make PHY out of power-saving state */ + ret_val = atl1_phy_leave_power_saving(hw); + if (ret_val) + return ret_val; + /* Call a subroutine to configure the link */ + ret_val = atl1_setup_link(hw); + } + return ret_val; } -static void atl1_update_mailbox(struct atl1_adapter *adapter) +/* + * Detects the current speed and duplex settings of the hardware. 
+ * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) { - unsigned long flags; - u32 tpd_next_to_use; - u32 rfd_next_to_use; - u32 rrd_next_to_clean; - u32 value; - - spin_lock_irqsave(&adapter->mb_lock, flags); - - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); - - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; - spin_unlock_irqrestore(&adapter->mb_lock, flags); -} + /* ; --- Read PHY Specific Status Register (17) */ + ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; -static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, - struct rx_return_desc *rrd, u16 offset) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; - while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { - rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; - if (++rfd_ring->next_to_clean == rfd_ring->count) { - rfd_ring->next_to_clean = 0; - } + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "error getting speed\n"); + return ATLX_ERR_PHY_SPEED; + break; } + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; } -static void atl1_update_rfd_index(struct atl1_adapter *adapter, - struct rx_return_desc *rrd) +void atl1_set_mac_addr(struct atl1_hw *hw) { - u16 num_buf; - - num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / - adapter->rx_buffer_len; - if (rrd->num_buf == num_buf) - /* clean alloc flag for bad rrd */ - atl1_clean_alloc_flag(adapter, rrd, num_buf); + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + /* high dword */ + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); } -static void atl1_rx_checksum(struct atl1_adapter *adapter, - struct rx_return_desc *rrd, struct sk_buff *skb) +/* + * atl1_sw_init - Initialize general software structures (struct atl1_adapter) + * @adapter: board private structure to initialize + * + * atl1_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
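/*
 * Illustrative sketch in plain C (not driver code): atl1_set_mac_addr()
 * above packs the six MAC bytes into two station-address registers --
 * bytes 2..5 into the low dword, bytes 0..1 into the high dword -- so
 * 00-0B-6A-F6-00-DC becomes 0x6AF600DC and 0x0000000B, matching the
 * example in its comment.  Names below are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
	*lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	      ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	*hi = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0b, 0x6a, 0xf6, 0x00, 0xdc };
	uint32_t lo, hi;

	pack_mac(mac, &lo, &hi);
	printf("REG_MAC_STA_ADDR   = %08X\n", (unsigned)lo);	/* 6AF600DC */
	printf("REG_MAC_STA_ADDR+4 = %08X\n", (unsigned)hi);	/* 0000000B */
	return 0;
}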
+ */ +static int __devinit atl1_sw_init(struct atl1_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; - skb->ip_summed = CHECKSUM_NONE; + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | - ERR_FLAG_CODE | ERR_FLAG_OV)) { - adapter->hw_csum_err++; - if (netif_msg_rx_err(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, - "rx checksum error\n"); - return; - } - } + adapter->wol = 0; + adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; + adapter->ict = 50000; /* 100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; - /* not IPv4 */ - if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) - /* checksum is invalid, but it's not an IPv4 pkt, so ok */ - return; + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->rfd_burst = 8; + hw->rrd_burst = 8; + hw->rfd_fetch_gap = 1; + hw->rx_jumbo_th = adapter->rx_buffer_len / 8; + hw->rx_jumbo_lkah = 1; + hw->rrd_ret_timer = 16; + hw->tpd_burst = 4; + hw->tpd_fetch_th = 16; + hw->txf_burst = 0x100; + hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; + hw->tpd_fetch_gap = 1; + hw->rcb_value = atl1_rcb_64; + hw->dma_ord = atl1_dma_ord_enh; + hw->dmar_block = atl1_dma_req_256; + hw->dmaw_block = atl1_dma_req_256; + hw->cmb_rrd = 4; + hw->cmb_tpd = 4; + hw->cmb_rx_timer = 1; /* about 2us */ + hw->cmb_tx_timer = 1; /* about 2us */ + hw->smb_timer = 100000; /* about 200ms */ - /* IPv4 packet */ - if (likely(!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_good++; - return; - } + spin_lock_init(&adapter->lock); + spin_lock_init(&adapter->mb_lock); - /* IPv4, but hardware thinks its checksum is wrong */ - if (netif_msg_rx_err(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", - rrd->pkt_flg, rrd->err_flg); - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); - adapter->hw_csum_err++; - return; + return 0; +} + +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); } /* - * atl1_alloc_rx_buffers - Replace used receive buffers - * @adapter: address of board private structure + * atl1_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: */ -static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/* + * atl1_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private 
structure + * + * Return 0 on success, negative on failure + */ +static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) { + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; struct pci_dev *pdev = adapter->pdev; - struct page *page; - unsigned long offset; - struct atl1_buffer *buffer_info, *next_info; - struct sk_buff *skb; - u16 num_alloc = 0; - u16 rfd_next_to_use, next_next; - struct rx_free_desc *rfd_desc; + int size; + u8 offset = 0; - next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); - if (++next_next == rfd_ring->count) - next_next = 0; - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; + size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", + size); + goto err_nomem; + } + rfd_ring->buffer_info = + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); - while (!buffer_info->alloced && !next_info->alloced) { - if (buffer_info->skb) { - buffer_info->alloced = 1; - goto next; - } + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. + */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; - rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + goto err_nomem; + } - skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->net_stats.rx_dropped++; - break; - } + memset(ring_header->desc, 0, ring_header->size); - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); + /* init TPD ring */ + tpd_ring->dma = ring_header->dma; + offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; + tpd_ring->dma += offset; + tpd_ring->desc = (u8 *) ring_header->desc + offset; + tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; - buffer_info->alloced = 1; - buffer_info->skb = skb; - buffer_info->length = (u16) adapter->rx_buffer_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(pdev, page, offset, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); - rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); - rfd_desc->coalese = 0; + /* init RFD ring */ + rfd_ring->dma = tpd_ring->dma + tpd_ring->size; + offset = (rfd_ring->dma & 0x7) ? 
(8 - (rfd_ring->dma & 0x7)) : 0; + rfd_ring->dma += offset; + rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); + rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; -next: - rfd_next_to_use = next_next; - if (unlikely(++next_next == rfd_ring->count)) - next_next = 0; - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; - num_alloc++; - } + /* init RRD ring */ + rrd_ring->dma = rfd_ring->dma + rfd_ring->size; + offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; + rrd_ring->dma += offset; + rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); + rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; - if (num_alloc) { - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); - } - return num_alloc; + + /* init CMB */ + adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; + offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; + adapter->cmb.dma += offset; + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); + + /* init SMB */ + adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); + offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; + adapter->smb.dma += offset; + adapter->smb.smb = (struct stats_msg_block *) + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; } -static void atl1_intr_rx(struct atl1_adapter *adapter) +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { - int i, count; - u16 length; - u16 rrd_next_to_clean; - u32 value; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_buffer *buffer_info; - struct rx_return_desc *rrd; - struct sk_buff *skb; - - count = 0; - rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); - while (1) { - rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); - i = 1; - if (likely(rrd->xsz.valid)) { /* packet valid */ -chk_rrd: - /* check rrd status */ - if (likely(rrd->num_buf == 1)) - goto rrd_ok; - else if (netif_msg_rx_err(adapter)) { - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "unexpected RRD buffer count\n"); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "rx_buf_len = %d\n", - adapter->rx_buffer_len); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "RRD num_buf = %d\n", - rrd->num_buf); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "RRD pkt_len = %d\n", - rrd->xsz.xsum_sz.pkt_size); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "RRD pkt_flg = 0x%08X\n", - rrd->pkt_flg); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "RRD err_flg = 0x%08X\n", - rrd->err_flg); - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "RRD vlan_tag = 0x%08X\n", - rrd->vlan_tag); - } + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); - /* rrd seems to be bad */ - if (unlikely(i-- > 0)) { - /* rrd may not be DMAed completely */ - udelay(1); - goto chk_rrd; - } - /* bad rrd */ - if (netif_msg_rx_err(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "bad RRD\n"); - /* see if update RFD index */ - if 
(rrd->num_buf > 1) - atl1_update_rfd_index(adapter, rrd); + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} - /* update rrd */ - rrd->xsz.valid = 0; - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - continue; - } else { /* current rrd still not be updated */ +/* + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure + */ +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; - break; + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; } -rrd_ok: - /* clean alloc flag for bad rrd */ - atl1_clean_alloc_flag(adapter, rrd, 0); - - buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; - if (++rfd_ring->next_to_clean == rfd_ring->count) - rfd_ring->next_to_clean = 0; - - /* update rrd next to clean */ - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM - | ERR_FLAG_LEN))) { - /* packet error, don't need upstream */ - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - continue; - } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; } - - /* Good Receive */ - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_FROMDEVICE); - skb = buffer_info->skb; - length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); - - skb_put(skb, length - ETH_FCS_LEN); - - /* Receive Checksum Offload */ - atl1_rx_checksum(adapter, rrd, skb); - skb->protocol = eth_type_trans(skb, adapter->netdev); - - if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { - u16 vlan_tag = (rrd->vlan_tag >> 4) | - ((rrd->vlan_tag & 7) << 13) | - ((rrd->vlan_tag & 8) << 9); - vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); - } else - netif_rx(skb); - - /* let protocol layer free skb */ - buffer_info->skb = NULL; - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - - adapter->netdev->last_rx = jiffies; } - atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); - - atl1_alloc_rx_buffers(adapter); + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); - /* update mailbox ? 
*/ - if (count) { - u32 tpd_next_to_use; - u32 rfd_next_to_use; + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); - spin_lock(&adapter->mb_lock); + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = - atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = - atomic_read(&adapter->rrd_ring.next_to_clean); - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - spin_unlock(&adapter->mb_lock); - } + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); } -static void atl1_intr_tx(struct atl1_adapter *adapter) +/* + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; - u16 sw_tpd_next_to_clean; - u16 cmb_tpd_next_to_clean; - - sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); - cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); - - while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { - struct tx_packet_desc *tpd; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; - tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); - buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->dma) { - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); buffer_info->dma = 0; } + } + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->skb) { - dev_kfree_skb_irq(buffer_info->skb); + dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } - - if (++sw_tpd_next_to_clean == tpd_ring->count) - sw_tpd_next_to_clean = 0; } - atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); - if (netif_queue_stopped(adapter->netdev) - && netif_carrier_ok(adapter->netdev)) - netif_wake_queue(adapter->netdev); -} + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); -static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) -{ - u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); - u16 next_to_use = atomic_read(&tpd_ring->next_to_use); - return ((next_to_clean > next_to_use) ? 
- next_to_clean - next_to_use - 1 : - tpd_ring->count + next_to_clean - next_to_use - 1); + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); } -static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) +/* + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1_free_ring_resources(struct atl1_adapter *adapter) { - /* spinlock held */ - u8 hdr_len, ip_off; - u32 real_len; - int err; + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; - if (skb_shinfo(skb)->gso_size) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (unlikely(err)) - return -1; - } + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); - if (skb->protocol == ntohs(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); - real_len = (((unsigned char *)iph - skb->data) + - ntohs(iph->tot_len)); - if (real_len < skb->len) - pskb_trim(skb, real_len); - hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); - if (skb->len == hdr_len) { - iph->check = 0; - tcp_hdr(skb)->check = - ~csum_tcpudp_magic(iph->saddr, - iph->daddr, tcp_hdrlen(skb), - IPPROTO_TCP, 0); - ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << - TPD_IPHL_SHIFT; - ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & - TPD_TCPHDRLEN_MASK) << - TPD_TCPHDRLEN_SHIFT; - ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; - ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; - return 1; - } + tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, IPPROTO_TCP, 0); - ip_off = (unsigned char *)iph - - (unsigned char *) skb_network_header(skb); - if (ip_off == 8) /* 802.3-SNAP frame */ - ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; - else if (ip_off != 0) - return -2; + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; - ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << - TPD_IPHL_SHIFT; - ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & - TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; - ptpd->word3 |= (skb_shinfo(skb)->gso_size & - TPD_MSS_MASK) << TPD_MSS_SHIFT; - ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; - return 3; - } - } - return false; + rrd_ring->desc = NULL; + rrd_ring->dma = 0; } -static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) +static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) { - u8 css, cso; - - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - css = (u8) (skb->csum_start - skb_headroom(skb)); - cso = css + (u8) skb->csum_offset; - if (unlikely(css & 0x1)) { - /* L1 hardware requires an even number here */ - if (netif_msg_tx_err(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "payload offset not an even number\n"); - return -1; - } - ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << - TPD_PLOADOFFSET_SHIFT; - ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << - TPD_CCSUMOFFSET_SHIFT; - ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; - return true; - } - return 0; + u32 value; + 
struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + /* speed */ + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + /* preamble length */ + value |= (((u32) adapter->hw.preamble_len + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + /* vlan */ + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + /* rx checksum + if (adapter->rx_csum) + value |= MAC_CTRL_RX_CHKSUM_EN; + */ + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + /* value |= MAC_CTRL_LOOPBACK; */ + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); } -static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) +static u32 atl1_check_link(struct atl1_adapter *adapter) { - /* spinlock held */ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - u16 buf_len = skb->len; - struct page *page; - unsigned long offset; - unsigned int nr_frags; - unsigned int f; - int retval; - u16 next_to_use; - u16 data_len; - u8 hdr_len; - - buf_len -= skb->data_len; - nr_frags = skb_shinfo(skb)->nr_frags; - next_to_use = atomic_read(&tpd_ring->next_to_use); - buffer_info = &tpd_ring->buffer_info[next_to_use]; - if (unlikely(buffer_info->skb)) - BUG(); - /* put skb in last TPD */ - buffer_info->skb = NULL; - - retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; - if (retval) { - /* TSO */ - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - buffer_info->length = hdr_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, hdr_len, - PCI_DMA_TODEVICE); - - if (++next_to_use == tpd_ring->count) - next_to_use = 0; - - if (buf_len > hdr_len) { - int i, nseg; - - data_len = buf_len - hdr_len; - nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < nseg; i++) { - buffer_info = - &tpd_ring->buffer_info[next_to_use]; - buffer_info->skb = NULL; - buffer_info->length = - (ATL1_MAX_TX_BUF_LEN >= - data_len) ? 
ATL1_MAX_TX_BUF_LEN : data_len; - data_len -= buffer_info->length; - page = virt_to_page(skb->data + - (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); - offset = (unsigned long)(skb->data + - (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & - ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, - page, offset, buffer_info->length, - PCI_DMA_TODEVICE); - if (++next_to_use == tpd_ring->count) - next_to_use = 0; - } - } - } else { - /* not TSO */ - buffer_info->length = buf_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, buf_len, PCI_DMA_TODEVICE); - if (++next_to_use == tpd_ring->count) - next_to_use = 0; - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - u16 i, nseg; - - frag = &skb_shinfo(skb)->frags[f]; - buf_len = frag->size; - - nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; - for (i = 0; i < nseg; i++) { - buffer_info = &tpd_ring->buffer_info[next_to_use]; - if (unlikely(buffer_info->skb)) - BUG(); - buffer_info->skb = NULL; - buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? - ATL1_MAX_TX_BUF_LEN : buf_len; - buf_len -= buffer_info->length; - buffer_info->dma = pci_map_page(adapter->pdev, - frag->page, - frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), - buffer_info->length, PCI_DMA_TODEVICE); + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; - if (++next_to_use == tpd_ring->count) - next_to_use = 0; + /* MII_BMSR must read twice */ + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data & BMSR_LSTATUS)) { + /* link down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "link is down\n"); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); } + return 0; } - /* last tpd's buffer-info */ - buffer_info->skb = skb; -} - -static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, - struct tx_packet_desc *ptpd) -{ - /* spinlock held */ - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - struct tx_packet_desc *tpd; - u16 j; - u32 val; - u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); - - for (j = 0; j < count; j++) { - buffer_info = &tpd_ring->buffer_info[next_to_use]; - tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); - if (tpd != ptpd) - memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); - tpd->buffer_addr = cpu_to_le64(buffer_info->dma); - tpd->word2 = (cpu_to_le16(buffer_info->length) & - TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; - - /* - * if this is the first packet in a TSO chain, set - * TPD_HDRFLAG, otherwise, clear it. 
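/*
 * Illustrative sketch in plain C (not driver code): the transmit mapping
 * code above chops the skb's linear data and each fragment into buffers of
 * at most ATL1_MAX_TX_BUF_LEN bytes, so the number of descriptors needed is
 * a ceiling division.  The limit below is a stand-in value; names are local
 * to this example.
 */
#include <stdio.h>

#define FAKE_MAX_TX_BUF_LEN 12288	/* stand-in per-descriptor limit */

static unsigned int tpds_needed(unsigned int len)
{
	/* same (len + MAX - 1) / MAX rounding used by atl1_tx_map() */
	return (len + FAKE_MAX_TX_BUF_LEN - 1) / FAKE_MAX_TX_BUF_LEN;
}

int main(void)
{
	printf("1500 bytes  -> %u descriptor(s)\n", tpds_needed(1500));
	printf("60000 bytes -> %u descriptor(s)\n", tpds_needed(60000));
	return 0;
}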
- */ - val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & - TPD_SEGMENT_EN_MASK; - if (val) { - if (!j) - tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; - else - tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); - } - - if (j == (count - 1)) - tpd->word3 |= 1 << TPD_EOP_SHIFT; + /* Link Up */ + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; - if (++next_to_use == tpd_ring->count) - next_to_use = 0; + switch (hw->media_type) { + case MEDIA_TYPE_1000M_FULL: + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; } - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - atomic_set(&tpd_ring->next_to_use, next_to_use); -} - -static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - int len = skb->len; - int tso; - int count = 1; - int ret_val; - struct tx_packet_desc *ptpd; - u16 frag_size; - u16 vlan_tag; - unsigned long flags; - unsigned int nr_frags = 0; - unsigned int mss = 0; - unsigned int f; - unsigned int proto_hdr_len; - - len -= skb->data_len; - - if (unlikely(skb->len <= 0)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + /* link result is our setting */ + if (!reconfig) { + if (adapter->link_speed != speed + || adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1_setup_mac_ctrl(adapter); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, + "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "full duplex" : "half duplex"); + } + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + return 0; } - nr_frags = skb_shinfo(skb)->nr_frags; - for (f = 0; f < nr_frags; f++) { - frag_size = skb_shinfo(skb)->frags[f].size; - if (frag_size) - count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; + /* change original link status */ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); } - mss = skb_shinfo(skb)->gso_size; - if (mss) { - if (skb->protocol == ntohs(ETH_P_IP)) { - proto_hdr_len = (skb_transport_offset(skb) + - tcp_hdrlen(skb)); - if (unlikely(proto_hdr_len > len)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* need additional TPD ? 
*/ - if (proto_hdr_len != len) - count += (len - proto_hdr_len + - ATL1_MAX_TX_BUF_LEN - 1) / - ATL1_MAX_TX_BUF_LEN; + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && + hw->media_type != MEDIA_TYPE_1000M_FULL) { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + return 0; } - if (!spin_trylock_irqsave(&adapter->lock, flags)) { - /* Can't get lock - tell upper layer to requeue */ - if (netif_msg_tx_queued(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "tx locked\n"); - return NETDEV_TX_LOCKED; + /* auto-neg, insert timer to re-config phy */ + if (!adapter->phy_timer_pending) { + adapter->phy_timer_pending = true; + mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); } - if (atl1_tpd_avail(&adapter->tpd_ring) < count) { - /* not enough descriptors */ - netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->lock, flags); - if (netif_msg_tx_queued(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "tx busy\n"); - return NETDEV_TX_BUSY; - } + return 0; +} - ptpd = ATL1_TPD_DESC(tpd_ring, - (u16) atomic_read(&tpd_ring->next_to_use)); - memset(ptpd, 0, sizeof(struct tx_packet_desc)); +static void set_flow_ctrl_old(struct atl1_adapter *adapter) +{ + u32 hi, lo, value; - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { - vlan_tag = vlan_tx_tag_get(skb); - vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | - ((vlan_tag >> 9) & 0x8); - ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; - ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) << - TPD_VL_TAGGED_SHIFT; - } + /* RFD Flow Control */ + value = adapter->rfd_ring.count; + hi = value / 16; + if (hi < 2) + hi = 2; + lo = value * 7 / 8; - tso = atl1_tso(adapter, skb, ptpd); - if (tso < 0) { - spin_unlock_irqrestore(&adapter->lock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); - if (!tso) { - ret_val = atl1_tx_csum(adapter, skb, ptpd); - if (ret_val < 0) { - spin_unlock_irqrestore(&adapter->lock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } + /* RRD Flow Control */ + value = adapter->rrd_ring.count; + lo = value / 16; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} - atl1_tx_map(adapter, skb, ptpd); - atl1_tx_queue(adapter, count, ptpd); - atl1_update_mailbox(adapter); - spin_unlock_irqrestore(&adapter->lock, flags); - netdev->trans_start = jiffies; - return NETDEV_TX_OK; +static void set_flow_ctrl_new(struct atl1_hw *hw) +{ + u32 hi, lo, value; + + /* RXF Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); + lo = value / 16; + if (lo < 192) + lo = 192; + hi = value * 7 / 8; + if (hi < lo) + hi = lo + 16; + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, 
hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); + lo = value / 8; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + if (hi < lo) + hi = lo + 3; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } /* - * atl1_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure + * atl1_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. */ -static irqreturn_t atl1_intr(int irq, void *data) +static u32 atl1_configure(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = netdev_priv(data); - u32 status; - u8 update_rx; - int max_ints = 10; + struct atl1_hw *hw = &adapter->hw; + u32 value; - status = adapter->cmb.cmb->int_stats; - if (!status) - return IRQ_NONE; + /* clear interrupt status */ + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); - update_rx = 0; + /* set MAC Address */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | + (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); - do { - /* clear CMB interrupt status at once */ - adapter->cmb.cmb->int_stats = 0; + /* tx / rx ring */ - if (status & ISR_GPHY) /* clear phy status */ - atlx_clear_phy_int(adapter); + /* HI base address */ + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), + hw->hw_addr + REG_DESC_BASE_ADDR_HI); + /* LO base address */ + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RFD_ADDR_LO); + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RRD_ADDR_LO); + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_TPD_ADDR_LO); + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_CMB_ADDR_LO); + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_SMB_ADDR_LO); + + /* element count */ + value = adapter->rrd_ring.count; + value <<= 16; + value += adapter->rfd_ring.count; + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); + + /* Load Ptr */ + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); + + /* config Mailbox */ + value = ((atomic_read(&adapter->tpd_ring.next_to_use) + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAILBOX); + + /* config IPG/IFG */ + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) + << MAC_IPG_IFG_IPGT_SHIFT) | + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); + + /* config Half-Duplex Control */ + value = ((u32) 
hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + + /* set Interrupt Moderator Timer */ + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); - /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ - iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + /* set Interrupt Clear Timer */ + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); - /* check if SMB intr */ - if (status & ISR_SMB) - atl1_inc_smb(adapter); + /* set max frame size hw will accept */ + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); - /* check if PCIE PHY Link down */ - if (status & ISR_PHY_LINKDOWN) { - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie phy link down %x\n", status); - if (netif_running(adapter->netdev)) { /* reset MAC */ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } - } + /* jumbo size & rrd retirement timer */ + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) + << RXQ_JMBOSZ_TH_SHIFT) | + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); - /* check if DMA read/write error ? */ - if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie DMA r/w error (status = 0x%x)\n", - status); - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } + /* Flow Control */ + switch (hw->dev_rev) { + case 0x8001: + case 0x9001: + case 0x9002: + case 0x9003: + set_flow_ctrl_old(adapter); + break; + default: + set_flow_ctrl_new(hw); + break; + } - /* link event */ - if (status & ISR_GPHY) { - adapter->soft_stats.tx_carrier_errors++; - atl1_check_for_link(adapter); - } + /* config TXQ */ + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | + TXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); - /* transmit event */ - if (status & ISR_CMB_TX) - atl1_intr_tx(adapter); + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) + << TX_JUMBO_TASK_TH_SHIFT) | + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) + << TX_TPD_MIN_IPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); - /* rx exception */ - if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV | ISR_CMB_RX))) { - if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV)) - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, - &adapter->pdev->dev, - "rx exception, ISR = 0x%x\n", - status); - atl1_intr_rx(adapter); - } + /* config RXQ */ + value = (((u32) hw->rfd_burst & 
RXQ_CTRL_RFD_BURST_NUM_MASK) + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | + RXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); - if (--max_ints < 0) - break; + /* config DMA Engine */ + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + DMA_CTRL_DMAW_EN; + value |= (u32) hw->dma_ord; + if (atl1_rcb_128 == hw->rcb_value) + value |= DMA_CTRL_RCB_VALUE; + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); - } while ((status = adapter->cmb.cmb->int_stats)); + /* config CMB / SMB */ + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? + hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); - /* re-enable Interrupt */ - iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); - return IRQ_HANDLED; -} + /* --- enable CMB / SMB */ + value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); -/* - * atl1_watchdog - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - */ -static void atl1_watchdog(unsigned long data) -{ - struct atl1_adapter *adapter = (struct atl1_adapter *)data; + value = ioread32(adapter->hw.hw_addr + REG_ISR); + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) + value = 1; /* config failed */ + else + value = 0; - /* Reset the timer */ - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); + /* clear all interrupt status */ + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); + iowrite32(0, adapter->hw.hw_addr + REG_ISR); + return value; } /* - * atl1_phy_config - Timer Call-back - * @data: pointer to netdev cast into an unsigned long + * atl1_pcie_patch - Patch for PCIE module */ -static void atl1_phy_config(unsigned long data) +static void atl1_pcie_patch(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = (struct atl1_adapter *)data; - struct atl1_hw *hw = &adapter->hw; - unsigned long flags; + u32 value; - spin_lock_irqsave(&adapter->lock, flags); - adapter->phy_timer_pending = false; - atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); - atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); - atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); - spin_unlock_irqrestore(&adapter->lock, flags); + /* much vendor magic here */ + value = 0x6500; + iowrite32(value, adapter->hw.hw_addr + 0x12FC); + /* pcie flow control mode change */ + value = ioread32(adapter->hw.hw_addr + 0x1008); + value |= 0x8000; + iowrite32(value, adapter->hw.hw_addr + 0x1008); } /* - * Orphaned vendor comment left intact here: - * - * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT - * will assert. We do soft reset <0x1400=1> according - * with the SPEC. BUT, it seemes that PCIE or DMA - * state-machine will not be reset. DMAR_TO_INT will - * assert again and again. - * + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 + * on PCI Command register is disable. + * The function enable this bit. 
+ * Brackett, 2006/03/15 */ - -static int atl1_reset(struct atl1_adapter *adapter) +static void atl1_via_workaround(struct atl1_adapter *adapter) { - int ret; - ret = atl1_reset_hw(&adapter->hw); - if (ret) - return ret; - return atl1_init_hw(&adapter->hw); + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); } -static s32 atl1_up(struct atl1_adapter *adapter) +static void atl1_inc_smb(struct atl1_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - int err; - int irq_flags = IRQF_SAMPLE_RANDOM; + struct stats_msg_block *smb = adapter->smb.smb; - /* hardware has been reset, we need to reload some things */ - atlx_set_multi(netdev); - atl1_init_ring_ptrs(adapter); - atlx_restore_vlan(adapter); - err = atl1_alloc_rx_buffers(adapter); - if (unlikely(!err)) - /* no RX BUFFER allocated */ - return -ENOMEM; + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok; + adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); - if (unlikely(atl1_configure(adapter))) { - err = -EIO; - goto err_up; - } + /* Rx Errors */ + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + + smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + + smb->rx_rxf_ov); - err = pci_enable_msi(adapter->pdev); - if (err) { - if (netif_msg_ifup(adapter)) - dev_info(&adapter->pdev->dev, - "Unable to enable MSI: %d\n", err); - irq_flags |= IRQF_SHARED; - } + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; - err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, - netdev->name, netdev); - if (unlikely(err)) - goto err_up; + /* Tx Errors */ + adapter->soft_stats.tx_errors += (smb->tx_late_col + + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; - mod_timer(&adapter->watchdog_timer, jiffies); - atlx_irq_enable(adapter); - atl1_check_link(adapter); - return 0; + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; -err_up: - pci_disable_msi(adapter->pdev); - /* free rx_buffers */ - atl1_clean_rx_ring(adapter); - return err; + adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; + adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; + adapter->net_stats.rx_bytes = 
adapter->soft_stats.rx_bytes; + adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; + adapter->net_stats.multicast = adapter->soft_stats.multicast; + adapter->net_stats.collisions = adapter->soft_stats.collisions; + adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; + adapter->net_stats.rx_over_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + adapter->net_stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + adapter->net_stats.rx_missed_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; + adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + adapter->net_stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + adapter->net_stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + adapter->net_stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; } -static void atl1_down(struct atl1_adapter *adapter) +static void atl1_update_mailbox(struct atl1_adapter *adapter) { - struct net_device *netdev = adapter->netdev; + unsigned long flags; + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 value; - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_config_timer); - adapter->phy_timer_pending = false; + spin_lock_irqsave(&adapter->mb_lock, flags); - atlx_irq_disable(adapter); - free_irq(adapter->pdev->irq, netdev); - pci_disable_msi(adapter->pdev); - atl1_reset_hw(&adapter->hw); - adapter->cmb.cmb->int_stats = 0; + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); - adapter->link_speed = SPEED_0; - adapter->link_duplex = -1; - netif_carrier_off(netdev); - netif_stop_queue(netdev); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - atl1_clean_tx_ring(adapter); - atl1_clean_rx_ring(adapter); + spin_unlock_irqrestore(&adapter->mb_lock, flags); } -static void atl1_tx_timeout_task(struct work_struct *work) +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) { - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); - struct net_device *netdev = adapter->netdev; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - netif_device_detach(netdev); - atl1_down(adapter); - atl1_up(adapter); - netif_device_attach(netdev); + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } + } } -/* - * atl1_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - */ -static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) { - struct 
atl1_adapter *adapter = netdev_priv(netdev); - int old_mtu = netdev->mtu; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - if (netif_msg_link(adapter)) - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); - return -EINVAL; - } - - adapter->hw.max_frame_size = max_frame; - adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; - adapter->rx_buffer_len = (max_frame + 7) & ~7; - adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; - - netdev->mtu = new_mtu; - if ((old_mtu != new_mtu) && netif_running(netdev)) { - atl1_down(adapter); - atl1_up(adapter); - } + u16 num_buf; - return 0; + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); } -/* - * atl1_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - */ -static int atl1_open(struct net_device *netdev) +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) { - struct atl1_adapter *adapter = netdev_priv(netdev); - int err; + struct pci_dev *pdev = adapter->pdev; - /* allocate transmit descriptors */ - err = atl1_setup_ring_resources(adapter); - if (err) - return err; + skb->ip_summed = CHECKSUM_NONE; - err = atl1_up(adapter); - if (err) - goto err_up; + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } - return 0; + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; -err_up: - atl1_reset(adapter); - return err; + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } + + /* IPv4, but hardware thinks its checksum is wrong */ + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "hw csum wrong, pkt_flag:%x, err_flag:%x\n", + rrd->pkt_flg, rrd->err_flg); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); + adapter->hw_csum_err++; + return; } /* - * atl1_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
+ * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure */ -static int atl1_close(struct net_device *netdev) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - atl1_down(adapter); - atl1_free_ring_resources(adapter); - return 0; -} - -#ifdef CONFIG_PM -static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - u32 ctrl = 0; - u32 wufc = adapter->wol; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; - netif_device_detach(netdev); - if (netif_running(netdev)) - atl1_down(adapter); + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - if (ctrl & BMSR_LSTATUS) - wufc &= ~ATLX_WUFC_LNKC; + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } - /* reduce speed to 10/100M */ - if (wufc) { - atl1_phy_enter_power_saving(hw); - /* if resume, let driver to re- setup link */ - hw->phy_configured = false; - atl1_set_mac_addr(hw); - atlx_set_multi(netdev); + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); - ctrl = 0; - /* turn on magic packet wol */ - if (wufc & ATLX_WUFC_MAG) - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->net_stats.rx_dropped++; + break; + } - /* turn on Link change WOL */ - if (wufc & ATLX_WUFC_LNKC) - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); - iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); - /* turn on all-multi mode if wake on multicast is enabled */ - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_DBG; - ctrl &= ~MAC_CTRL_PROMIS_EN; - if (wufc & ATLX_WUFC_MC) - ctrl |= MAC_CTRL_MC_ALL_EN; - else - ctrl &= ~MAC_CTRL_MC_ALL_EN; + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; - /* turn on broadcast mode if wake on-BC is enabled */ - if (wufc & ATLX_WUFC_BC) - ctrl |= MAC_CTRL_BC_EN; - else - ctrl &= ~MAC_CTRL_BC_EN; +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; - /* enable RX */ - ctrl |= MAC_CTRL_RX_EN; - iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { - 
iowrite32(0, hw->hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; } - pci_save_state(pdev); - pci_disable_device(pdev); - - pci_set_power_state(pdev, PCI_D3hot); - - return 0; + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; } -static int atl1_resume(struct pci_dev *pdev) +static void atl1_intr_rx(struct atl1_adapter *adapter) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - u32 err; + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); + count = 0; - /* FIXME: check and handle */ - err = pci_enable_device(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - atl1_reset(adapter); + while (1) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + else if (netif_msg_rx_err(adapter)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "unexpected RRD buffer count\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx_buf_len = %d\n", + adapter->rx_buffer_len); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD num_buf = %d\n", + rrd->num_buf); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_len = %d\n", + rrd->xsz.xsum_sz.pkt_size); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_flg = 0x%08X\n", + rrd->pkt_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD err_flg = 0x%08X\n", + rrd->err_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD vlan_tag = 0x%08X\n", + rrd->vlan_tag); + } - if (netif_running(netdev)) - atl1_up(adapter); - netif_device_attach(netdev); + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if update RFD index */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); - atl1_via_workaround(adapter); + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* current rrd still not be updated */ - return 0; -} -#else -#define atl1_suspend NULL -#define atl1_resume NULL -#endif + break; + } +rrd_ok: + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, 0); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void atl1_poll_controller(struct net_device *netdev) -{ - disable_irq(netdev->irq); - atl1_intr(netdev->irq, netdev); - enable_irq(netdev->irq); -} -#endif + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; -/* - * 
atl1_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in atl1_pci_tbl - * - * Returns 0 on success, negative on failure - * - * atl1_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - */ -static int __devinit atl1_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct atl1_adapter *adapter; - static int cards_found = 0; - int err; + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; - err = pci_enable_device(pdev); - if (err) - return err; + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, don't need upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } - /* - * The atl1 chip can DMA to 64-bit addresses, but it uses a single - * shared register for the high 32 bits, so only a single, aligned, - * 4 GB physical address range can be used at a time. - * - * Supporting 64-bit DMA on this hardware is more trouble than it's - * worth. It is far easier to limit to 32-bit DMA than update - * various kernel subsystems to support the mechanics required by a - * fixed-high-32-bit system. - */ - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (err) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_dma; - } - /* - * Mark all PCI regions associated with PCI device - * pdev as being reserved by owner atl1_driver_name - */ - err = pci_request_regions(pdev, ATLX_DRIVER_NAME); - if (err) - goto err_request_regions; + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); - /* - * Enables bus-mastering on the device and calls - * pcibios_set_master to do the needed arch specific settings - */ - pci_set_master(pdev); + skb_put(skb, length - ETH_FCS_LEN); - netdev = alloc_etherdev(sizeof(struct atl1_adapter)); - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else + netif_rx(skb); + + /* let protocol layer free skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + + adapter->netdev->last_rx = jiffies; } - SET_NETDEV_DEV(netdev, &pdev->dev); - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->hw.back = adapter; - adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); - adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); - if (!adapter->hw.hw_addr) { - err = -EIO; - goto err_pci_iomap; - } - /* get device revision number */ - adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + - (REG_MASTER_CTRL + 2)); - if (netif_msg_probe(adapter)) - dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + atl1_alloc_rx_buffers(adapter); - /* set default ring resource counts */ - adapter->rfd_ring.count = adapter->rrd_ring.count = 
ATL1_DEFAULT_RFD; - adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + /* update mailbox ? */ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; - adapter->mii.dev = netdev; - adapter->mii.mdio_read = mdio_read; - adapter->mii.mdio_write = mdio_write; - adapter->mii.phy_id_mask = 0x1f; - adapter->mii.reg_num_mask = 0x1f; + spin_lock(&adapter->mb_lock); - netdev->open = &atl1_open; - netdev->stop = &atl1_close; - netdev->hard_start_xmit = &atl1_xmit_frame; - netdev->get_stats = &atlx_get_stats; - netdev->set_multicast_list = &atlx_set_multi; - netdev->set_mac_address = &atl1_set_mac; - netdev->change_mtu = &atl1_change_mtu; - netdev->do_ioctl = &atlx_ioctl; - netdev->tx_timeout = &atlx_tx_timeout; - netdev->watchdog_timeo = 5 * HZ; -#ifdef CONFIG_NET_POLL_CONTROLLER - netdev->poll_controller = atl1_poll_controller; -#endif - netdev->vlan_rx_register = atlx_vlan_rx_register; + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } +} - netdev->ethtool_ops = &atl1_ethtool_ops; - adapter->bd_number = cards_found; +static void atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; - /* setup the private structure */ - err = atl1_sw_init(adapter); - if (err) - goto err_common; + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); - netdev->features = NETIF_F_HW_CSUM; - netdev->features |= NETIF_F_SG; - netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_LLTX; + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + struct tx_packet_desc *tpd; - /* - * patch for some L1 of old version, - * the final version of L1 may not need these - * patches - */ - /* atl1_pcie_patch(adapter); */ + tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } - /* really reset GPHY core */ - iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } - /* - * reset the controller to - * put the device in a known good starting state - */ - if (atl1_reset_hw(&adapter->hw)) { - err = -EIO; - goto err_common; + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); - /* copy the MAC address out of the EEPROM */ - atl1_read_mac_addr(&adapter->hw); - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + if (netif_queue_stopped(adapter->netdev) + && netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); +} - if (!is_valid_ether_addr(netdev->dev_addr)) { - err = -EIO; - goto err_common; - } +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) +{ + u16 
next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 next_to_use = atomic_read(&tpd_ring->next_to_use); + return ((next_to_clean > next_to_use) ? + next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1); +} - atl1_check_options(adapter); +static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + /* spinlock held */ + u8 hdr_len, ip_off; + u32 real_len; + int err; - /* pre-init the MAC, and setup link */ - err = atl1_init_hw(&adapter->hw); - if (err) { - err = -EIO; - goto err_common; + if (skb_shinfo(skb)->gso_size) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + + if (skb->protocol == ntohs(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + real_len = (((unsigned char *)iph - skb->data) + + ntohs(iph->tot_len)); + if (real_len < skb->len) + pskb_trim(skb, real_len); + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (skb->len == hdr_len) { + iph->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcp_hdrlen(skb), + IPPROTO_TCP, 0); + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; + ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; + return 1; + } + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, IPPROTO_TCP, 0); + ip_off = (unsigned char *)iph - + (unsigned char *) skb_network_header(skb); + if (ip_off == 8) /* 802.3-SNAP frame */ + ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; + else if (ip_off != 0) + return -2; + + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + return 3; + } } + return false; +} - atl1_pcie_patch(adapter); - /* assume we have no link for now */ - netif_carrier_off(netdev); - netif_stop_queue(netdev); +static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + u8 css, cso; - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = &atl1_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + css = (u8) (skb->csum_start - skb_headroom(skb)); + cso = css + (u8) skb->csum_offset; + if (unlikely(css & 0x1)) { + /* L1 hardware requires an even number here */ + if (netif_msg_tx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); + return -1; + } + ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; + return true; + } + return 0; +} - init_timer(&adapter->phy_config_timer); - adapter->phy_config_timer.function = &atl1_phy_config; - adapter->phy_config_timer.data = (unsigned long)adapter; - adapter->phy_timer_pending = false; +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + /* spinlock held */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 buf_len = skb->len; + struct page *page; + unsigned long offset; + unsigned int nr_frags; + unsigned int f; + 
int retval; + u16 next_to_use; + u16 data_len; + u8 hdr_len; - INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); + buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + /* put skb in last TPD */ + buffer_info->skb = NULL; - INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); + retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (retval) { + /* TSO */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = hdr_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, hdr_len, + PCI_DMA_TODEVICE); - INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; - err = register_netdev(netdev); - if (err) - goto err_common; + if (buf_len > hdr_len) { + int i, nseg; - cards_found++; - atl1_via_workaround(adapter); - return 0; + data_len = buf_len - hdr_len; + nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = + &tpd_ring->buffer_info[next_to_use]; + buffer_info->skb = NULL; + buffer_info->length = + (ATL1_MAX_TX_BUF_LEN >= + data_len) ? ATL1_MAX_TX_BUF_LEN : data_len; + data_len -= buffer_info->length; + page = virt_to_page(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); + offset = (unsigned long)(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & + ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + } + } else { + /* not TSO */ + buffer_info->length = buf_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, buf_len, PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } -err_common: - pci_iounmap(pdev, adapter->hw.hw_addr); -err_pci_iomap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_regions(pdev); -err_dma: -err_request_regions: - pci_disable_device(pdev); - return err; -} + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + u16 i, nseg; -/* - * atl1_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * atl1_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - */ -static void __devexit atl1_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter; - /* Device not available. Return. */ - if (!netdev) - return; + frag = &skb_shinfo(skb)->frags[f]; + buf_len = frag->size; - adapter = netdev_priv(netdev); + nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + if (unlikely(buffer_info->skb)) + BUG(); + buffer_info->skb = NULL; + buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? 
+ ATL1_MAX_TX_BUF_LEN : buf_len; + buf_len -= buffer_info->length; + buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->length, PCI_DMA_TODEVICE); - /* - * Some atl1 boards lack persistent storage for their MAC, and get it - * from the BIOS during POST. If we've been messing with the MAC - * address, we need to save the permanent one. - */ - if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { - memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, - ETH_ALEN); - atl1_set_mac_addr(&adapter->hw); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } } - iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); - unregister_netdev(netdev); - pci_iounmap(pdev, adapter->hw.hw_addr); - pci_release_regions(pdev); - free_netdev(netdev); - pci_disable_device(pdev); + /* last tpd's buffer-info */ + buffer_info->skb = skb; } -static struct pci_driver atl1_driver = { - .name = ATLX_DRIVER_NAME, - .id_table = atl1_pci_tbl, - .probe = atl1_probe, - .remove = __devexit_p(atl1_remove), - .suspend = atl1_suspend, - .resume = atl1_resume -}; - -/* - * atl1_exit_module - Driver Exit Cleanup Routine - * - * atl1_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1_exit_module(void) +static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, + struct tx_packet_desc *ptpd) { - pci_unregister_driver(&atl1_driver); -} + /* spinlock held */ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct tx_packet_desc *tpd; + u16 j; + u32 val; + u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); -/* - * atl1_init_module - Driver Registration Routine - * - * atl1_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1_init_module(void) -{ - return pci_register_driver(&atl1_driver); -} + for (j = 0; j < count; j++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); + if (tpd != ptpd) + memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); + tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + tpd->word2 = (cpu_to_le16(buffer_info->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; -module_init(atl1_init_module); -module_exit(atl1_exit_module); + /* + * if this is the first packet in a TSO chain, set + * TPD_HDRFLAG, otherwise, clear it. + */ + val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & + TPD_SEGMENT_EN_MASK; + if (val) { + if (!j) + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + else + tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); + } -struct atl1_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; + if (j == (count - 1)) + tpd->word3 |= 1 << TPD_EOP_SHIFT; -#define ATL1_STAT(m) \ - sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); -static struct atl1_stats atl1_gstrings_stats[] = { - {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, - {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, - {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, - {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, - {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, - {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, - {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, - {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, - {"multicast", ATL1_STAT(soft_stats.multicast)}, - {"collisions", ATL1_STAT(soft_stats.collisions)}, - {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, - {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, - {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, - {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, - {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, - {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, - {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, - {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, - {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, - {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, - {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, - {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, - {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, - {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, - {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, - {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, - {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, - {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, - {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, - {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, - {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} -}; + atomic_set(&tpd_ring->next_to_use, next_to_use); +} -static void atl1_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) +static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); - int i; - char *p; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + int len = skb->len; + int tso; + int count = 1; + int ret_val; + struct tx_packet_desc *ptpd; + u16 frag_size; + u16 vlan_tag; + unsigned long flags; + unsigned int nr_frags = 0; + unsigned int mss = 0; + unsigned int f; + unsigned int proto_hdr_len; + + len -= skb->data_len; + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } - for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { - p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; - data[i] = (atl1_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + frag_size = skb_shinfo(skb)->frags[f].size; + if (frag_size) + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; } -} + mss = skb_shinfo(skb)->gso_size; + if (mss) { + if (skb->protocol == ntohs(ETH_P_IP)) { + proto_hdr_len = (skb_transport_offset(skb) + + tcp_hdrlen(skb)); + if (unlikely(proto_hdr_len > len)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* need additional TPD ? 
*/ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + } -static int atl1_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(atl1_gstrings_stats); - default: - return -EOPNOTSUPP; + if (!spin_trylock_irqsave(&adapter->lock, flags)) { + /* Can't get lock - tell upper layer to requeue */ + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx locked\n"); + return NETDEV_TX_LOCKED; } -} -static int atl1_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { + /* not enough descriptors */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx busy\n"); + return NETDEV_TX_BUSY; + } - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - ecmd->advertising |= ADVERTISED_Autoneg; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->advertising |= - (ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full); - } else - ecmd->advertising |= (ADVERTISED_1000baseT_Full); + ptpd = ATL1_TPD_DESC(tpd_ring, + (u16) atomic_read(&tpd_ring->next_to_use)); + memset(ptpd, 0, sizeof(struct tx_packet_desc)); + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) << + TPD_VL_TAGGED_SHIFT; } - ecmd->port = PORT_TP; - ecmd->phy_address = 0; - ecmd->transceiver = XCVR_INTERNAL; - if (netif_carrier_ok(adapter->netdev)) { - u16 link_speed, link_duplex; - atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); - ecmd->speed = link_speed; - if (link_duplex == FULL_DUPLEX) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } else { - ecmd->speed = -1; - ecmd->duplex = -1; + tso = atl1_tso(adapter, skb, ptpd); + if (tso < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) - ecmd->autoneg = AUTONEG_ENABLE; - else - ecmd->autoneg = AUTONEG_DISABLE; - return 0; + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ptpd); + if (ret_val < 0) { + spin_unlock_irqrestore(&adapter->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + atl1_tx_map(adapter, skb, ptpd); + atl1_tx_queue(adapter, count, ptpd); + atl1_update_mailbox(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); + netdev->trans_start = jiffies; + return NETDEV_TX_OK; } -static int atl1_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +/* + * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl1_intr(int irq, 
void *data) { - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - u16 phy_data; - int ret_val = 0; - u16 old_media_type = hw->media_type; + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + u8 update_rx; + int max_ints = 10; - if (netif_running(adapter->netdev)) { - if (netif_msg_link(adapter)) - dev_dbg(&adapter->pdev->dev, - "ethtool shutting down adapter\n"); - atl1_down(adapter); - } + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; - if (ecmd->autoneg == AUTONEG_ENABLE) - hw->media_type = MEDIA_TYPE_AUTO_SENSOR; - else { - if (ecmd->speed == SPEED_1000) { - if (ecmd->duplex != DUPLEX_FULL) { - if (netif_msg_link(adapter)) - dev_warn(&adapter->pdev->dev, - "1000M half is invalid\n"); - ret_val = -EINVAL; - goto exit_sset; + update_rx = 0; + + do { + /* clear CMB interrupt status at once */ + adapter->cmb.cmb->int_stats = 0; + + if (status & ISR_GPHY) /* clear phy status */ + atlx_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; } - hw->media_type = MEDIA_TYPE_1000M_FULL; - } else if (ecmd->speed == SPEED_100) { - if (ecmd->duplex == DUPLEX_FULL) - hw->media_type = MEDIA_TYPE_100M_FULL; - else - hw->media_type = MEDIA_TYPE_100M_HALF; - } else { - if (ecmd->duplex == DUPLEX_FULL) - hw->media_type = MEDIA_TYPE_10M_FULL; - else - hw->media_type = MEDIA_TYPE_10M_HALF; } - } - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - ecmd->advertising = - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - case MEDIA_TYPE_1000M_FULL: - ecmd->advertising = - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - default: - ecmd->advertising = 0; - break; - } - if (atl1_phy_setup_autoneg_adv(hw)) { - ret_val = -EINVAL; - if (netif_msg_link(adapter)) - dev_warn(&adapter->pdev->dev, - "invalid ethtool speed/duplex setting\n"); - goto exit_sset; - } - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; - else { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | - MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = - MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: - /* MEDIA_TYPE_10M_HALF: */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - break; + + /* check if DMA read/write error ? 
*/ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; } - } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); -exit_sset: - if (ret_val) - hw->media_type = old_media_type; - if (netif_running(adapter->netdev)) { - if (netif_msg_link(adapter)) - dev_dbg(&adapter->pdev->dev, - "ethtool starting adapter\n"); - atl1_up(adapter); - } else if (!ret_val) { - if (netif_msg_link(adapter)) - dev_dbg(&adapter->pdev->dev, - "ethtool resetting adapter\n"); - atl1_reset(adapter); - } - return ret_val; -} + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } -static void atl1_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); + /* transmit event */ + if (status & ISR_CMB_TX) + atl1_intr_tx(adapter); - strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); - strncpy(drvinfo->version, ATLX_DRIVER_VERSION, - sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->eedump_len = ATL1_EEDUMP_LEN; -} + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV | ISR_CMB_RX))) { + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV)) + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", + status); + atl1_intr_rx(adapter); + } -static void atl1_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); + if (--max_ints < 0) + break; - wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = 0; - if (adapter->wol & ATLX_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & ATLX_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & ATLX_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & ATLX_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; - return; + } while ((status = adapter->cmb.cmb->int_stats)); + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; } -static int atl1_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) +/* + * atl1_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_watchdog(unsigned long data) { - struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_adapter *adapter = (struct atl1_adapter *)data; - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - adapter->wol = 0; - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= ATLX_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= ATLX_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= ATLX_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= ATLX_WUFC_MAG; - return 0; + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); } -static u32 atl1_get_msglevel(struct net_device *netdev) +/* + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned 
long data) { - struct atl1_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; -static void atl1_set_msglevel(struct net_device *netdev, u32 value) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = value; + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); } -static int atl1_get_regs_len(struct net_device *netdev) -{ - return ATL1_REG_COUNT * sizeof(u32); -} +/* + * Orphaned vendor comment left intact here: + * + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. + * + */ -static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, - void *p) +static int atl1_reset(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - unsigned int i; - u32 *regbuf = p; - - for (i = 0; i < ATL1_REG_COUNT; i++) { - /* - * This switch statement avoids reserved regions - * of register space. - */ - switch (i) { - case 6 ... 9: - case 14: - case 29 ... 31: - case 34 ... 63: - case 75 ... 127: - case 136 ... 1023: - case 1027 ... 1087: - case 1091 ... 1151: - case 1194 ... 1195: - case 1200 ... 1201: - case 1206 ... 1213: - case 1216 ... 1279: - case 1290 ... 1311: - case 1323 ... 1343: - case 1358 ... 1359: - case 1368 ... 1375: - case 1378 ... 1383: - case 1388 ... 1391: - case 1393 ... 1395: - case 1402 ... 1403: - case 1410 ... 1471: - case 1522 ... 
1535: - /* reserved region; don't read it */ - regbuf[i] = 0; - break; - default: - /* unreserved region */ - regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); - } - } + int ret; + ret = atl1_reset_hw(&adapter->hw); + if (ret) + return ret; + return atl1_init_hw(&adapter->hw); } -static void atl1_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static s32 atl1_up(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_tpd_ring *txdr = &adapter->tpd_ring; - struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; + struct net_device *netdev = adapter->netdev; + int err; + int irq_flags = IRQF_SAMPLE_RANDOM; - ring->rx_max_pending = ATL1_MAX_RFD; - ring->tx_max_pending = ATL1_MAX_TPD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rxdr->count; - ring->tx_pending = txdr->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} + /* hardware has been reset, we need to reload some things */ + atlx_set_multi(netdev); + atl1_init_ring_ptrs(adapter); + atlx_restore_vlan(adapter); + err = atl1_alloc_rx_buffers(adapter); + if (unlikely(!err)) + /* no RX BUFFER allocated */ + return -ENOMEM; -static int atl1_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; - struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; - struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; + if (unlikely(atl1_configure(adapter))) { + err = -EIO; + goto err_up; + } - struct atl1_tpd_ring tpd_old, tpd_new; - struct atl1_rfd_ring rfd_old, rfd_new; - struct atl1_rrd_ring rrd_old, rrd_new; - struct atl1_ring_header rhdr_old, rhdr_new; - int err; + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); + irq_flags |= IRQF_SHARED; + } - tpd_old = adapter->tpd_ring; - rfd_old = adapter->rfd_ring; - rrd_old = adapter->rrd_ring; - rhdr_old = adapter->ring_header; + err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, + netdev->name, netdev); + if (unlikely(err)) + goto err_up; - if (netif_running(adapter->netdev)) - atl1_down(adapter); + mod_timer(&adapter->watchdog_timer, jiffies); + atlx_irq_enable(adapter); + atl1_check_link(adapter); + return 0; - rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); - rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : - rfdr->count; - rfdr->count = (rfdr->count + 3) & ~3; - rrdr->count = rfdr->count; +err_up: + pci_disable_msi(adapter->pdev); + /* free rx_buffers */ + atl1_clean_rx_ring(adapter); + return err; +} - tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); - tpdr->count = tpdr->count > ATL1_MAX_TPD ? 
ATL1_MAX_TPD : - tpdr->count; - tpdr->count = (tpdr->count + 3) & ~3; +static void atl1_down(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; - if (netif_running(adapter->netdev)) { - /* try to get new resources before deleting old */ - err = atl1_setup_ring_resources(adapter); - if (err) - goto err_setup_ring; + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + adapter->phy_timer_pending = false; - /* - * save the new, restore the old in order to free it, - * then restore the new back again - */ + atlx_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; - rfd_new = adapter->rfd_ring; - rrd_new = adapter->rrd_ring; - tpd_new = adapter->tpd_ring; - rhdr_new = adapter->ring_header; - adapter->rfd_ring = rfd_old; - adapter->rrd_ring = rrd_old; - adapter->tpd_ring = tpd_old; - adapter->ring_header = rhdr_old; - atl1_free_ring_resources(adapter); - adapter->rfd_ring = rfd_new; - adapter->rrd_ring = rrd_new; - adapter->tpd_ring = tpd_new; - adapter->ring_header = rhdr_new; + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + netif_carrier_off(netdev); + netif_stop_queue(netdev); - err = atl1_up(adapter); - if (err) - return err; - } - return 0; + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); +} -err_setup_ring: - adapter->rfd_ring = rfd_old; - adapter->rrd_ring = rrd_old; - adapter->tpd_ring = tpd_old; - adapter->ring_header = rhdr_old; +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); atl1_up(adapter); - return err; + netif_device_attach(netdev); } -static void atl1_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *epause) +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - epause->autoneg = AUTONEG_ENABLE; - } else { - epause->autoneg = AUTONEG_DISABLE; + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; } - epause->rx_pause = 1; - epause->tx_pause = 1; + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; } -static int atl1_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *epause) +/* + * atl1_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * 
active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1_open(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; + int err; - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - epause->autoneg = AUTONEG_ENABLE; - } else { - epause->autoneg = AUTONEG_DISABLE; - } + /* allocate transmit descriptors */ + err = atl1_setup_ring_resources(adapter); + if (err) + return err; - epause->rx_pause = 1; - epause->tx_pause = 1; + err = atl1_up(adapter); + if (err) + goto err_up; return 0; -} -/* FIXME: is this right? -- CHS */ -static u32 atl1_get_rx_csum(struct net_device *netdev) -{ - return 1; +err_up: + atl1_reset(adapter); + return err; } -static void atl1_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) +/* + * atl1_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1_close(struct net_device *netdev) { - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { - memcpy(p, atl1_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } + struct atl1_adapter *adapter = netdev_priv(netdev); + atl1_down(adapter); + atl1_free_ring_resources(adapter); + return 0; } -static int atl1_nway_reset(struct net_device *netdev) +#ifdef CONFIG_PM +static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) { + struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; - if (netif_running(netdev)) { - u16 phy_data; + netif_device_detach(netdev); + if (netif_running(netdev)) atl1_down(adapter); - if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || - hw->media_type == MEDIA_TYPE_1000M_FULL) { - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; - } else { - switch (hw->media_type) { - case MEDIA_TYPE_100M_FULL: - phy_data = MII_CR_FULL_DUPLEX | - MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_100M_HALF: - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; - break; - case MEDIA_TYPE_10M_FULL: - phy_data = MII_CR_FULL_DUPLEX | - MII_CR_SPEED_10 | MII_CR_RESET; - break; - default: - /* MEDIA_TYPE_10M_HALF */ - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; - } - } - atl1_write_phy_reg(hw, MII_BMCR, phy_data); - atl1_up(adapter); - } - return 0; -} - -const struct ethtool_ops atl1_ethtool_ops = { - .get_settings = atl1_get_settings, - .set_settings = atl1_set_settings, - .get_drvinfo = atl1_get_drvinfo, - .get_wol = atl1_get_wol, - .set_wol = atl1_set_wol, - .get_msglevel = atl1_get_msglevel, - .set_msglevel = atl1_set_msglevel, - .get_regs_len = atl1_get_regs_len, - .get_regs = atl1_get_regs, - .get_ringparam = atl1_get_ringparam, - .set_ringparam = atl1_set_ringparam, - .get_pauseparam = atl1_get_pauseparam, - .set_pauseparam = atl1_set_pauseparam, - .get_rx_csum = 
atl1_get_rx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, - .get_link = ethtool_op_get_link, - .set_sg = ethtool_op_set_sg, - .get_strings = atl1_get_strings, - .nway_reset = atl1_nway_reset, - .get_ethtool_stats = atl1_get_ethtool_stats, - .get_sset_count = atl1_get_sset_count, - .set_tso = ethtool_op_set_tso, -}; + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATLX_WUFC_LNKC; -/* - * Reset the transmit and receive units; mask and clear all interrupts. - * hw - Struct containing variables accessed by shared code - * return : 0 or idle status (if error) - */ -s32 atl1_reset_hw(struct atl1_hw *hw) -{ - struct pci_dev *pdev = hw->back->pdev; - struct atl1_adapter *adapter = hw->back; - u32 icr; - int i; + /* reduce speed to 10/100M */ + if (wufc) { + atl1_phy_enter_power_saving(hw); + /* if resume, let driver to re- setup link */ + hw->phy_configured = false; + atl1_set_mac_addr(hw); + atlx_set_multi(netdev); - /* - * Clear Interrupt mask to stop board from generating - * interrupts & Clear any pending interrupt events - */ - /* - * iowrite32(0, hw->hw_addr + REG_IMR); - * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); - */ + ctrl = 0; + /* turn on magic packet wol */ + if (wufc & ATLX_WUFC_MAG) + ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - /* - * Issue Soft Reset to the MAC. This will reset the chip's - * transmit, receive, DMA. It will not effect - * the current PCI configuration. The global reset bit is self- - * clearing, and should clear within a microsecond. - */ - iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); - ioread32(hw->hw_addr + REG_MASTER_CTRL); + /* turn on Link change WOL */ + if (wufc & ATLX_WUFC_LNKC) + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); - iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); - ioread16(hw->hw_addr + REG_PHY_ENABLE); + /* turn on all-multi mode if wake on multicast is enabled */ + ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_DBG; + ctrl &= ~MAC_CTRL_PROMIS_EN; + if (wufc & ATLX_WUFC_MC) + ctrl |= MAC_CTRL_MC_ALL_EN; + else + ctrl &= ~MAC_CTRL_MC_ALL_EN; - /* delay about 1ms */ - msleep(1); + /* turn on broadcast mode if wake on-BC is enabled */ + if (wufc & ATLX_WUFC_BC) + ctrl |= MAC_CTRL_BC_EN; + else + ctrl &= ~MAC_CTRL_BC_EN; - /* Wait at least 10ms for All module to be Idle */ - for (i = 0; i < 10; i++) { - icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); - if (!icr) - break; - /* delay 1 ms */ - msleep(1); - /* FIXME: still the right way to do this? 
*/ - cpu_relax(); + /* enable RX */ + ctrl |= MAC_CTRL_RX_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); } - if (icr) { - if (netif_msg_hw(adapter)) - dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); - return icr; - } + pci_save_state(pdev); + pci_disable_device(pdev); + + pci_set_power_state(pdev, PCI_D3hot); return 0; } -/* function about EEPROM - * - * check_eeprom_exist - * return 0 if eeprom exist - */ -static int atl1_check_eeprom_exist(struct atl1_hw *hw) +static int atl1_resume(struct pci_dev *pdev) { - u32 value; - value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); - if (value & SPI_FLASH_CTRL_EN_VPD) { - value &= ~SPI_FLASH_CTRL_EN_VPD; - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); - } + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + u32 err; - value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); - return ((value & 0xFF00) == 0x6C00) ? 0 : 1; -} + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); -static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) -{ - int i; - u32 control; + /* FIXME: check and handle */ + err = pci_enable_device(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); - if (offset & 3) - /* address do not align */ - return false; + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + atl1_reset(adapter); - iowrite32(0, hw->hw_addr + REG_VPD_DATA); - control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; - iowrite32(control, hw->hw_addr + REG_VPD_CAP); - ioread32(hw->hw_addr + REG_VPD_CAP); + if (netif_running(netdev)) + atl1_up(adapter); + netif_device_attach(netdev); - for (i = 0; i < 10; i++) { - msleep(2); - control = ioread32(hw->hw_addr + REG_VPD_CAP); - if (control & VPD_CAP_VPD_FLAG) - break; - } - if (control & VPD_CAP_VPD_FLAG) { - *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); - return true; - } - /* timeout */ - return false; + atl1_via_workaround(adapter); + + return 0; +} +#else +#define atl1_suspend NULL +#define atl1_resume NULL +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); } +#endif /* - * Reads the value from a PHY register - * hw - Struct containing variables accessed by shared code - * reg_addr - address of the PHY register to read + * atl1_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
*/ -s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) +static int __devinit atl1_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { - u32 val; - int i; + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; + int err; - val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | - MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << - MDIO_CLK_SEL_SHIFT; - iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); - ioread32(hw->hw_addr + REG_MDIO_CTRL); + err = pci_enable_device(pdev); + if (err) + return err; - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; } - if (!(val & (MDIO_START | MDIO_BUSY))) { - *phy_data = (u16) val; - return 0; + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name + */ + err = pci_request_regions(pdev, ATLX_DRIVER_NAME); + if (err) + goto err_request_regions; + + /* + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; } - return ATLX_ERR_PHY; -} + SET_NETDEV_DEV(netdev, &pdev->dev); -#define CUSTOM_SPI_CS_SETUP 2 -#define CUSTOM_SPI_CLK_HI 2 -#define CUSTOM_SPI_CLK_LO 2 -#define CUSTOM_SPI_CS_HOLD 2 -#define CUSTOM_SPI_CS_HI 3 + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); + + adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_pci_iomap; + } + /* get device revision number */ + adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + + (REG_MASTER_CTRL + 2)); + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + + /* set default ring resource counts */ + adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; + adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + + adapter->mii.dev = netdev; + adapter->mii.mdio_read = mdio_read; + adapter->mii.mdio_write = mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + + netdev->open = &atl1_open; + netdev->stop = &atl1_close; + netdev->hard_start_xmit = &atl1_xmit_frame; + netdev->get_stats = &atlx_get_stats; + netdev->set_multicast_list = &atlx_set_multi; + netdev->set_mac_address = &atl1_set_mac; + netdev->change_mtu = &atl1_change_mtu; + netdev->do_ioctl = &atlx_ioctl; + netdev->tx_timeout = &atlx_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1_poll_controller; +#endif + netdev->vlan_rx_register = atlx_vlan_rx_register; -static bool atl1_spi_read(struct atl1_hw *hw, u32 
addr, u32 *buf) -{ - int i; - u32 value; + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; - iowrite32(0, hw->hw_addr + REG_SPI_DATA); - iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + /* setup the private structure */ + err = atl1_sw_init(adapter); + if (err) + goto err_common; - value = SPI_FLASH_CTRL_WAIT_READY | - (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << - SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & - SPI_FLASH_CTRL_CLK_HI_MASK) << - SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & - SPI_FLASH_CTRL_CLK_LO_MASK) << - SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & - SPI_FLASH_CTRL_CS_HOLD_MASK) << - SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & - SPI_FLASH_CTRL_CS_HI_MASK) << - SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << - SPI_FLASH_CTRL_INS_SHIFT; + netdev->features = NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SG; + netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_LLTX; - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + /* + * patch for some L1 of old version, + * the final version of L1 may not need these + * patches + */ + /* atl1_pcie_patch(adapter); */ - value |= SPI_FLASH_CTRL_START; - iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); - ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + /* really reset GPHY core */ + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); - for (i = 0; i < 10; i++) { - msleep(1); - value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); - if (!(value & SPI_FLASH_CTRL_START)) - break; + /* + * reset the controller to + * put the device in a known good starting state + */ + if (atl1_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_common; } - if (value & SPI_FLASH_CTRL_START) - return false; + /* copy the MAC address out of the EEPROM */ + atl1_read_mac_addr(&adapter->hw); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); - *buf = ioread32(hw->hw_addr + REG_SPI_DATA); + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EIO; + goto err_common; + } - return true; -} + atl1_check_options(adapter); -/* - * get_permanent_address - * return 0 if get valid mac address, - */ -static int atl1_get_permanent_address(struct atl1_hw *hw) -{ - u32 addr[2]; - u32 i, control; - u16 reg; - u8 eth_addr[ETH_ALEN]; - bool key_valid; + /* pre-init the MAC, and setup link */ + err = atl1_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_common; + } - if (is_valid_ether_addr(hw->perm_mac_addr)) - return 0; + atl1_pcie_patch(adapter); + /* assume we have no link for now */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); - /* init */ - addr[0] = addr[1] = 0; + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &atl1_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; - if (!atl1_check_eeprom_exist(hw)) { - reg = 0; - key_valid = false; - /* Read out all EEPROM content */ - i = 0; - while (1) { - if (atl1_read_eeprom(hw, i + 0x100, &control)) { - if (key_valid) { - if (reg == REG_MAC_STA_ADDR) - addr[0] = control; - else if (reg == (REG_MAC_STA_ADDR + 4)) - addr[1] = control; - key_valid = false; - } else if ((control & 0xff) == 0x5A) { - key_valid = true; - reg = (u16) (control >> 16); - } else - break; - } else - /* read error */ - break; - i += 4; - } + init_timer(&adapter->phy_config_timer); + adapter->phy_config_timer.function = &atl1_phy_config; + adapter->phy_config_timer.data = (unsigned long)adapter; + adapter->phy_timer_pending 
= false; - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } - return 1; - } + INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); - /* see if SPI FLAGS exist ? */ - addr[0] = addr[1] = 0; - reg = 0; - key_valid = false; - i = 0; - while (1) { - if (atl1_spi_read(hw, i + 0x1f000, &control)) { - if (key_valid) { - if (reg == REG_MAC_STA_ADDR) - addr[0] = control; - else if (reg == (REG_MAC_STA_ADDR + 4)) - addr[1] = control; - key_valid = false; - } else if ((control & 0xff) == 0x5A) { - key_valid = true; - reg = (u16) (control >> 16); - } else - /* data end */ - break; - } else - /* read error */ - break; - i += 4; - } + INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } + INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); - /* - * On some motherboards, the MAC address is written by the - * BIOS directly to the MAC register during POST, and is - * not stored in eeprom. If all else thus far has failed - * to fetch the permanent MAC address, try reading it directly. - */ - addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); - addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); - return 0; - } + err = register_netdev(netdev); + if (err) + goto err_common; - return 1; + cards_found++; + atl1_via_workaround(adapter); + return 0; + +err_common: + pci_iounmap(pdev, adapter->hw.hw_addr); +err_pci_iomap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_dma: +err_request_regions: + pci_disable_device(pdev); + return err; } /* - * Reads the adapter's MAC address from the EEPROM - * hw - Struct containing variables accessed by shared code + * atl1_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. */ -s32 atl1_read_mac_addr(struct atl1_hw *hw) +static void __devexit atl1_remove(struct pci_dev *pdev) { - u16 i; + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter; + /* Device not available. Return. */ + if (!netdev) + return; - if (atl1_get_permanent_address(hw)) - random_ether_addr(hw->perm_mac_addr); + adapter = netdev_priv(netdev); - for (i = 0; i < ETH_ALEN; i++) - hw->mac_addr[i] = hw->perm_mac_addr[i]; - return 0; + /* + * Some atl1 boards lack persistent storage for their MAC, and get it + * from the BIOS during POST. If we've been messing with the MAC + * address, we need to save the permanent one. 
+ */ + if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); + atl1_set_mac_addr(&adapter->hw); + } + + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + unregister_netdev(netdev); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); } +static struct pci_driver atl1_driver = { + .name = ATLX_DRIVER_NAME, + .id_table = atl1_pci_tbl, + .probe = atl1_probe, + .remove = __devexit_p(atl1_remove), + .suspend = atl1_suspend, + .resume = atl1_resume +}; + /* - * Hashes an address to determine its location in the multicast table - * hw - Struct containing variables accessed by shared code - * mc_addr - the multicast address to hash + * atl1_exit_module - Driver Exit Cleanup Routine * - * atl1_hash_mc_addr - * purpose - * set hash value for a multicast address - * hash calcu processing : - * 1. calcu 32bit CRC for multicast address - * 2. reverse crc with MSB to LSB + * atl1_exit_module is called just before the driver is removed + * from memory. */ -u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) +static void __exit atl1_exit_module(void) { - u32 crc32, value = 0; - int i; - - crc32 = ether_crc_le(6, mc_addr); - for (i = 0; i < 32; i++) - value |= (((crc32 >> i) & 1) << (31 - i)); - - return value; + pci_unregister_driver(&atl1_driver); } /* - * Sets the bit in the multicast table corresponding to the hash value. - * hw - Struct containing variables accessed by shared code - * hash_value - Multicast address hash value + * atl1_init_module - Driver Registration Routine + * + * atl1_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. */ -void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) +static int __init atl1_init_module(void) { - u32 hash_bit, hash_reg; - u32 mta; - - /* - * The HASH Table is a register array of 2 32-bit registers. - * It is treated like an array of 64 bits. We want to set - * bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The register is determined by the - * upper 7 bits of the hash value and the bit within that - * register are determined by the lower 5 bits of the value. 
- */ - hash_reg = (hash_value >> 31) & 0x1; - hash_bit = (hash_value >> 26) & 0x1F; - mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); - mta |= (1 << hash_bit); - iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + return pci_register_driver(&atl1_driver); } -/* - * Writes a value to a PHY register - * hw - Struct containing variables accessed by shared code - * reg_addr - address of the PHY register to write - * data - data to write to the PHY - */ -s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) +module_init(atl1_init_module); +module_exit(atl1_exit_module); + +struct atl1_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ATL1_STAT(m) \ + sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) + +static struct atl1_stats atl1_gstrings_stats[] = { + {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, + {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, + {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, + {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, + {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, + {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, + {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, + {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, + {"multicast", ATL1_STAT(soft_stats.multicast)}, + {"collisions", ATL1_STAT(soft_stats.collisions)}, + {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, + {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, + {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, + {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, + {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, + {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, + {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, + {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, + {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, + {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, + {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, + {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, + {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, + {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, + {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, + {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, + {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, + {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} +}; + +static void atl1_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) { + struct atl1_adapter *adapter = netdev_priv(netdev); int i; - u32 val; - - val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | - (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | - MDIO_SUP_PREAMBLE | - MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; - iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); - ioread32(hw->hw_addr + REG_MDIO_CTRL); + char *p; - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; + data[i] = (atl1_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - if (!(val & (MDIO_START | MDIO_BUSY))) - return 0; - - return ATLX_ERR_PHY; } -/* - * Make L001's PHY out of Power Saving State (bug) - * hw - Struct containing variables accessed by shared code - * when power on, L001's PHY always on Power saving State - * (Gigabit Link forbidden) - */ -static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +static int atl1_get_sset_count(struct net_device *netdev, int sset) { - s32 ret; - ret = atl1_write_phy_reg(hw, 29, 0x0029); - if (ret) - return ret; - return atl1_write_phy_reg(hw, 30, 0); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } } -/* - *TODO: do something or get rid of this - */ -s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) +static int atl1_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) { -/* s32 ret_val; - * u16 phy_data; - */ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + ecmd->advertising |= ADVERTISED_Autoneg; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } else + ecmd->advertising |= (ADVERTISED_1000baseT_Full); + } + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + u16 link_speed, link_duplex; + atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); + ecmd->speed = link_speed; + if (link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; -/* - ret_val = atl1_write_phy_reg(hw, ...); - ret_val = atl1_write_phy_reg(hw, ...); - .... 
-*/ return 0; } -/* - * Resets the PHY and make all config validate - * hw - Struct containing variables accessed by shared code - * - * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) - */ -static s32 atl1_phy_reset(struct atl1_hw *hw) +static int atl1_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) { - struct pci_dev *pdev = hw->back->pdev; - struct atl1_adapter *adapter = hw->back; - s32 ret_val; + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; u16 phy_data; + int ret_val = 0; + u16 old_media_type = hw->media_type; + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool shutting down adapter\n"); + atl1_down(adapter); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + else { + if (ecmd->speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + ret_val = -EINVAL; + goto exit_sset; + } + hw->media_type = MEDIA_TYPE_1000M_FULL; + } else if (ecmd->speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_100M_FULL; + else + hw->media_type = MEDIA_TYPE_100M_HALF; + } else { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_10M_FULL; + else + hw->media_type = MEDIA_TYPE_10M_HALF; + } + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ecmd->advertising = + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + case MEDIA_TYPE_1000M_FULL: + ecmd->advertising = + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + default: + ecmd->advertising = 0; + break; + } + if (atl1_phy_setup_autoneg_adv(hw)) { + ret_val = -EINVAL; + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); + goto exit_sset; + } if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; @@ -3306,260 +3222,344 @@ static s32 atl1_phy_reset(struct atl1_hw *hw) break; } } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); +exit_sset: + if (ret_val) + hw->media_type = old_media_type; - ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); - if (ret_val) { - u32 val; - int i; - /* pcie serdes link may be down! 
*/ - if (netif_msg_hw(adapter)) - dev_dbg(&pdev->dev, "pcie phy link down\n"); + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool starting adapter\n"); + atl1_up(adapter); + } else if (!ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool resetting adapter\n"); + atl1_reset(adapter); + } + return ret_val; +} - for (i = 0; i < 25; i++) { - msleep(1); - val = ioread32(hw->hw_addr + REG_MDIO_CTRL); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; - } +static void atl1_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); - if ((val & (MDIO_START | MDIO_BUSY)) != 0) { - if (netif_msg_hw(adapter)) - dev_warn(&pdev->dev, - "pcie link down at least 25ms\n"); - return ret_val; - } - } + strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); + strncpy(drvinfo->version, ATLX_DRIVER_VERSION, + sizeof(drvinfo->version)); + strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = ATL1_EEDUMP_LEN; +} + +static void atl1_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + if (adapter->wol & ATLX_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & ATLX_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & ATLX_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + return; +} + +static int atl1_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + adapter->wol = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= ATLX_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= ATLX_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= ATLX_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; return 0; } -/* - * Configures PHY autoneg and flow control advertisement settings - * hw - Struct containing variables accessed by shared code - */ -s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) +static u32 atl1_get_msglevel(struct net_device *netdev) { - s32 ret_val; - s16 mii_autoneg_adv_reg; - s16 mii_1000t_ctrl_reg; + struct atl1_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; +static void atl1_set_msglevel(struct net_device *netdev, u32 value) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = value; +} - /* Read the MII 1000Base-T Control Register (Address 9). */ - mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; +static int atl1_get_regs_len(struct net_device *netdev) +{ + return ATL1_REG_COUNT * sizeof(u32); +} - /* - * First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). 
- */ - mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; - mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; +static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + unsigned int i; + u32 *regbuf = p; - /* - * Need to parse media_type and set up - * the appropriate PHY registers. - */ - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | - MII_AR_10T_FD_CAPS | - MII_AR_100TX_HD_CAPS | - MII_AR_100TX_FD_CAPS); - mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; - break; + for (i = 0; i < ATL1_REG_COUNT; i++) { + /* + * This switch statement avoids reserved regions + * of register space. + */ + switch (i) { + case 6 ... 9: + case 14: + case 29 ... 31: + case 34 ... 63: + case 75 ... 127: + case 136 ... 1023: + case 1027 ... 1087: + case 1091 ... 1151: + case 1194 ... 1195: + case 1200 ... 1201: + case 1206 ... 1213: + case 1216 ... 1279: + case 1290 ... 1311: + case 1323 ... 1343: + case 1358 ... 1359: + case 1368 ... 1375: + case 1378 ... 1383: + case 1388 ... 1391: + case 1393 ... 1395: + case 1402 ... 1403: + case 1410 ... 1471: + case 1522 ... 1535: + /* reserved region; don't read it */ + regbuf[i] = 0; + break; + default: + /* unreserved region */ + regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); + } + } +} - case MEDIA_TYPE_1000M_FULL: - mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; - break; +static void atl1_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *txdr = &adapter->tpd_ring; + struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; - case MEDIA_TYPE_100M_FULL: - mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; - break; + ring->rx_max_pending = ATL1_MAX_RFD; + ring->tx_max_pending = ATL1_MAX_TPD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} - case MEDIA_TYPE_100M_HALF: - mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; - break; +static int atl1_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; + struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; + struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; - case MEDIA_TYPE_10M_FULL: - mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; - break; + struct atl1_tpd_ring tpd_old, tpd_new; + struct atl1_rfd_ring rfd_old, rfd_new; + struct atl1_rrd_ring rrd_old, rrd_new; + struct atl1_ring_header rhdr_old, rhdr_new; + int err; - default: - mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; - break; - } + tpd_old = adapter->tpd_ring; + rfd_old = adapter->rfd_ring; + rrd_old = adapter->rrd_ring; + rhdr_old = adapter->ring_header; - /* flow control fixed to enable all */ - mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + if (netif_running(adapter->netdev)) + atl1_down(adapter); - hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; - hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); + rfdr->count = rfdr->count > ATL1_MAX_RFD ? 
ATL1_MAX_RFD : + rfdr->count; + rfdr->count = (rfdr->count + 3) & ~3; + rrdr->count = rfdr->count; - ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); - if (ret_val) - return ret_val; + tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); + tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : + tpdr->count; + tpdr->count = (tpdr->count + 3) & ~3; + + if (netif_running(adapter->netdev)) { + /* try to get new resources before deleting old */ + err = atl1_setup_ring_resources(adapter); + if (err) + goto err_setup_ring; + + /* + * save the new, restore the old in order to free it, + * then restore the new back again + */ - ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; + rfd_new = adapter->rfd_ring; + rrd_new = adapter->rrd_ring; + tpd_new = adapter->tpd_ring; + rhdr_new = adapter->ring_header; + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_free_ring_resources(adapter); + adapter->rfd_ring = rfd_new; + adapter->rrd_ring = rrd_new; + adapter->tpd_ring = tpd_new; + adapter->ring_header = rhdr_new; + err = atl1_up(adapter); + if (err) + return err; + } return 0; + +err_setup_ring: + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_up(adapter); + return err; } -/* - * Configures link settings. - * hw - Struct containing variables accessed by shared code - * Assumes the hardware has previously been reset and the - * transmitter and receiver are not enabled. - */ -static s32 atl1_setup_link(struct atl1_hw *hw) +static void atl1_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) { - struct pci_dev *pdev = hw->back->pdev; - struct atl1_adapter *adapter = hw->back; - s32 ret_val; + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; - /* - * Options: - * PHY will advertise value(s) parsed from - * autoneg_advertised and fc - * no matter what autoneg is , We will not wait link result. 
- */ - ret_val = atl1_phy_setup_autoneg_adv(hw); - if (ret_val) { - if (netif_msg_link(adapter)) - dev_dbg(&pdev->dev, - "error setting up autonegotiation\n"); - return ret_val; - } - /* SW.Reset , En-Auto-Neg if needed */ - ret_val = atl1_phy_reset(hw); - if (ret_val) { - if (netif_msg_link(adapter)) - dev_dbg(&pdev->dev, "error resetting phy\n"); - return ret_val; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; } - hw->phy_configured = true; - return ret_val; + epause->rx_pause = 1; + epause->tx_pause = 1; } -static void atl1_init_flash_opcode(struct atl1_hw *hw) +static int atl1_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) { - if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) - /* Atmel */ - hw->flash_vendor = 0; + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; - /* Init OP table */ - iowrite8(flash_table[hw->flash_vendor].cmd_program, - hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); - iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, - hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); - iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, - hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); - iowrite8(flash_table[hw->flash_vendor].cmd_rdid, - hw->hw_addr + REG_SPI_FLASH_OP_RDID); - iowrite8(flash_table[hw->flash_vendor].cmd_wren, - hw->hw_addr + REG_SPI_FLASH_OP_WREN); - iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, - hw->hw_addr + REG_SPI_FLASH_OP_RDSR); - iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, - hw->hw_addr + REG_SPI_FLASH_OP_WRSR); - iowrite8(flash_table[hw->flash_vendor].cmd_read, - hw->hw_addr + REG_SPI_FLASH_OP_READ); + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + + epause->rx_pause = 1; + epause->tx_pause = 1; + + return 0; } -/* - * Performs basic configuration of the adapter. - * hw - Struct containing variables accessed by shared code - * Assumes that the controller has previously been reset and is in a - * post-reset uninitialized state. Initializes multicast table, - * and Calls routines to setup link - * Leaves the transmit and receive units disabled and uninitialized. - */ -s32 atl1_init_hw(struct atl1_hw *hw) +/* FIXME: is this right? -- CHS */ +static u32 atl1_get_rx_csum(struct net_device *netdev) { - u32 ret_val = 0; - - /* Zero out the Multicast HASH table */ - iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); - /* clear the old settings from the multicast hash table */ - iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + return 1; +} - atl1_init_flash_opcode(hw); +static void atl1_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; - if (!hw->phy_configured) { - /* enable GPHY LinkChange Interrrupt */ - ret_val = atl1_write_phy_reg(hw, 18, 0xC00); - if (ret_val) - return ret_val; - /* make PHY out of power-saving state */ - ret_val = atl1_phy_leave_power_saving(hw); - if (ret_val) - return ret_val; - /* Call a subroutine to configure the link */ - ret_val = atl1_setup_link(hw); + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + memcpy(p, atl1_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; } - return ret_val; } -/* - * Detects the current speed and duplex settings of the hardware. 
- * hw - Struct containing variables accessed by shared code - * speed - Speed of the connection - * duplex - Duplex setting of the connection - */ -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) +static int atl1_nway_reset(struct net_device *netdev) { - struct pci_dev *pdev = hw->back->pdev; - struct atl1_adapter *adapter = hw->back; - s32 ret_val; - u16 phy_data; - - /* ; --- Read PHY Specific Status Register (17) */ - ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); - if (ret_val) - return ret_val; + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; - if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) - return ATLX_ERR_PHY_RES; + if (netif_running(netdev)) { + u16 phy_data; + atl1_down(adapter); - switch (phy_data & MII_ATLX_PSSR_SPEED) { - case MII_ATLX_PSSR_1000MBS: - *speed = SPEED_1000; - break; - case MII_ATLX_PSSR_100MBS: - *speed = SPEED_100; - break; - case MII_ATLX_PSSR_10MBS: - *speed = SPEED_10; - break; - default: - if (netif_msg_hw(adapter)) - dev_dbg(&pdev->dev, "error getting speed\n"); - return ATLX_ERR_PHY_SPEED; - break; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + } else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + atl1_up(adapter); } - if (phy_data & MII_ATLX_PSSR_DPLX) - *duplex = FULL_DUPLEX; - else - *duplex = HALF_DUPLEX; - return 0; } -void atl1_set_mac_addr(struct atl1_hw *hw) -{ - u32 value; - /* - * 00-0B-6A-F6-00-DC - * 0: 6AF600DC 1: 000B - * low dword - */ - value = (((u32) hw->mac_addr[2]) << 24) | - (((u32) hw->mac_addr[3]) << 16) | - (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); - iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); - /* high dword */ - value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); - iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); -} +const struct ethtool_ops atl1_ethtool_ops = { + .get_settings = atl1_get_settings, + .set_settings = atl1_set_settings, + .get_drvinfo = atl1_get_drvinfo, + .get_wol = atl1_get_wol, + .set_wol = atl1_set_wol, + .get_msglevel = atl1_get_msglevel, + .set_msglevel = atl1_set_msglevel, + .get_regs_len = atl1_get_regs_len, + .get_regs = atl1_get_regs, + .get_ringparam = atl1_get_ringparam, + .set_ringparam = atl1_set_ringparam, + .get_pauseparam = atl1_get_pauseparam, + .set_pauseparam = atl1_set_pauseparam, + .get_rx_csum = atl1_get_rx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_link = ethtool_op_get_link, + .set_sg = ethtool_op_set_sg, + .get_strings = atl1_get_strings, + .nway_reset = atl1_nway_reset, + .get_ethtool_stats = atl1_get_ethtool_stats, + .get_sset_count = atl1_get_sset_count, + .set_tso = ethtool_op_set_tso, +}; diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 6220298..51893d6 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -56,20 +56,10 @@ struct atl1_adapter; struct atl1_hw; /* function prototypes needed by multiple files */ -s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw); -s32 atl1_write_phy_reg(struct 
atl1_hw *hw, u32 reg_addr, u16 phy_data); -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); -s32 atl1_read_mac_addr(struct atl1_hw *hw); -s32 atl1_init_hw(struct atl1_hw *hw); -s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); -s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex); u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); void atl1_set_mac_addr(struct atl1_hw *hw); -s32 atl1_phy_enter_power_saving(struct atl1_hw *hw); -s32 atl1_reset_hw(struct atl1_hw *hw); -void atl1_check_options(struct atl1_adapter *adapter); static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static u32 atl1_check_link(struct atl1_adapter *adapter); -- cgit v1.1
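
The following userspace sketch is not part of the commit; it only mirrors, for reference, the byte packing that atl1_set_mac_addr() performs in the hunk above. It reuses the sample address from the vendor comment (00-0B-6A-F6-00-DC), so it should reproduce the same two register values noted there: 6AF600DC for the low dword and 000B for the high dword.

/* Hypothetical standalone illustration; builds with any C compiler. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* sample MAC from the vendor comment: 00-0B-6A-F6-00-DC */
	const uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
	uint32_t lo, hi;

	/* REG_MAC_STA_ADDR: MAC bytes 2..5, byte 2 in the most significant position */
	lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	     ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];

	/* REG_MAC_STA_ADDR + 4: MAC bytes 0..1 in the low 16 bits */
	hi = ((uint32_t)mac[0] << 8) | (uint32_t)mac[1];

	printf("low dword  = 0x%08" PRIX32 "\n", lo);	/* expect 0x6AF600DC */
	printf("high dword = 0x%08" PRIX32 "\n", hi);	/* expect 0x0000000B */
	return 0;
}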